/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>
#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>

#include <os/log_private.h>
#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
#include <pexpert/pexpert.h> /* For gPanicBase */
#endif
unsigned int    halt_in_debugger = 0;
unsigned int    switch_debugger = 0;
unsigned int    current_debugger = 0;
unsigned int    active_debugger = 0;
unsigned int    debug_mode = 0;
unsigned int    disable_debug_output = TRUE;
unsigned int    systemLogDiags = FALSE;
unsigned int    panicDebugging = FALSE;
unsigned int    logPanicDataToScreen = FALSE;
unsigned int    kdebug_serial = FALSE;
boolean_t       lock_panic_mode = FALSE;
const char              *panicstr = (char *) 0;
decl_simple_lock_data(,panic_lock)
volatile int            panicwait;
volatile unsigned int   nestedpanic = 0;
unsigned int            panic_is_inited = 0;
unsigned int            return_on_panic = 0;
unsigned long           panic_caller;
#define DEBUG_BUF_SIZE (3 * PAGE_SIZE)
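/* i.e. three VM pages: 12KB with 4KB pages, 48KB with 16KB pages */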
/* debug_buf is directly linked with iBoot panic region for ARM64 targets */
#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
char *debug_buf_addr = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;
#else
char debug_buf[DEBUG_BUF_SIZE];
__used char *debug_buf_addr = debug_buf;
char *debug_buf_ptr = debug_buf;
unsigned int debug_buf_size = sizeof(debug_buf);
#endif

char *debug_buf_stackshot_start;
char *debug_buf_stackshot_end;
static char model_name[64];
unsigned char *kernel_uuid;
/* uuid_string_t */ char kernel_uuid_string[37];

char   panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);
static spl_t panic_prologue(const char *str);
static void panic_epilogue(spl_t s);

/* Eight 7-bit ASCII characters packed into seven bytes. */
struct pasc {
  unsigned a: 7;
  unsigned b: 7;
  unsigned c: 7;
  unsigned d: 7;
  unsigned e: 7;
  unsigned f: 7;
  unsigned g: 7;
  unsigned h: 7;
}  __attribute__((packed));

typedef struct pasc pasc_t;
/* Prevent CPP from breaking the definition below */
#ifdef CONFIG_NO_PANIC_STRINGS
#undef Assert
#endif

int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
                            TRUE;
#else
                            FALSE;
#endif
void __attribute__((noinline))
Assert(
        const char      *file,
        int             line,
        const char      *expression
      )
{
        int saved_return_on_panic;

        if (!mach_assert) {
                kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
                return;
        }

        saved_return_on_panic = return_on_panic;

        /*
         * If we don't have a debugger configured, returning from an
         * assert is a bad, bad idea; there is no guarantee that we
         * didn't simply assert before we were able to restart the
         * platform expert.
         */
        if (current_debugger != NO_CUR_DB)
                return_on_panic = 1;

        panic_plain("%s:%d Assertion failed: %s", file, line, expression);

        return_on_panic = saved_return_on_panic;
}
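/*
 * Callers normally reach Assert() through the assert() macro in
 * <kern/assert.h>, which (roughly) expands as:
 *
 *      assert(ptr != NULL);
 *        -> (ptr != NULL) ? (void)0 : Assert(__FILE__, __LINE__, "ptr != NULL");
 */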
/*
 *      Carefully use the panic_lock.  There's always a chance that
 *      somehow we'll call panic before getting to initialize the
 *      panic_lock -- in this case, we'll assume that the world is
 *      in uniprocessor mode and just avoid using the panic lock.
 */
#define PANIC_LOCK()                                                    \
MACRO_BEGIN                                                             \
        if (panic_is_inited)                                            \
                simple_lock(&panic_lock);                               \
MACRO_END

#define PANIC_UNLOCK()                                                  \
MACRO_BEGIN                                                             \
        if (panic_is_inited)                                            \
                simple_unlock(&panic_lock);                             \
MACRO_END
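/*
 * Illustrative use: the panic path brackets its updates to panicstr and the
 * nested-panic bookkeeping with these macros, e.g.
 *
 *      PANIC_LOCK();
 *      panicstr = str;
 *      paniccpu = cpu_number();
 *      PANIC_UNLOCK();
 */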
void
panic_init(void)
{
        unsigned long uuidlen = 0;
        void *uuid;

        uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
        if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
                kernel_uuid = uuid;
                uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
        }

        simple_lock_init(&panic_lock, 0);
        panic_is_inited = 1;
        panic_caller = 0;

        if (!PE_parse_boot_argn("assertions", &mach_assert, sizeof(mach_assert))) {
                mach_assert = 1;
        }
}
void
debug_log_init(void)
{
        if (debug_buf_size != 0)
                return;

#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
        if (!gPanicBase) {
                printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
                return;
        }
        /* Shift debug buf start location and size by 8 bytes for magic header and crc value */
        debug_buf_addr = (char*)gPanicBase + 8;
        debug_buf_ptr = debug_buf_addr;
        debug_buf_size = gPanicSize - 8;
#else
        debug_buf_addr = debug_buf;
        debug_buf_ptr = debug_buf;
        debug_buf_size = sizeof(debug_buf);
#endif
}
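/*
 * Resulting layout when the iBoot panic region is used (illustrative):
 *
 *      gPanicBase -> [ 8 bytes: magic header + CRC ][ debug_buf_size bytes of panic log ]
 *
 * debug_buf_addr and debug_buf_ptr start just past the 8-byte header.
 */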
#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()    pmCPUHalt(PM_HALT_PANIC)
#define panic_safe()    pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE)
#define panic_normal()  pmSafeMode(x86_lcpu(), PM_SAFE_FL_NORMAL)
#else
#define panic_stop()    { while (1) ; }
#define panic_safe()
#define panic_normal()
#endif
/*
 * Prevent CPP from breaking the definition below,
 * since all clients get a #define to prepend line numbers
 */
#ifdef panic
#undef panic
#endif

void _consume_panic_args(int a __unused, ...)
{
        panic("panic");
}
extern unsigned int write_trace_on_panic;
static spl_t
panic_prologue(const char *str)
{
        spl_t   s;

        if (write_trace_on_panic && kdebug_enable) {
                if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
                        ml_set_interrupts_enabled(TRUE);
                        kdbg_dump_trace_to_file("/var/tmp/panic.trace");
                }
        }

        s = splhigh();
        disable_preemption();
        /* Locking code should relax some checks at panic time */
        lock_panic_mode = TRUE;

#if     defined(__i386__) || defined(__x86_64__)
        /* Attempt to display the unparsed panic string */
        const char *tstr = str;

        kprintf("Panic initiated, string: ");
        while (tstr && *tstr)
                kprintf("%c", *tstr++);
        kprintf("\n");
#endif

        panic_safe();

        if( logPanicDataToScreen )
                disable_debug_output = FALSE;

        debug_mode = TRUE;

restart:
        PANIC_LOCK();

        if (panicstr) {
                if (cpu_number() != paniccpu) {
                        PANIC_UNLOCK();
                        /*
                         * Wait until message has been printed to identify correct
                         * cpu that made the first panic.
                         */
                        while (panicwait)
                                continue;
                        goto restart;
                } else {
                        nestedpanic += 1;
                        PANIC_UNLOCK();
                        // Other cores will not be resumed on double panic
                        Debugger("double panic");
                        // a printf statement here was removed to avoid a panic-loop caused
                        // by a panic from printf
                        panic_stop();
                        /* NOTREACHED */
                }
        }
        panicstr = str;
        paniccpu = cpu_number();
        panicwait = 1;

        PANIC_UNLOCK();

        // halt other cores now in anticipation of the debugger call
        return(s);
}
#if DEVELOPMENT || DEBUG
static void
panic_epilogue(spl_t    s)
#else
#if !defined(__i386__) && !defined(__x86_64__)
__attribute__((noreturn))
#endif
static void
panic_epilogue(__unused spl_t   s)
#endif
{
        /*
         * Release panicstr so that we can handle normally other panics.
         */
        PANIC_LOCK();
        panicstr = (char *)0;
        PANIC_UNLOCK();

#if DEVELOPMENT || DEBUG
        if (return_on_panic) {
                // resume other cores as we are returning
                panic_normal();
                enable_preemption();
                splx(s);
                return;
        }
#endif
        kdb_printf("panic: We are hanging here...\n");
        panic_stop();
        /* NOTREACHED */
}
#if !DEVELOPMENT && !DEBUG && !defined(__i386__) && !defined(__x86_64__)
__attribute__((noreturn))
#endif
void
panic(const char *str, ...)
{
        va_list listp;
        spl_t   s;
        boolean_t       old_doprnt_hide_pointers = doprnt_hide_pointers;

#if defined (__x86_64__)
        plctrace_disable();
#endif
        /* panic_caller is initialized to 0.  If set, don't change it */
        if ( ! panic_caller )
                panic_caller = (unsigned long)(char *)__builtin_return_address(0);

        s = panic_prologue(str);

        /* Never hide pointers from panic logs. */
        doprnt_hide_pointers = FALSE;

        kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
        if (str) {
                va_start(listp, str);
                _doprnt(str, &listp, consdebug_putc, 0);
                va_end(listp);
        }
        kdb_printf("\n");

        /*
         * Release panicwait indicator so that other cpus may call Debugger().
         */
        panicwait = 0;
        Debugger("panic");

        doprnt_hide_pointers = old_doprnt_hide_pointers;

        panic_epilogue(s);
}
/*
 * panic_with_options: wraps the panic call in a way that allows us to pass
 *                      a bitmask of specific debugger options.
 */
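/*
 * Illustrative call ("mysubsystem" and "state" are placeholders):
 *
 *      panic_with_options(0, NULL, DEBUGGER_OPTION_NONE,
 *          "mysubsystem: inconsistent state %d", state);
 */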
#if !DEVELOPMENT && !DEBUG && !defined(__i386__) && !defined(__x86_64__)
__attribute__((noreturn))
#endif
void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
        va_list listp;
        spl_t   s;

        /* panic_caller is initialized to 0.  If set, don't change it */
        if ( ! panic_caller )
                panic_caller = (unsigned long)(char *)__builtin_return_address(0);

        s = panic_prologue(str);
        kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
        if (str) {
                va_start(listp, str);
                _doprnt(str, &listp, consdebug_putc, 0);
                va_end(listp);
        }
        kdb_printf("\n");

        /*
         * Release panicwait indicator so that other cpus may call Debugger().
         */
        panicwait = 0;
        DebuggerWithContext(reason, ctx, "panic", debugger_options_mask);
        panic_epilogue(s);
}
#if !DEVELOPMENT && !DEBUG && !defined(__i386__) && !defined(__x86_64__)
__attribute__((noreturn))
#endif
void
panic_context(unsigned int reason, void *ctx, const char *str, ...)
{
        va_list listp;
        spl_t   s;

        /* panic_caller is initialized to 0.  If set, don't change it */
        if ( ! panic_caller )
                panic_caller = (unsigned long)(char *)__builtin_return_address(0);

        s = panic_prologue(str);
        kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
        if (str) {
                va_start(listp, str);
                _doprnt(str, &listp, consdebug_putc, 0);
                va_end(listp);
        }
        kdb_printf("\n");

        /*
         * Release panicwait indicator so that other cpus may call Debugger().
         */
        panicwait = 0;
        DebuggerWithContext(reason, ctx, "panic", DEBUGGER_OPTION_NONE);
        panic_epilogue(s);
}
__attribute__((noinline,not_tail_called))
void log(__unused int level, char *fmt, ...)
{
        void *caller = __builtin_return_address(0);
        va_list listp;
        va_list listp2;

        va_start(listp, fmt);
        va_copy(listp2, listp);

        disable_preemption();
        _doprnt(fmt, &listp, cons_putc_locked, 0);
        enable_preemption();

        va_end(listp);

        os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
        va_end(listp2);
}
/*
 * Skip appending log messages to the new logging infrastructure in contexts
 * where safety is uncertain. These contexts include:
 *   - When we're in the debugger
 *   - We're in a panic
 *   - Interrupts are disabled
 *   - Or Pre-emption is disabled
 * In all the above cases, it is potentially unsafe to log messages.
 */
boolean_t oslog_is_safe(void) {
        return (debug_mode == 0 &&
                not_in_kdp == 1 &&
                get_preemption_level() == 0 &&
                ml_get_interrupts_enabled() == TRUE);
}
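/*
 * Illustrative caller-side check: callers are expected to skip the os_log
 * hand-off when this returns FALSE, e.g.
 *
 *      if (oslog_is_safe())
 *              os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, args, caller);
 */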
void
debug_putc(char c)
{
        if ((debug_buf_size != 0) &&
                ((debug_buf_ptr-debug_buf_addr) < (int)debug_buf_size)) {
                *debug_buf_ptr = c;
                debug_buf_ptr++;
        }
}
/* In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8.
 */
int packA(char *inbuf, uint32_t length, uint32_t buflen)
{
  unsigned int i, j = 0;
  pasc_t pack;

  length = MIN(((length + 7) & ~7), buflen);

  for (i = 0; i < length; i+=8)
    {
      pack.a = inbuf[i];
      pack.b = inbuf[i+1];
      pack.c = inbuf[i+2];
      pack.d = inbuf[i+3];
      pack.e = inbuf[i+4];
      pack.f = inbuf[i+5];
      pack.g = inbuf[i+6];
      pack.h = inbuf[i+7];
      bcopy ((char *) &pack, inbuf + j, 7);
      j += 7;
    }
  return j;
}
void unpackA(char *inbuf, uint32_t length)
{
        pasc_t packs;
        unsigned int i = 0;

        length = (length * 8)/7;

        while (i < length) {
          packs = *(pasc_t *)&inbuf[i];
          bcopy(&inbuf[i+7], &inbuf[i+8], MAX(0, (int) (length - i - 8)));
          inbuf[i++] = packs.a;
          inbuf[i++] = packs.b;
          inbuf[i++] = packs.c;
          inbuf[i++] = packs.d;
          inbuf[i++] = packs.e;
          inbuf[i++] = packs.f;
          inbuf[i++] = packs.g;
          inbuf[i++] = packs.h;
        }
}
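/*
 * Illustrative round trip (sizes follow from packing 8 chars into 7 bytes):
 *
 *      char buf[64];                                   // 64 bytes of 7-bit ASCII
 *      int packed = packA(buf, 64, sizeof(buf));       // packed == 56
 *      unpackA(buf, packed);                           // expands back to 64 bytes in place
 */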
extern void *proc_name_address(void *p);
static void
panic_display_process_name(void) {
        /* because of scoping issues len(p_comm) from proc_t is hard coded here */
        char proc_name[17] = "Unknown";
        task_t ctask = 0;
        void *cbsd_info = 0;

        if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
                if(ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info))
                        if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
                                proc_name[sizeof(proc_name) - 1] = '\0';
        kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}
unsigned        panic_active(void) {
        return ((panicstr != (char *) 0));
}
void populate_model_name(char *model_string) {
        strlcpy(model_name, model_string, sizeof(model_name));
}
void panic_display_model_name(void) {
        char tmp_model_name[sizeof(model_name)];

        if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name))
                return;

        tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

        if (tmp_model_name[0] != 0)
                kdb_printf("System model name: %s\n", tmp_model_name);
}
void panic_display_kernel_uuid(void) {
        char tmp_kernel_uuid[sizeof(kernel_uuid_string)];

        if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string))
                return;

        if (tmp_kernel_uuid[0] != '\0')
                kdb_printf("Kernel UUID: %s\n", tmp_kernel_uuid);
}
void panic_display_kernel_aslr(void) {
        if (vm_kernel_slide) {
                kdb_printf("Kernel slide:     0x%016lx\n", (unsigned long) vm_kernel_slide);
                kdb_printf("Kernel text base: %p\n", (void *) vm_kernel_stext);
        }
}
void panic_display_hibb(void) {
#if defined(__i386__) || defined (__x86_64__)
        kdb_printf("__HIB  text base: %p\n", (void *) vm_hib_base);
#endif
}
static void panic_display_uptime(void) {
        uint64_t        uptime;

        absolutetime_to_nanoseconds(mach_absolute_time(), &uptime);

        kdb_printf("\nSystem uptime in nanoseconds: %llu\n", uptime);
}
static void panic_display_disk_errors(void) {

        if (panic_disk_error_description[0]) {
                panic_disk_error_description[sizeof(panic_disk_error_description) - 1] = '\0';
                kdb_printf("Root disk errors: \"%s\"\n", panic_disk_error_description);
        }
}
extern const char version[];
extern char osversion[];

static volatile uint32_t config_displayed = 0;
__private_extern__ void panic_display_system_configuration(boolean_t launchd_exit) {

        if (!launchd_exit) panic_display_process_name();
        if (OSCompareAndSwap(0, 1, &config_displayed)) {
                char buf[256];
                if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf)))
                        kdb_printf("Boot args: %s\n", buf);
                kdb_printf("\nMac OS version:\n%s\n",
                    (osversion[0] != 0) ? osversion : "Not yet set");
                kdb_printf("\nKernel version:\n%s\n",version);
                panic_display_kernel_uuid();
                if (!launchd_exit) {
                        panic_display_kernel_aslr();
                        panic_display_hibb();
                        panic_display_pal_info();
                }
                panic_display_model_name();
                panic_display_disk_errors();
                if (!launchd_exit) {
                        panic_display_uptime();
                        panic_display_zprint();
#if CONFIG_ZLEAKS
                        panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
                        kext_dump_panic_lists(&kdb_log);
                }
        }
}
extern unsigned int     stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int     inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

extern boolean_t        panic_include_zprint;
extern vm_offset_t      panic_kext_memory_info;
extern vm_size_t        panic_kext_memory_size;
__private_extern__ void panic_display_zprint()
{
        if(panic_include_zprint == TRUE) {

                unsigned int    i = 0;
                struct zone     zone_copy;

                kdb_printf("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size");
                for (i = 0; i < num_zones; i++) {
                        if(ml_nofault_copy((vm_offset_t)(&zone_array[i]), (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
                                if(zone_copy.cur_size > (1024*1024)) {
                                        kdb_printf("%-20s %10lu %10lu\n",zone_copy.zone_name, (uintptr_t)zone_copy.cur_size,(uintptr_t)(zone_copy.countfree * zone_copy.elem_size));
                                }
                        }
                }

                kdb_printf("%-20s %10lu\n", "Kernel Stacks", (uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
                kdb_printf("%-20s %10lu\n", "PageTables",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

                kdb_printf("%-20s %10lu\n", "Kalloc.Large", (uintptr_t)kalloc_large_total);
                if (panic_kext_memory_info) {
                        mach_memory_info_t *mem_info = (mach_memory_info_t *)panic_kext_memory_info;
                        kdb_printf("\n%-5s %10s\n", "Kmod", "Size");
                        for (i = 0; i < VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT; i++) {
                                if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && (mem_info[i].size > (1024 * 1024))) {
                                        kdb_printf("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size);
                                }
                        }
                }
        }
}
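/*
 * Sample of the table emitted above (zone names and values are illustrative):
 *
 *      Zone Name              Cur Size  Free Size
 *      kalloc.4096            15728640     524288
 *      vm pages               73400320    1048576
 *      Kernel Stacks           2359296
 *      Kalloc.Large           18874368
 */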
#if CONFIG_ECC_LOGGING
__private_extern__ void panic_display_ecc_errors()
{
        uint32_t count = ecc_log_get_correction_count();

        if (count > 0) {
                kdb_printf("ECC Corrections:%u\n", count);
        }
}
#endif /* CONFIG_ECC_LOGGING */
#if CONFIG_ZLEAKS
extern boolean_t        panic_include_ztrace;
extern struct ztrace* top_ztrace;
void panic_print_symbol_name(vm_address_t search);
/*
 * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
 * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
 */
__private_extern__ void panic_display_ztrace(void)
{
        if(panic_include_ztrace == TRUE) {
                unsigned int i = 0;
                boolean_t keepsyms = FALSE;

                PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
                struct ztrace top_ztrace_copy;

                /* Make sure not to trip another panic if there's something wrong with memory */
                if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
                        kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
                        /* Print the backtrace addresses */
                        for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) {
                                kdb_printf("%p ", top_ztrace_copy.zt_stack[i]);
                                if (keepsyms) {
                                        panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]);
                                }
                                kdb_printf("\n");
                        }
                        /* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
                        kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
                }
                else {
                        kdb_printf("\nCan't access top_ztrace...\n");
                }
                kdb_printf("\n");
        }
}
#endif /* CONFIG_ZLEAKS */
#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
static struct kdp_ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}};

/* XXX ugly forward declares to stop warnings */
void *kdp_get_interface(void);
void kdp_set_ip_and_mac_addresses(struct kdp_in_addr *, struct kdp_ether_addr *);
void kdp_set_gateway_mac(void *);
void kdp_set_interface(void *);
void kdp_register_send_receive(void *, void *);
void kdp_unregister_send_receive(void *, void *);

int kdp_stack_snapshot_geterror(void);
uint32_t kdp_stack_snapshot_bytes_traced(void);
void *
kdp_get_interface( void)
{
        return (void *)0;
}

unsigned int
kdp_get_ip_address(void )
{
        return 0;
}

struct kdp_ether_addr
kdp_get_mac_addr(void)
{
        return kdp_current_mac_address;
}

void
kdp_set_ip_and_mac_addresses(
        __unused struct kdp_in_addr          *ipaddr,
        __unused struct kdp_ether_addr       *macaddr)
{}

void
kdp_set_gateway_mac(__unused void *gatewaymac)
{}

void
kdp_set_interface(__unused void *ifp)
{}

void
kdp_register_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_unregister_send_receive(__unused void *send, __unused void *receive)
{}

void kdp_register_link(__unused kdp_link_t link, __unused kdp_mode_t mode)
{}

void kdp_unregister_link(__unused kdp_link_t link, __unused kdp_mode_t mode)
{}

#endif /* !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING) */
#if !CONFIG_TELEMETRY
int telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean_t mark __unused)
{
        return KERN_NOT_SUPPORTED;
}
#endif /* !CONFIG_TELEMETRY */