2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
66 * Basic initialization for I386 - ISA bus machines.
70 #define __APPLE_API_PRIVATE 1
71 #define __APPLE_API_UNSTABLE 1
72 #include <kern/debug.h>
74 #include <mach/i386/vm_param.h>
77 #include <mach/vm_param.h>
78 #include <mach/vm_prot.h>
79 #include <mach/machine.h>
80 #include <mach/time_value.h>
81 #include <sys/kdebug.h>
84 #include <kern/assert.h>
85 #include <kern/lock_group.h>
86 #include <kern/misc_protos.h>
87 #include <kern/startup.h>
88 #include <kern/clock.h>
89 #include <kern/cpu_data.h>
90 #include <kern/machine.h>
91 #include <i386/postcode.h>
92 #include <i386/mp_desc.h>
93 #include <i386/misc_protos.h>
94 #include <i386/panic_notify.h>
95 #include <i386/thread.h>
96 #include <i386/trap.h>
97 #include <i386/machine_routines.h>
98 #include <i386/mp.h> /* mp_rendezvous_break_lock */
99 #include <i386/cpuid.h>
100 #include <i386/fpu.h>
101 #include <i386/machine_cpu.h>
102 #include <i386/pmap.h>
104 #include <i386/mtrr.h>
106 #include <i386/ucode.h>
107 #include <i386/pmCPU.h>
108 #include <i386/panic_hooks.h>
110 #include <architecture/i386/pio.h> /* inb() */
111 #include <pexpert/i386/boot.h>
113 #include <kdp/kdp_dyld.h>
114 #include <kdp/kdp_core.h>
116 #include <vm/vm_map.h>
117 #include <vm/vm_kern.h>
119 #include <IOKit/IOBSD.h>
120 #include <IOKit/IOPlatformExpert.h>
121 #include <IOKit/IOHibernatePrivate.h>
123 #include <pexpert/i386/efi.h>
125 #include <kern/thread.h>
126 #include <kern/sched.h>
127 #include <mach-o/loader.h>
128 #include <mach-o/nlist.h>
130 #include <libkern/kernel_mach_header.h>
131 #include <libkern/OSKextLibPrivate.h>
132 #include <libkern/crc.h>
134 #if DEBUG || DEVELOPMENT
135 #define DPRINTF(x ...) kprintf(x)
137 #define DPRINTF(x ...)
/*
 * ROUNDUP:   round 'a' up to the next multiple of 'b'.
 *            Uses bit masking, so 'b' MUST be a power of two.
 * ROUNDDOWN: round 'x' down to the previous multiple of 'y' via integer
 *            division, so any non-zero 'y' works. Both may evaluate their
 *            arguments more than once — do not pass expressions with
 *            side effects.
 */
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#define ROUNDDOWN(x, y) (((x)/(y))*(y))
148 static void machine_conf(void);
149 void panic_print_symbol_name(vm_address_t search
);
150 void RecordPanicStackshot(void);
/*
 * Selects which region of the panic log to flush to the panic medium;
 * consumed by paniclog_flush_internal().
 */
typedef enum paniclog_flush_type {
	kPaniclogFlushBase      = 1,    /* Flush the initial log and paniclog header */
	kPaniclogFlushStackshot = 2,    /* Flush only the stackshot data, then flush the header */
	kPaniclogFlushOtherLog  = 3     /* Flush the other log, then flush the header */
} paniclog_flush_type_t;
158 void paniclog_flush_internal(paniclog_flush_type_t variant
);
160 extern const char version
[];
161 extern char osversion
[];
162 extern int max_poll_quanta
;
163 extern unsigned int panic_is_inited
;
165 extern int proc_pid(struct proc
*);
/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))        /* frame pointers must be 4-byte aligned */
#define FP_LR_OFFSET ((uint32_t)4)                 /* return-address offset from FP, 32-bit frames */
#define FP_LR_OFFSET64 ((uint32_t)8)               /* return-address offset from FP, 64-bit frames */
#define FP_MAX_NUM_TO_EVALUATE (50)                /* cap on frames walked during backtrace */
173 volatile int pbtcpu
= -1;
174 hw_lock_data_t pbtlock
; /* backtrace print lock */
177 volatile int panic_double_fault_cpu
= -1;
179 #define PRINT_ARGS_FROM_STACK_FRAME 0
181 typedef struct _cframe_t
{
182 struct _cframe_t
*prev
;
184 #if PRINT_ARGS_FROM_STACK_FRAME
189 static unsigned commit_paniclog_to_nvram
;
190 boolean_t coprocessor_paniclog_flush
= FALSE
;
192 struct kcdata_descriptor kc_panic_data
;
193 static boolean_t begun_panic_stackshot
= FALSE
;
194 extern kern_return_t
do_stackshot(void *);
196 extern void kdp_snapshot_preflight(int pid
, void * tracebuf
,
197 uint32_t tracebuf_size
, uint64_t flags
,
198 kcdata_descriptor_t data_p
,
199 uint64_t since_timestamp
, uint32_t pagetable_mask
);
200 extern int kdp_stack_snapshot_bytes_traced(void);
201 extern int kdp_stack_snapshot_bytes_uncompressed(void);
203 extern void stackshot_memcpy(void *dst
, const void *src
, size_t len
);
204 vm_offset_t panic_stackshot_buf
= 0;
205 size_t panic_stackshot_buf_len
= 0;
207 size_t panic_stackshot_len
= 0;
209 boolean_t is_clock_configured
= FALSE
;
212 * Backtrace a single frame.
215 print_one_backtrace(pmap_t pmap
, vm_offset_t topfp
, const char *cur_marker
,
223 boolean_t dump_kernel_stack
;
229 if (fp
>= VM_MIN_KERNEL_ADDRESS
) {
230 dump_kernel_stack
= TRUE
;
232 dump_kernel_stack
= FALSE
;
236 if ((fp
== 0) || ((fp
& FP_ALIGNMENT_MASK
) != 0)) {
239 if (dump_kernel_stack
&& ((fp
< VM_MIN_KERNEL_ADDRESS
) || (fp
> VM_MAX_KERNEL_ADDRESS
))) {
242 if ((!dump_kernel_stack
) && (fp
>= VM_MIN_KERNEL_ADDRESS
)) {
246 /* Check to see if current address will result in a different
247 * ppn than previously computed (to avoid recomputation) via
248 * (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */
250 if ((((fp
+ FP_LR_OFFSET
) ^ fp_for_ppn
) >> PAGE_SHIFT
) != 0x0U
) {
251 ppn
= pmap_find_phys(pmap
, fp
+ FP_LR_OFFSET
);
252 fp_for_ppn
= fp
+ (is_64_bit
? FP_LR_OFFSET64
: FP_LR_OFFSET
);
254 if (ppn
!= (ppnum_t
)NULL
) {
256 lr
= ml_phys_read_double_64(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | ((fp
+ FP_LR_OFFSET64
) & PAGE_MASK
));
258 lr
= ml_phys_read_word(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | ((fp
+ FP_LR_OFFSET
) & PAGE_MASK
));
262 paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker
, fp
+ FP_LR_OFFSET64
);
264 paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker
, (uint32_t)(fp
+ FP_LR_OFFSET
));
268 if (((fp
^ fp_for_ppn
) >> PAGE_SHIFT
) != 0x0U
) {
269 ppn
= pmap_find_phys(pmap
, fp
);
272 if (ppn
!= (ppnum_t
)NULL
) {
274 fp
= ml_phys_read_double_64(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | (fp
& PAGE_MASK
));
276 fp
= ml_phys_read_word(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | (fp
& PAGE_MASK
));
280 paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker
, fp
);
282 paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker
, (uint32_t)fp
);
288 paniclog_append_noflush("%s\t0x%016llx\n", cur_marker
, lr
);
290 paniclog_append_noflush("%s\t0x%08x\n", cur_marker
, (uint32_t)lr
);
292 } while ((++i
< FP_MAX_NUM_TO_EVALUATE
) && (fp
!= topfp
));
295 machine_startup(void)
300 if (PE_get_hotkey( kPEControlKey
)) {
301 halt_in_debugger
= halt_in_debugger
? 0 : 1;
305 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram
, sizeof(commit_paniclog_to_nvram
))) {
306 commit_paniclog_to_nvram
= 1;
310 * Entering the debugger will put the CPUs into a "safe"
313 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg
, sizeof(boot_arg
))) {
314 pmsafe_debug
= boot_arg
;
317 hw_lock_init(&pbtlock
); /* initialize print backtrace lock */
319 if (PE_parse_boot_argn("yield", &boot_arg
, sizeof(boot_arg
))) {
320 sched_poll_yield_shift
= boot_arg
;
340 machine_info
.memory_size
= (typeof(machine_info
.memory_size
))mem_size
;
343 extern void *gPEEFIRuntimeServices
;
344 extern void *gPEEFISystemTable
;
347 efi_set_tables_64(EFI_SYSTEM_TABLE_64
* system_table
)
349 EFI_RUNTIME_SERVICES_64
*runtime
;
353 DPRINTF("Processing 64-bit EFI tables at %p\n", system_table
);
355 DPRINTF("Header:\n");
356 DPRINTF(" Signature: 0x%016llx\n", system_table
->Hdr
.Signature
);
357 DPRINTF(" Revision: 0x%08x\n", system_table
->Hdr
.Revision
);
358 DPRINTF(" HeaderSize: 0x%08x\n", system_table
->Hdr
.HeaderSize
);
359 DPRINTF(" CRC32: 0x%08x\n", system_table
->Hdr
.CRC32
);
360 DPRINTF("RuntimeServices: 0x%016llx\n", system_table
->RuntimeServices
);
361 if (system_table
->Hdr
.Signature
!= EFI_SYSTEM_TABLE_SIGNATURE
) {
362 kprintf("Bad EFI system table signature\n");
365 // Verify signature of the system table
366 hdr_cksum
= system_table
->Hdr
.CRC32
;
367 system_table
->Hdr
.CRC32
= 0;
368 cksum
= crc32(0L, system_table
, system_table
->Hdr
.HeaderSize
);
370 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
371 system_table
->Hdr
.CRC32
= hdr_cksum
;
372 if (cksum
!= hdr_cksum
) {
373 kprintf("Bad EFI system table checksum\n");
377 gPEEFISystemTable
= system_table
;
379 if (system_table
->RuntimeServices
== 0) {
380 kprintf("No runtime table present\n");
383 DPRINTF("RuntimeServices table at 0x%qx\n", system_table
->RuntimeServices
);
384 // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
385 runtime
= (EFI_RUNTIME_SERVICES_64
*) (uintptr_t)system_table
->RuntimeServices
;
386 DPRINTF("Checking runtime services table %p\n", runtime
);
387 if (runtime
->Hdr
.Signature
!= EFI_RUNTIME_SERVICES_SIGNATURE
) {
388 kprintf("Bad EFI runtime table signature\n");
392 // Verify signature of runtime services table
393 hdr_cksum
= runtime
->Hdr
.CRC32
;
394 runtime
->Hdr
.CRC32
= 0;
395 cksum
= crc32(0L, runtime
, runtime
->Hdr
.HeaderSize
);
397 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
398 runtime
->Hdr
.CRC32
= hdr_cksum
;
399 if (cksum
!= hdr_cksum
) {
400 kprintf("Bad EFI runtime table checksum\n");
404 gPEEFIRuntimeServices
= runtime
;
408 /* Map in EFI runtime areas. */
412 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
414 kprintf("Initializing EFI runtime services\n");
417 vm_offset_t vm_size
, vm_addr
;
418 vm_map_offset_t phys_addr
;
419 EfiMemoryRange
*mptr
;
420 unsigned int msize
, mcount
;
423 msize
= args
->MemoryMapDescriptorSize
;
424 mcount
= args
->MemoryMapSize
/ msize
;
426 DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
427 args
->kaddr
, args
->ksize
);
428 DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
429 args
->efiSystemTable
,
430 (void *) ml_static_ptovirt(args
->efiSystemTable
));
431 DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
432 args
->efiRuntimeServicesPageStart
);
433 DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
434 args
->efiRuntimeServicesPageCount
);
435 DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
436 args
->efiRuntimeServicesVirtualPageStart
);
437 mptr
= (EfiMemoryRange
*)ml_static_ptovirt(args
->MemoryMap
);
438 for (i
= 0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
439 if (((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
)) {
440 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
441 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
442 /* For K64 on EFI32, shadow-map into high KVA */
443 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
) {
444 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
446 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
447 DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
449 (void *) (uintptr_t) phys_addr
,
450 (void *) (uintptr_t) mptr
->VirtualStart
,
453 pmap_map_bd(vm_addr
, phys_addr
, phys_addr
+ round_page(vm_size
),
454 (mptr
->Type
== kEfiRuntimeServicesCode
) ? VM_PROT_READ
| VM_PROT_EXECUTE
: VM_PROT_READ
| VM_PROT_WRITE
,
455 (mptr
->Type
== EfiMemoryMappedIO
) ? VM_WIMG_IO
: VM_WIMG_USE_DEFAULT
);
459 if (args
->Version
!= kBootArgsVersion2
) {
460 panic("Incompatible boot args version %d revision %d\n", args
->Version
, args
->Revision
);
463 DPRINTF("Boot args version %d revision %d mode %d\n", args
->Version
, args
->Revision
, args
->efiMode
);
464 if (args
->efiMode
== kBootArgsEfiMode64
) {
465 efi_set_tables_64((EFI_SYSTEM_TABLE_64
*) ml_static_ptovirt(args
->efiSystemTable
));
467 panic("Unsupported 32-bit EFI system table!");
474 /* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */
476 bootloader_valid_page(ppnum_t ppn
)
478 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
479 ppnum_t pstart
= args
->efiRuntimeServicesPageStart
;
480 ppnum_t pend
= pstart
+ args
->efiRuntimeServicesPageCount
;
482 return pstart
<= ppn
&& ppn
< pend
;
485 /* Remap EFI runtime areas. */
487 hibernate_newruntime_map(void * map
, vm_size_t map_size
, uint32_t system_table_offset
)
489 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
491 kprintf("Reinitializing EFI runtime services\n");
494 vm_offset_t vm_size
, vm_addr
;
495 vm_map_offset_t phys_addr
;
496 EfiMemoryRange
*mptr
;
497 unsigned int msize
, mcount
;
500 gPEEFISystemTable
= 0;
501 gPEEFIRuntimeServices
= 0;
503 system_table_offset
+= ptoa_32(args
->efiRuntimeServicesPageStart
);
505 kprintf("Old system table 0x%x, new 0x%x\n",
506 (uint32_t)args
->efiSystemTable
, system_table_offset
);
508 args
->efiSystemTable
= system_table_offset
;
510 kprintf("Old map:\n");
511 msize
= args
->MemoryMapDescriptorSize
;
512 mcount
= args
->MemoryMapSize
/ msize
;
513 mptr
= (EfiMemoryRange
*)ml_static_ptovirt(args
->MemoryMap
);
514 for (i
= 0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
515 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) {
516 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
517 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
519 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
) {
520 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
522 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
524 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr
->Type
, phys_addr
, (unsigned long)vm_addr
, mptr
->NumberOfPages
);
528 pmap_remove(kernel_pmap
, i386_ptob(args
->efiRuntimeServicesPageStart
),
529 i386_ptob(args
->efiRuntimeServicesPageStart
+ args
->efiRuntimeServicesPageCount
));
531 kprintf("New map:\n");
532 msize
= args
->MemoryMapDescriptorSize
;
533 mcount
= (unsigned int)(map_size
/ msize
);
535 for (i
= 0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
536 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) {
537 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
538 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
539 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
) {
540 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
542 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
544 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr
->Type
, phys_addr
, (unsigned long)vm_addr
, mptr
->NumberOfPages
);
546 pmap_map(vm_addr
, phys_addr
, phys_addr
+ round_page(vm_size
),
547 (mptr
->Type
== kEfiRuntimeServicesCode
) ? VM_PROT_READ
| VM_PROT_EXECUTE
: VM_PROT_READ
| VM_PROT_WRITE
,
548 (mptr
->Type
== EfiMemoryMappedIO
) ? VM_WIMG_IO
: VM_WIMG_USE_DEFAULT
);
552 if (args
->Version
!= kBootArgsVersion2
) {
553 panic("Incompatible boot args version %d revision %d\n", args
->Version
, args
->Revision
);
556 kprintf("Boot args version %d revision %d mode %d\n", args
->Version
, args
->Revision
, args
->efiMode
);
557 if (args
->efiMode
== kBootArgsEfiMode64
) {
558 efi_set_tables_64((EFI_SYSTEM_TABLE_64
*) ml_static_ptovirt(args
->efiSystemTable
));
560 panic("Unsupported 32-bit EFI system table!");
564 kprintf("Done reinitializing EFI runtime services\n");
570 * Find devices. The system is alive.
575 /* Now with VM up, switch to dynamically allocated cpu data */
578 /* Ensure panic buffer is initialized. */
582 * Display CPU identification
584 cpuid_cpu_display("CPU identification");
585 cpuid_feature_display("CPU features");
586 cpuid_extfeature_display("CPU extended features");
589 * Initialize EFI runtime services.
596 * Set up to use floating point.
601 * Configure clock devices.
604 is_clock_configured
= TRUE
;
608 * Initialize MTRR from boot processor.
613 * Set up PAT for boot processor.
619 * Free lowmem pages and complete other setup
621 pmap_lowmem_finalize();
630 halt_all_cpus(FALSE
);
633 int reset_mem_on_reboot
= 1;
636 * Halt the system or reboot.
638 __attribute__((noreturn
))
640 halt_all_cpus(boolean_t reboot
)
643 printf("MACH Reboot\n");
644 PEHaltRestart( kPERestartCPU
);
646 printf("CPU halted\n");
647 PEHaltRestart( kPEHaltCPU
);
654 /* For use with the MP rendezvous mechanism
657 uint64_t panic_restart_timeout
= ~(0ULL);
659 #define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)
662 * We should always return from this function with the other log offset
663 * set in the panic_info structure.
666 RecordPanicStackshot()
669 size_t bytes_traced
= 0, bytes_uncompressed
= 0, bytes_used
= 0, bytes_remaining
= 0;
670 char *stackshot_begin_loc
= NULL
;
672 /* Don't re-enter this code if we panic here */
673 if (begun_panic_stackshot
) {
674 if (panic_info
->mph_other_log_offset
== 0) {
675 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
679 begun_panic_stackshot
= TRUE
;
681 /* The panic log length should have been set before we came to capture a stackshot */
682 if (panic_info
->mph_panic_log_len
== 0) {
683 kdb_printf("Found zero length panic log, skipping capturing panic stackshot\n");
684 if (panic_info
->mph_other_log_offset
== 0) {
685 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
690 if (stackshot_active()) {
691 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED
;
692 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
693 kdb_printf("Panicked during stackshot, skipping panic stackshot\n");
697 /* Try to capture an in memory panic_stackshot */
698 if (extended_debug_log_enabled
) {
699 /* On coprocessor systems we write this into the extended debug log */
700 stackshot_begin_loc
= debug_buf_ptr
;
701 bytes_remaining
= debug_buf_size
- (unsigned int)((uintptr_t)stackshot_begin_loc
- (uintptr_t)debug_buf_base
);
702 } else if (panic_stackshot_buf
!= 0) {
703 /* On other systems we use the panic stackshot_buf */
704 stackshot_begin_loc
= (char *) panic_stackshot_buf
;
705 bytes_remaining
= panic_stackshot_buf_len
;
707 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
712 err
= kcdata_memory_static_init(&kc_panic_data
, (mach_vm_address_t
)stackshot_begin_loc
,
713 KCDATA_BUFFER_BEGIN_COMPRESSED
, (unsigned int) bytes_remaining
, KCFLAG_USE_MEMCOPY
);
714 if (err
!= KERN_SUCCESS
) {
715 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR
;
716 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
717 kdb_printf("Failed to initialize kcdata buffer for in-memory panic stackshot, skipping ...\n");
721 uint64_t stackshot_flags
= (STACKSHOT_SAVE_KEXT_LOADINFO
| STACKSHOT_SAVE_LOADINFO
| STACKSHOT_KCDATA_FORMAT
|
722 STACKSHOT_ENABLE_BT_FAULTING
| STACKSHOT_ENABLE_UUID_FAULTING
| STACKSHOT_FROM_PANIC
| STACKSHOT_DO_COMPRESS
|
723 STACKSHOT_NO_IO_STATS
| STACKSHOT_THREAD_WAITINFO
| STACKSHOT_DISABLE_LATENCY_INFO
| STACKSHOT_GET_DQ
);
725 err
= kcdata_init_compress(&kc_panic_data
, KCDATA_BUFFER_BEGIN_STACKSHOT
, stackshot_memcpy
, KCDCT_ZLIB
);
726 if (err
!= KERN_SUCCESS
) {
727 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_COMPRESS
;
728 stackshot_flags
&= ~STACKSHOT_DO_COMPRESS
;
733 * Include the shared cache layout in panic stackshots on DEVELOPMENT kernels so that we can symbolicate
734 * panic stackshots from corefiles.
736 stackshot_flags
|= STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT
;
739 kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc
, (uint32_t) bytes_remaining
, stackshot_flags
, &kc_panic_data
, 0, 0);
740 err
= do_stackshot(NULL
);
741 bytes_traced
= (size_t) kdp_stack_snapshot_bytes_traced();
742 bytes_uncompressed
= (size_t) kdp_stack_snapshot_bytes_uncompressed();
743 bytes_used
= (size_t) kcdata_memory_get_used_bytes(&kc_panic_data
);
745 if ((err
!= KERN_SUCCESS
) && (bytes_used
> 0)) {
747 * We ran out of space while trying to capture a stackshot, try again without user frames.
748 * It's not safe to log from here (in case we're writing in the middle of the debug buffer on coprocessor systems)
749 * but append a flag to the panic flags.
751 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_KERNEL_ONLY
;
752 panic_stackshot_reset_state();
754 /* Erase the stackshot data (this region is pre-populated with the NULL character) */
755 memset(stackshot_begin_loc
, '\0', bytes_used
);
757 err
= kcdata_memory_static_init(&kc_panic_data
, (mach_vm_address_t
)stackshot_begin_loc
,
758 KCDATA_BUFFER_BEGIN_STACKSHOT
, (unsigned int) bytes_remaining
, KCFLAG_USE_MEMCOPY
);
759 if (err
!= KERN_SUCCESS
) {
760 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR
;
761 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
762 kdb_printf("Failed to re-initialize kcdata buffer for kernel only in-memory panic stackshot, skipping ...\n");
766 stackshot_flags
= (STACKSHOT_SAVE_KEXT_LOADINFO
| STACKSHOT_KCDATA_FORMAT
| STACKSHOT_FROM_PANIC
| STACKSHOT_DISABLE_LATENCY_INFO
|
767 STACKSHOT_NO_IO_STATS
| STACKSHOT_THREAD_WAITINFO
| STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY
| STACKSHOT_GET_DQ
);
770 * Include the shared cache layout in panic stackshots on DEVELOPMENT kernels so that we can symbolicate
771 * panic stackshots from corefiles.
773 stackshot_flags
|= STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT
;
776 kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc
, (uint32_t) bytes_remaining
, stackshot_flags
, &kc_panic_data
, 0, 0);
777 err
= do_stackshot(NULL
);
778 bytes_traced
= (size_t) kdp_stack_snapshot_bytes_traced();
779 bytes_uncompressed
= (size_t) kdp_stack_snapshot_bytes_uncompressed();
780 bytes_used
= (size_t) kcdata_memory_get_used_bytes(&kc_panic_data
);
783 if (err
== KERN_SUCCESS
) {
784 if (extended_debug_log_enabled
) {
785 debug_buf_ptr
+= bytes_traced
;
787 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED
;
789 /* On other systems this is not in the debug buffer itself, it's in a separate buffer allocated at boot. */
790 if (extended_debug_log_enabled
) {
791 panic_info
->mph_stackshot_offset
= PE_get_offset_into_panic_region(stackshot_begin_loc
);
792 panic_info
->mph_stackshot_len
= (uint32_t) bytes_traced
;
794 panic_info
->mph_stackshot_offset
= panic_info
->mph_stackshot_len
= 0;
797 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
798 if (stackshot_flags
& STACKSHOT_DO_COMPRESS
) {
799 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED
;
800 kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %zu (Uncompressed %zu) **\n", bytes_traced
, bytes_uncompressed
);
802 kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %zu **\n", bytes_traced
);
805 /* Used by the code that writes the buffer to disk */
806 panic_stackshot_buf
= (vm_offset_t
) stackshot_begin_loc
;
807 panic_stackshot_len
= bytes_traced
;
809 if (!extended_debug_log_enabled
&& (gIOPolledCoreFileMode
== kIOPolledCoreFileModeStackshot
)) {
810 /* System configured to write panic stackshot to disk */
811 kern_dump(KERN_DUMP_STACKSHOT_DISK
);
814 if (bytes_used
> 0) {
815 /* Erase the stackshot data (this region is pre-populated with the NULL character) */
816 memset(stackshot_begin_loc
, '\0', bytes_used
);
817 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE
;
819 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
820 kdb_printf("\n** In Memory Panic Stackshot Incomplete ** Bytes Filled %zu ** Err %d\n", bytes_used
, err
);
822 bzero(stackshot_begin_loc
, bytes_used
);
823 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR
;
825 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
826 kdb_printf("\n** In Memory Panic Stackshot Failed ** Bytes Traced %zu, err %d\n", bytes_traced
, err
);
835 __unused
const char *message
, void *panic_data
, uint64_t panic_options
)
837 void *stackptr
= NULL
;
838 thread_t thread_to_trace
= (thread_t
) panic_data
;
839 cframe_t synthetic_stack_frame
= { };
840 char *debugger_msg
= NULL
;
841 int cn
= cpu_number();
844 * Issue an I/O port read if one has been requested - this is an event logic
845 * analyzers can use as a trigger point.
849 /* Obtain frame pointer for stack to trace */
850 if (panic_options
& DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE
) {
851 if (!mp_kdp_all_cpus_halted()) {
852 debugger_msg
= "Backtracing panicked thread because failed to halt all CPUs\n";
853 } else if (thread_to_trace
== THREAD_NULL
) {
854 debugger_msg
= "Backtracing panicked thread because no thread pointer provided\n";
855 } else if (kvtophys((vm_offset_t
)thread_to_trace
) == 0ULL) {
856 debugger_msg
= "Backtracing panicked thread because unable to access specified thread\n";
857 } else if (thread_to_trace
->kernel_stack
== 0) {
858 debugger_msg
= "Backtracing panicked thread because kernel_stack is NULL for specified thread\n";
859 } else if (kvtophys(STACK_IKS(thread_to_trace
->kernel_stack
) == 0ULL)) {
860 debugger_msg
= "Backtracing panicked thread because unable to access kernel_stack for specified thread\n";
862 debugger_msg
= "Backtracing specified thread\n";
863 /* We construct a synthetic stack frame so we can include the current instruction pointer */
864 synthetic_stack_frame
.prev
= (cframe_t
*)STACK_IKS(thread_to_trace
->kernel_stack
)->k_rbp
;
865 synthetic_stack_frame
.caller
= (uintptr_t) STACK_IKS(thread_to_trace
->kernel_stack
)->k_rip
;
866 stackptr
= (void *) &synthetic_stack_frame
;
870 if (stackptr
== NULL
) {
871 __asm__
volatile ("movq %%rbp, %0" : "=m" (stackptr
));
874 /* Print backtrace - callee is internally synchronized */
875 if (panic_options
& DEBUGGER_OPTION_INITPROC_PANIC
) {
876 /* Special handling of launchd died panics */
877 print_launchd_info();
879 panic_i386_backtrace(stackptr
, ((panic_double_fault_cpu
== cn
) ? 80 : 48), debugger_msg
, FALSE
, NULL
);
882 if (panic_options
& DEBUGGER_OPTION_COPROC_INITIATED_PANIC
) {
883 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC
;
886 if (PE_get_offset_into_panic_region(debug_buf_ptr
) < panic_info
->mph_panic_log_offset
) {
887 kdb_printf("Invalid panic log offset found (not properly initialized?): debug_buf_ptr : 0x%p, panic_info: 0x%p mph_panic_log_offset: 0x%x\n",
888 debug_buf_ptr
, panic_info
, panic_info
->mph_panic_log_offset
);
889 panic_info
->mph_panic_log_len
= 0;
891 panic_info
->mph_panic_log_len
= PE_get_offset_into_panic_region(debug_buf_ptr
) - panic_info
->mph_panic_log_offset
;
894 /* Flush the panic log */
895 paniclog_flush_internal(kPaniclogFlushBase
);
897 /* Try to take a panic stackshot */
898 RecordPanicStackshot();
901 * Flush the panic log again with the stackshot or any relevant logging
902 * from when we tried to capture it.
904 paniclog_flush_internal(kPaniclogFlushStackshot
);
908 paniclog_flush_internal(paniclog_flush_type_t variant
)
910 /* Update the other log offset if we've opened the other log */
911 if (panic_info
->mph_other_log_offset
!= 0) {
912 panic_info
->mph_other_log_len
= PE_get_offset_into_panic_region(debug_buf_ptr
) - panic_info
->mph_other_log_offset
;
916 * If we've detected that we're on a co-processor system, we flush the panic log via the kPEPanicSync
917 * panic callbacks, otherwise we flush via nvram (unless that has been disabled).
919 if (coprocessor_paniclog_flush
) {
920 uint32_t overall_buffer_size
= debug_buf_size
;
921 uint32_t size_to_flush
= 0, offset_to_flush
= 0;
922 if (extended_debug_log_enabled
) {
924 * debug_buf_size for the extended log does not include the length of the header.
925 * There may be some extra data at the end of the 'basic' log that wouldn't get flushed
926 * for the non-extended case (this is a concession we make to not shrink the paniclog data
927 * for non-coprocessor systems that only use the basic log).
929 overall_buffer_size
= debug_buf_size
+ sizeof(struct macos_panic_header
);
933 panic_info
->mph_crc
= crc32(0L, &panic_info
->mph_version
, (overall_buffer_size
- offsetof(struct macos_panic_header
, mph_version
)));
935 if (variant
== kPaniclogFlushBase
) {
936 /* Flush the header and base panic log. */
937 kprintf("Flushing base panic log\n");
938 size_to_flush
= ROUNDUP((panic_info
->mph_panic_log_offset
+ panic_info
->mph_panic_log_len
), PANIC_FLUSH_BOUNDARY
);
940 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
941 } else if ((variant
== kPaniclogFlushStackshot
) || (variant
== kPaniclogFlushOtherLog
)) {
942 if (variant
== kPaniclogFlushStackshot
) {
944 * We flush the stackshot before flushing the updated header because the stackshot
945 * can take a while to flush. We want the paniclog header to be as consistent as possible even
946 * if the stackshot isn't flushed completely. Flush starting from the end of the panic log.
948 kprintf("Flushing panic log stackshot\n");
949 offset_to_flush
= ROUNDDOWN((panic_info
->mph_panic_log_offset
+ panic_info
->mph_panic_log_len
), PANIC_FLUSH_BOUNDARY
);
950 size_to_flush
= ROUNDUP((panic_info
->mph_stackshot_len
+ (panic_info
->mph_stackshot_offset
- offset_to_flush
)), PANIC_FLUSH_BOUNDARY
);
951 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
954 /* Flush the other log -- everything after the stackshot */
955 kprintf("Flushing panic 'other' log\n");
956 offset_to_flush
= ROUNDDOWN((panic_info
->mph_stackshot_offset
+ panic_info
->mph_stackshot_len
), PANIC_FLUSH_BOUNDARY
);
957 size_to_flush
= ROUNDUP((panic_info
->mph_other_log_len
+ (panic_info
->mph_other_log_offset
- offset_to_flush
)), PANIC_FLUSH_BOUNDARY
);
958 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
960 /* Flush the header -- everything before the paniclog */
961 kprintf("Flushing panic log header\n");
962 size_to_flush
= ROUNDUP(panic_info
->mph_panic_log_offset
, PANIC_FLUSH_BOUNDARY
);
964 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
966 } else if (commit_paniclog_to_nvram
) {
967 assert(debug_buf_size
!= 0);
969 unsigned long pi_size
= 0;
975 * Now call the compressor
976 * XXX Consider using the WKdm compressor in the
977 * future, rather than just packing - would need to
978 * be co-ordinated with crashreporter, which decodes
979 * this post-restart. The compressor should be
980 * capable of in-place compression.
982 * Don't include the macOS panic header (for co-processor systems only)
984 bufpos
= packA(debug_buf_base
, (unsigned int) (debug_buf_ptr
- debug_buf_base
),
987 * If compression was successful, use the compressed length
989 pi_size
= bufpos
? bufpos
: (unsigned) (debug_buf_ptr
- debug_buf_base
);
992 * The following sequence is a workaround for:
993 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
994 * any routines that use floating point (MMX in this case) when saving panic
995 * logs to nvram/flash.
1001 * Save panic log to non-volatile store
1002 * Panic info handler must truncate data that is
1003 * too long for this platform.
1004 * This call must save data synchronously,
1005 * since we can subsequently halt the system.
1007 kprintf("Attempting to commit panic log to NVRAM\n");
1008 pi_size
= PESavePanicInfo((unsigned char *)debug_buf_base
,
1009 (uint32_t)pi_size
);
1013 * Uncompress in-place, to permit examination of
1014 * the panic log by debuggers.
1017 unpackA(debug_buf_base
, bufpos
);
1025 /* Called outside of this file to update logging appended to the "other" log */
1026 paniclog_flush_internal(kPaniclogFlushOtherLog
);
1031 machine_boot_info(char *buf
, __unused vm_size_t size
)
1037 /* Routines for address - symbol translation. Not called unless the "keepsyms"
1038 * boot-arg is supplied.
1042 panic_print_macho_symbol_name(kernel_mach_header_t
*mh
, vm_address_t search
, const char *module_name
)
1044 kernel_nlist_t
*sym
= NULL
;
1045 struct load_command
*cmd
;
1046 kernel_segment_command_t
*orig_ts
= NULL
, *orig_le
= NULL
;
1047 struct symtab_command
*orig_st
= NULL
;
1049 char *strings
, *bestsym
= NULL
;
1050 vm_address_t bestaddr
= 0, diff
, curdiff
;
1052 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
1054 cmd
= (struct load_command
*) &mh
[1];
1055 for (i
= 0; i
< mh
->ncmds
; i
++) {
1056 if (cmd
->cmd
== LC_SEGMENT_KERNEL
) {
1057 kernel_segment_command_t
*orig_sg
= (kernel_segment_command_t
*) cmd
;
1059 if (strncmp(SEG_TEXT
, orig_sg
->segname
,
1060 sizeof(orig_sg
->segname
)) == 0) {
1062 } else if (strncmp(SEG_LINKEDIT
, orig_sg
->segname
,
1063 sizeof(orig_sg
->segname
)) == 0) {
1065 } else if (strncmp("", orig_sg
->segname
,
1066 sizeof(orig_sg
->segname
)) == 0) {
1067 orig_ts
= orig_sg
; /* pre-Lion i386 kexts have a single unnamed segment */
1069 } else if (cmd
->cmd
== LC_SYMTAB
) {
1070 orig_st
= (struct symtab_command
*) cmd
;
1073 cmd
= (struct load_command
*) ((uintptr_t) cmd
+ cmd
->cmdsize
);
1076 if ((orig_ts
== NULL
) || (orig_st
== NULL
) || (orig_le
== NULL
)) {
1080 if ((search
< orig_ts
->vmaddr
) ||
1081 (search
>= orig_ts
->vmaddr
+ orig_ts
->vmsize
)) {
1082 /* search out of range for this mach header */
1086 sym
= (kernel_nlist_t
*)(uintptr_t)(orig_le
->vmaddr
+ orig_st
->symoff
- orig_le
->fileoff
);
1087 strings
= (char *)(uintptr_t)(orig_le
->vmaddr
+ orig_st
->stroff
- orig_le
->fileoff
);
1090 for (i
= 0; i
< orig_st
->nsyms
; i
++) {
1091 if (sym
[i
].n_type
& N_STAB
) {
1095 if (sym
[i
].n_value
<= search
) {
1096 curdiff
= search
- (vm_address_t
)sym
[i
].n_value
;
1097 if (curdiff
< diff
) {
1099 bestaddr
= sym
[i
].n_value
;
1100 bestsym
= strings
+ sym
[i
].n_un
.n_strx
;
1105 if (bestsym
!= NULL
) {
1107 paniclog_append_noflush("%s : %s + 0x%lx", module_name
, bestsym
, (unsigned long)diff
);
1109 paniclog_append_noflush("%s : %s", module_name
, bestsym
);
1117 panic_display_uptime(void)
1120 absolutetime_to_nanoseconds(mach_absolute_time(), &uptime
);
1122 paniclog_append_noflush("\nSystem uptime in nanoseconds: %llu\n", uptime
);
1125 extern uint32_t gIOHibernateCount
;
1128 panic_display_hib_count(void)
1130 paniclog_append_noflush("Hibernation exit count: %u\n", gIOHibernateCount
);
1133 extern AbsoluteTime gIOLastSleepAbsTime
;
1134 extern AbsoluteTime gIOLastWakeAbsTime
;
1135 extern uint64_t gAcpiLastSleepTscBase
;
1136 extern uint64_t gAcpiLastSleepNanoBase
;
1137 extern uint64_t gAcpiLastWakeTscBase
;
1138 extern uint64_t gAcpiLastWakeNanoBase
;
1139 extern boolean_t is_clock_configured
;
1142 panic_display_times(void)
1144 if (!is_clock_configured
) {
1145 paniclog_append_noflush("Warning: clock is not configured. Can't get time\n");
1149 paniclog_append_noflush("Last Sleep: absolute base_tsc base_nano\n");
1150 paniclog_append_noflush(" Uptime : 0x%016llx\n", mach_absolute_time());
1151 paniclog_append_noflush(" Sleep : 0x%016llx 0x%016llx 0x%016llx\n", gIOLastSleepAbsTime
, gAcpiLastSleepTscBase
, gAcpiLastSleepNanoBase
);
1152 paniclog_append_noflush(" Wake : 0x%016llx 0x%016llx 0x%016llx\n", gIOLastWakeAbsTime
, gAcpiLastWakeTscBase
, gAcpiLastWakeNanoBase
);
1156 panic_display_disk_errors(void)
1158 if (panic_disk_error_description
[0]) {
1159 panic_disk_error_description
[panic_disk_error_description_size
- 1] = '\0';
1160 paniclog_append_noflush("Root disk errors: \"%s\"\n", panic_disk_error_description
);
1165 panic_display_shutdown_status(void)
1167 #if defined(__i386__) || defined(__x86_64__)
1168 paniclog_append_noflush("System shutdown begun: %s\n", IOPMRootDomainGetWillShutdown() ? "YES" : "NO");
1169 if (gIOPolledCoreFileMode
== kIOPolledCoreFileModeNotInitialized
) {
1170 paniclog_append_noflush("Panic diags file unavailable, panic occurred prior to initialization\n");
1171 } else if (gIOPolledCoreFileMode
!= kIOPolledCoreFileModeDisabled
) {
1173 * If we haven't marked the corefile as explicitly disabled, and we've made it past initialization, then we know the current
1174 * system was configured to use disk based diagnostics at some point.
1176 paniclog_append_noflush("Panic diags file available: %s (0x%x)\n", (gIOPolledCoreFileMode
!= kIOPolledCoreFileModeClosed
) ? "YES" : "NO", kdp_polled_corefile_error());
1181 extern const char version
[];
1182 extern char osversion
[];
1184 static volatile uint32_t config_displayed
= 0;
1187 panic_display_system_configuration(boolean_t launchd_exit
)
1189 if (!launchd_exit
) {
1190 panic_display_process_name();
1192 if (OSCompareAndSwap(0, 1, &config_displayed
)) {
1194 if (!launchd_exit
&& strlcpy(buf
, PE_boot_args(), sizeof(buf
))) {
1195 paniclog_append_noflush("Boot args: %s\n", buf
);
1197 paniclog_append_noflush("\nMac OS version:\n%s\n",
1198 (osversion
[0] != 0) ? osversion
: "Not yet set");
1199 paniclog_append_noflush("\nKernel version:\n%s\n", version
);
1200 panic_display_kernel_uuid();
1201 if (!launchd_exit
) {
1202 panic_display_kernel_aslr();
1203 panic_display_hibb();
1204 panic_display_pal_info();
1206 panic_display_model_name();
1207 panic_display_disk_errors();
1208 panic_display_shutdown_status();
1209 if (!launchd_exit
) {
1210 panic_display_hib_count();
1211 panic_display_uptime();
1212 panic_display_times();
1213 panic_display_zprint();
1215 panic_display_ztrace();
1216 #endif /* CONFIG_ZLEAKS */
1217 kext_dump_panic_lists(&paniclog_append_noflush
);
1222 extern kmod_info_t
* kmod
; /* the list of modules */
1225 panic_print_kmod_symbol_name(vm_address_t search
)
1229 if (gLoadedKextSummaries
== NULL
) {
1232 for (i
= 0; i
< gLoadedKextSummaries
->numSummaries
; ++i
) {
1233 OSKextLoadedKextSummary
*summary
= gLoadedKextSummaries
->summaries
+ i
;
1235 if ((search
>= summary
->address
) &&
1236 (search
< (summary
->address
+ summary
->size
))) {
1237 kernel_mach_header_t
*header
= (kernel_mach_header_t
*)(uintptr_t) summary
->address
;
1238 if (panic_print_macho_symbol_name(header
, search
, summary
->name
) == 0) {
1239 paniclog_append_noflush("%s + %llu", summary
->name
, (unsigned long)search
- summary
->address
);
1247 panic_print_symbol_name(vm_address_t search
)
1249 /* try searching in the kernel */
1250 if (panic_print_macho_symbol_name(&_mh_execute_header
, search
, "mach_kernel") == 0) {
1251 /* that failed, now try to search for the right kext */
1252 panic_print_kmod_symbol_name(search
);
1256 /* Generate a backtrace, given a frame pointer - this routine
1257 * should walk the stack safely. The trace is appended to the panic log
1258 * and conditionally, to the console. If the trace contains kernel module
1259 * addresses, display the module name, load address and dependencies.
1262 #define DUMPFRAMES 32
1263 #define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
1265 panic_i386_backtrace(void *_frame
, int nframes
, const char *msg
, boolean_t regdump
, x86_saved_state_t
*regs
)
1267 cframe_t
*frame
= (cframe_t
*)_frame
;
1268 vm_offset_t raddrs
[DUMPFRAMES
];
1271 volatile uint32_t *ppbtcnt
= &pbtcnt
;
1272 uint64_t bt_tsc_timeout
;
1273 boolean_t keepsyms
= FALSE
;
1274 int cn
= cpu_number();
1275 boolean_t old_doprnt_hide_pointers
= doprnt_hide_pointers
;
1277 #if DEVELOPMENT || DEBUG
1278 /* Turn off I/O tracing now that we're panicking */
1279 mmiotrace_enabled
= 0;
1283 os_atomic_inc(&pbtcnt
, relaxed
);
1284 /* Spin on print backtrace lock, which serializes output
1285 * Continue anyway if a timeout occurs.
1287 hw_lock_to(&pbtlock
, ~0U, LCK_GRP_NULL
);
1291 if (__improbable(doprnt_hide_pointers
== TRUE
)) {
1292 /* If we're called directly, the Debugger() function will not be called,
1293 * so we need to reset the value in here. */
1294 doprnt_hide_pointers
= FALSE
;
1299 PE_parse_boot_argn("keepsyms", &keepsyms
, sizeof(keepsyms
));
1302 paniclog_append_noflush("%s", msg
);
1305 if ((regdump
== TRUE
) && (regs
!= NULL
)) {
1306 x86_saved_state64_t
*ss64p
= saved_state64(regs
);
1307 paniclog_append_noflush(
1308 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1309 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1310 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1311 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1312 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
1313 ss64p
->rax
, ss64p
->rbx
, ss64p
->rcx
, ss64p
->rdx
,
1314 ss64p
->isf
.rsp
, ss64p
->rbp
, ss64p
->rsi
, ss64p
->rdi
,
1315 ss64p
->r8
, ss64p
->r9
, ss64p
->r10
, ss64p
->r11
,
1316 ss64p
->r12
, ss64p
->r13
, ss64p
->r14
, ss64p
->r15
,
1317 ss64p
->isf
.rflags
, ss64p
->isf
.rip
, ss64p
->isf
.cs
,
1319 PC
= ss64p
->isf
.rip
;
1322 paniclog_append_noflush("Backtrace (CPU %d), "
1323 #if PRINT_ARGS_FROM_STACK_FRAME
1324 "Frame : Return Address (4 potential args on stack)\n", cn
);
1326 "Frame : Return Address\n", cn
);
1329 for (frame_index
= 0; frame_index
< nframes
; frame_index
++) {
1330 vm_offset_t curframep
= (vm_offset_t
) frame
;
1336 if (curframep
& 0x3) {
1337 paniclog_append_noflush("Unaligned frame\n");
1341 if (!kvtophys(curframep
) ||
1342 !kvtophys(curframep
+ sizeof(cframe_t
) - 1)) {
1343 paniclog_append_noflush("No mapping exists for frame pointer\n");
1347 paniclog_append_noflush("%p : 0x%lx ", frame
, frame
->caller
);
1348 if (frame_index
< DUMPFRAMES
) {
1349 raddrs
[frame_index
] = frame
->caller
;
1352 #if PRINT_ARGS_FROM_STACK_FRAME
1353 if (kvtophys((vm_offset_t
)&(frame
->args
[3]))) {
1354 paniclog_append_noflush("(0x%x 0x%x 0x%x 0x%x) ",
1355 frame
->args
[0], frame
->args
[1],
1356 frame
->args
[2], frame
->args
[3]);
1360 /* Display address-symbol translation only if the "keepsyms"
1361 * boot-arg is suppplied, since we unload LINKEDIT otherwise.
1362 * This routine is potentially unsafe; also, function
1363 * boundary identification is unreliable after a strip -x.
1366 panic_print_symbol_name((vm_address_t
)frame
->caller
);
1369 paniclog_append_noflush("\n");
1371 frame
= frame
->prev
;
1374 if (frame_index
>= nframes
) {
1375 paniclog_append_noflush("\tBacktrace continues...\n");
1381 paniclog_append_noflush("Backtrace terminated-invalid frame pointer %p\n", frame
);
1384 /* Identify kernel modules in the backtrace and display their
1385 * load addresses and dependencies. This routine should walk
1386 * the kmod list safely.
1389 kmod_panic_dump((vm_offset_t
*)&raddrs
[0], frame_index
);
1393 kmod_panic_dump(&PC
, 1);
1396 panic_display_system_configuration(FALSE
);
1398 doprnt_hide_pointers
= old_doprnt_hide_pointers
;
1400 /* Release print backtrace lock, to permit other callers in the
1401 * event of panics on multiple processors.
1403 hw_lock_unlock(&pbtlock
);
1404 os_atomic_dec(&pbtcnt
, relaxed
);
1405 /* Wait for other processors to complete output
1406 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1408 bt_tsc_timeout
= rdtsc64() + PBT_TIMEOUT_CYCLES
;
1409 while (*ppbtcnt
&& (rdtsc64() < bt_tsc_timeout
)) {
1415 debug_copyin(pmap_t p
, uint64_t uaddr
, void *dest
, size_t size
)
1418 char *kvaddr
= dest
;
1421 ppnum_t upn
= pmap_find_phys(p
, uaddr
);
1422 uint64_t phys_src
= ptoa_64(upn
) | (uaddr
& PAGE_MASK
);
1423 uint64_t phys_dest
= kvtophys((vm_offset_t
)kvaddr
);
1424 uint64_t src_rem
= PAGE_SIZE
- (phys_src
& PAGE_MASK
);
1425 uint64_t dst_rem
= PAGE_SIZE
- (phys_dest
& PAGE_MASK
);
1426 size_t cur_size
= (uint32_t) MIN(src_rem
, dst_rem
);
1427 cur_size
= MIN(cur_size
, rem
);
1429 if (upn
&& pmap_valid_page(upn
) && phys_dest
) {
1430 bcopy_phys(phys_src
, phys_dest
, cur_size
);
1442 print_threads_registers(thread_t thread
)
1444 x86_saved_state_t
*savestate
;
1446 savestate
= get_user_regs(thread
);
1447 paniclog_append_noflush(
1448 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1449 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1450 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1451 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1452 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1453 savestate
->ss_64
.rax
, savestate
->ss_64
.rbx
, savestate
->ss_64
.rcx
, savestate
->ss_64
.rdx
,
1454 savestate
->ss_64
.isf
.rsp
, savestate
->ss_64
.rbp
, savestate
->ss_64
.rsi
, savestate
->ss_64
.rdi
,
1455 savestate
->ss_64
.r8
, savestate
->ss_64
.r9
, savestate
->ss_64
.r10
, savestate
->ss_64
.r11
,
1456 savestate
->ss_64
.r12
, savestate
->ss_64
.r13
, savestate
->ss_64
.r14
, savestate
->ss_64
.r15
,
1457 savestate
->ss_64
.isf
.rflags
, savestate
->ss_64
.isf
.rip
, savestate
->ss_64
.isf
.cs
,
1458 savestate
->ss_64
.isf
.ss
);
1462 print_tasks_user_threads(task_t task
)
1464 thread_t thread
= current_thread();
1465 x86_saved_state_t
*savestate
;
1468 const char *cur_marker
= 0;
1471 for (j
= 0, thread
= (thread_t
) queue_first(&task
->threads
); j
< task
->thread_count
;
1472 ++j
, thread
= (thread_t
) queue_next(&thread
->task_threads
)) {
1473 paniclog_append_noflush("Thread %d: %p\n", j
, thread
);
1474 pmap
= get_task_pmap(task
);
1475 savestate
= get_user_regs(thread
);
1476 rbp
= savestate
->ss_64
.rbp
;
1477 paniclog_append_noflush("\t0x%016llx\n", savestate
->ss_64
.isf
.rip
);
1478 print_one_backtrace(pmap
, (vm_offset_t
)rbp
, cur_marker
, TRUE
);
1479 paniclog_append_noflush("\n");
1484 print_thread_num_that_crashed(task_t task
)
1486 thread_t c_thread
= current_thread();
1490 for (j
= 0, thread
= (thread_t
) queue_first(&task
->threads
); j
< task
->thread_count
;
1491 ++j
, thread
= (thread_t
) queue_next(&thread
->task_threads
)) {
1492 if (c_thread
== thread
) {
1493 paniclog_append_noflush("\nThread %d crashed\n", j
);
1499 #define PANICLOG_UUID_BUF_SIZE 256
1502 print_uuid_info(task_t task
)
1504 uint32_t uuid_info_count
= 0;
1505 mach_vm_address_t uuid_info_addr
= 0;
1506 boolean_t have_map
= (task
->map
!= NULL
) && (ml_validate_nofault((vm_offset_t
)(task
->map
), sizeof(struct _vm_map
)));
1507 boolean_t have_pmap
= have_map
&& (task
->map
->pmap
!= NULL
) && (ml_validate_nofault((vm_offset_t
)(task
->map
->pmap
), sizeof(struct pmap
)));
1508 int task_pid
= pid_from_task(task
);
1509 char uuidbuf
[PANICLOG_UUID_BUF_SIZE
] = {0};
1510 char *uuidbufptr
= uuidbuf
;
1513 if (have_pmap
&& task
->active
&& task_pid
> 0) {
1514 /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
1515 struct user64_dyld_all_image_infos task_image_infos
;
1516 if (debug_copyin(task
->map
->pmap
, task
->all_image_info_addr
,
1517 &task_image_infos
, sizeof(struct user64_dyld_all_image_infos
))) {
1518 uuid_info_count
= (uint32_t)task_image_infos
.uuidArrayCount
;
1519 uuid_info_addr
= task_image_infos
.uuidArray
;
1522 /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
1523 * in the middle of updating this data structure), we zero the
1524 * uuid_info_count so that we won't even try to save load info for this task
1526 if (!uuid_info_addr
) {
1527 uuid_info_count
= 0;
1531 if (task_pid
> 0 && uuid_info_count
> 0) {
1532 uint32_t uuid_info_size
= sizeof(struct user64_dyld_uuid_info
);
1533 uint32_t uuid_array_size
= uuid_info_count
* uuid_info_size
;
1534 uint32_t uuid_copy_size
= 0;
1535 uint32_t uuid_image_count
= 0;
1536 char *current_uuid_buffer
= NULL
;
1537 /* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */
1539 paniclog_append_noflush("\nuuid info:\n");
1540 while (uuid_array_size
) {
1541 if (uuid_array_size
<= PANICLOG_UUID_BUF_SIZE
) {
1542 uuid_copy_size
= uuid_array_size
;
1543 uuid_image_count
= uuid_array_size
/ uuid_info_size
;
1545 uuid_image_count
= PANICLOG_UUID_BUF_SIZE
/ uuid_info_size
;
1546 uuid_copy_size
= uuid_image_count
* uuid_info_size
;
1548 if (have_pmap
&& !debug_copyin(task
->map
->pmap
, uuid_info_addr
, uuidbufptr
,
1550 paniclog_append_noflush("Error!! Failed to copy UUID info for task %p pid %d\n", task
, task_pid
);
1551 uuid_image_count
= 0;
1555 if (uuid_image_count
> 0) {
1556 current_uuid_buffer
= uuidbufptr
;
1557 for (k
= 0; k
< uuid_image_count
; k
++) {
1558 paniclog_append_noflush(" %#llx", *(uint64_t *)current_uuid_buffer
);
1559 current_uuid_buffer
+= sizeof(uint64_t);
1560 uint8_t *uuid
= (uint8_t *)current_uuid_buffer
;
1561 paniclog_append_noflush("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
1562 uuid
[0], uuid
[1], uuid
[2], uuid
[3], uuid
[4], uuid
[5], uuid
[6], uuid
[7], uuid
[8],
1563 uuid
[9], uuid
[10], uuid
[11], uuid
[12], uuid
[13], uuid
[14], uuid
[15]);
1564 current_uuid_buffer
+= 16;
1566 bzero(&uuidbuf
, sizeof(uuidbuf
));
1568 uuid_info_addr
+= uuid_copy_size
;
1569 uuid_array_size
-= uuid_copy_size
;
1575 print_launchd_info(void)
1577 task_t task
= current_task();
1578 thread_t thread
= current_thread();
1579 volatile uint32_t *ppbtcnt
= &pbtcnt
;
1580 uint64_t bt_tsc_timeout
;
1581 int cn
= cpu_number();
1584 os_atomic_inc(&pbtcnt
, relaxed
);
1585 /* Spin on print backtrace lock, which serializes output
1586 * Continue anyway if a timeout occurs.
1588 hw_lock_to(&pbtlock
, ~0U, LCK_GRP_NULL
);
1592 print_uuid_info(task
);
1593 print_thread_num_that_crashed(task
);
1594 print_threads_registers(thread
);
1595 print_tasks_user_threads(task
);
1597 panic_display_system_configuration(TRUE
);
1599 /* Release print backtrace lock, to permit other callers in the
1600 * event of panics on multiple processors.
1602 hw_lock_unlock(&pbtlock
);
1603 os_atomic_dec(&pbtcnt
, relaxed
);
1604 /* Wait for other processors to complete output
1605 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1607 bt_tsc_timeout
= rdtsc64() + PBT_TIMEOUT_CYCLES
;
1608 while (*ppbtcnt
&& (rdtsc64() < bt_tsc_timeout
)) {
1614 * Compares 2 EFI GUIDs. Returns true if they match.
1617 efi_compare_guids(EFI_GUID
*guid1
, EFI_GUID
*guid2
)
1619 return (bcmp(guid1
, guid2
, sizeof(EFI_GUID
)) == 0) ? true : false;
1623 * Converts from an efiboot-originated virtual address to a physical
1626 static inline uint64_t
1627 efi_efiboot_virtual_to_physical(uint64_t addr
)
1629 if (addr
>= VM_MIN_KERNEL_ADDRESS
) {
1630 return addr
& (0x40000000ULL
- 1);
1637 * Convers from a efiboot-originated virtual address to an accessible
1638 * pointer to that physical address by translating it to a physmap-relative
1642 efi_efiboot_virtual_to_physmap_virtual(uint64_t addr
)
1644 return PHYSMAP_PTOV(efi_efiboot_virtual_to_physical(addr
));
1648 * Returns the physical address of the firmware table identified
1649 * by the passed-in GUID, or 0 if the table could not be located.
1652 efi_get_cfgtbl_by_guid(EFI_GUID
*guidp
)
1654 EFI_CONFIGURATION_TABLE_64
*cfg_table_entp
, *cfgTable
;
1655 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
1656 EFI_SYSTEM_TABLE_64
*estp
;
1657 uint32_t i
, hdr_cksum
, cksum
;
1659 estp
= (EFI_SYSTEM_TABLE_64
*)efi_efiboot_virtual_to_physmap_virtual(args
->efiSystemTable
);
1663 // Verify signature of the system table
1664 hdr_cksum
= estp
->Hdr
.CRC32
;
1665 estp
->Hdr
.CRC32
= 0;
1666 cksum
= crc32(0L, estp
, estp
->Hdr
.HeaderSize
);
1667 estp
->Hdr
.CRC32
= hdr_cksum
;
1669 if (cksum
!= hdr_cksum
) {
1670 DPRINTF("efi_get_cfgtbl_by_guid: EST CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
1671 DPRINTF("Bad EFI system table checksum\n");
1676 * efiboot can (and will) change the address of ConfigurationTable (and each table's VendorTable address)
1677 * to a kernel-virtual address. Reverse that to get the physical address, which we then use to get a
1678 * physmap-based virtual address.
1680 cfgTable
= (EFI_CONFIGURATION_TABLE_64
*)efi_efiboot_virtual_to_physmap_virtual(estp
->ConfigurationTable
);
1682 for (i
= 0; i
< estp
->NumberOfTableEntries
; i
++) {
1683 cfg_table_entp
= (EFI_CONFIGURATION_TABLE_64
*)&cfgTable
[i
];
1685 DPRINTF("EST: Comparing GUIDs for entry %d\n", i
);
1686 if (cfg_table_entp
== 0) {
1690 if (efi_compare_guids(&cfg_table_entp
->VendorGuid
, guidp
) == true) {
1691 DPRINTF("GUID match: returning %p\n", (void *)(uintptr_t)cfg_table_entp
->VendorTable
);
1692 return efi_efiboot_virtual_to_physical(cfg_table_entp
->VendorTable
);
1701 * Returns the physical address of the RSDP (either v1 or >=v2) or 0
1702 * if the RSDP could not be located.
1705 efi_get_rsdp_physaddr(void)
1708 #define ACPI_RSDP_GUID \
1709 { 0xeb9d2d30, 0x2d88, 0x11d3, {0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d} }
1710 #define ACPI_20_RSDP_GUID \
1711 { 0x8868e871, 0xe4f1, 0x11d3, {0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81} }
1713 static EFI_GUID EFI_RSDP_GUID_ACPI20
= ACPI_20_RSDP_GUID
;
1714 static EFI_GUID EFI_RSDP_GUID_ACPI10
= ACPI_RSDP_GUID
;
1716 if ((rsdp_addr
= efi_get_cfgtbl_by_guid(&EFI_RSDP_GUID_ACPI20
)) == 0) {
1717 DPRINTF("RSDP ACPI 2.0 lookup failed. Trying RSDP ACPI 1.0...\n");
1718 rsdp_addr
= efi_get_cfgtbl_by_guid(&EFI_RSDP_GUID_ACPI10
);
1719 if (rsdp_addr
== 0) {
1720 DPRINTF("RSDP ACPI 1.0 lookup failed also.\n");