2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
66 * Basic initialization for I386 - ISA bus machines.
70 #define __APPLE_API_PRIVATE 1
71 #define __APPLE_API_UNSTABLE 1
72 #include <kern/debug.h>
74 #include <mach/i386/vm_param.h>
77 #include <mach/vm_param.h>
78 #include <mach/vm_prot.h>
79 #include <mach/machine.h>
80 #include <mach/time_value.h>
81 #include <sys/kdebug.h>
83 #include <kern/assert.h>
84 #include <kern/misc_protos.h>
85 #include <kern/startup.h>
86 #include <kern/clock.h>
87 #include <kern/cpu_data.h>
88 #include <kern/machine.h>
89 #include <i386/postcode.h>
90 #include <i386/mp_desc.h>
91 #include <i386/misc_protos.h>
92 #include <i386/thread.h>
93 #include <i386/trap.h>
94 #include <i386/machine_routines.h>
95 #include <i386/mp.h> /* mp_rendezvous_break_lock */
96 #include <i386/cpuid.h>
98 #include <i386/machine_cpu.h>
99 #include <i386/pmap.h>
101 #include <i386/mtrr.h>
103 #include <i386/ucode.h>
104 #include <i386/pmCPU.h>
105 #include <i386/panic_hooks.h>
107 #include <architecture/i386/pio.h> /* inb() */
108 #include <pexpert/i386/boot.h>
110 #include <kdp/kdp_dyld.h>
111 #include <kdp/kdp_core.h>
113 #include <vm/vm_map.h>
114 #include <vm/vm_kern.h>
116 #include <IOKit/IOPlatformExpert.h>
117 #include <IOKit/IOHibernatePrivate.h>
119 #include <pexpert/i386/efi.h>
121 #include <kern/thread.h>
122 #include <kern/sched.h>
123 #include <mach-o/loader.h>
124 #include <mach-o/nlist.h>
126 #include <libkern/kernel_mach_header.h>
127 #include <libkern/OSKextLibPrivate.h>
129 #include <mach/branch_predicates.h>
/* Debug-only kernel printf: compiled out entirely on RELEASE builds.
 * (The #else/#endif of this conditional were missing, leaving both
 * definitions unconditionally visible — restored here.) */
#if DEBUG || DEVELOPMENT
#define DPRINTF(x...)	kprintf(x)
#else
#define DPRINTF(x...)
#endif

/* Round `a` up to the next multiple of `b`; `b` must be a power of two. */
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))

/* Round `x` down to a multiple of `y` (any nonzero `y`). */
#define ROUNDDOWN(x,y) (((x)/(y))*(y))
145 static void machine_conf(void);
146 void panic_print_symbol_name(vm_address_t search
);
147 void RecordPanicStackshot(void);
/*
 * Variants of a panic-log flush, used to decide which region(s) of the
 * panic buffer get pushed out by paniclog_flush_internal().
 */
typedef enum paniclog_flush_type {
	kPaniclogFlushBase      = 1, /* Flush the initial log and paniclog header */
	kPaniclogFlushStackshot = 2, /* Flush only the stackshot data, then flush the header */
	kPaniclogFlushOtherLog  = 3  /* Flush the other log, then flush the header */
} paniclog_flush_type_t;
155 void paniclog_flush_internal(paniclog_flush_type_t variant
);
157 extern const char version
[];
158 extern char osversion
[];
159 extern int max_unsafe_quanta
;
160 extern int max_poll_quanta
;
161 extern unsigned int panic_is_inited
;
163 extern int proc_pid(void *p
);
165 /* Definitions for frame pointers */
166 #define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
167 #define FP_LR_OFFSET ((uint32_t)4)
168 #define FP_LR_OFFSET64 ((uint32_t)8)
169 #define FP_MAX_NUM_TO_EVALUATE (50)
171 volatile int pbtcpu
= -1;
172 hw_lock_data_t pbtlock
; /* backtrace print lock */
175 volatile int panic_double_fault_cpu
= -1;
177 #define PRINT_ARGS_FROM_STACK_FRAME 0
179 typedef struct _cframe_t
{
180 struct _cframe_t
*prev
;
182 #if PRINT_ARGS_FROM_STACK_FRAME
187 static unsigned panic_io_port
;
188 static unsigned commit_paniclog_to_nvram
;
189 boolean_t coprocessor_paniclog_flush
= FALSE
;
191 struct kcdata_descriptor kc_panic_data
;
192 static boolean_t begun_panic_stackshot
= FALSE
;
193 extern kern_return_t
do_stackshot(void *);
195 extern void kdp_snapshot_preflight(int pid
, void *tracebuf
,
196 uint32_t tracebuf_size
, uint32_t flags
,
197 kcdata_descriptor_t data_p
,
198 boolean_t enable_faulting
);
199 extern int kdp_stack_snapshot_bytes_traced(void);
201 #if DEVELOPMENT || DEBUG
202 vm_offset_t panic_stackshot_buf
= 0;
203 size_t panic_stackshot_len
= 0;
207 * Backtrace a single frame.
210 print_one_backtrace(pmap_t pmap
, vm_offset_t topfp
, const char *cur_marker
,
218 boolean_t dump_kernel_stack
;
224 if (fp
>= VM_MIN_KERNEL_ADDRESS
)
225 dump_kernel_stack
= TRUE
;
227 dump_kernel_stack
= FALSE
;
230 if ((fp
== 0) || ((fp
& FP_ALIGNMENT_MASK
) != 0))
232 if (dump_kernel_stack
&& ((fp
< VM_MIN_KERNEL_ADDRESS
) || (fp
> VM_MAX_KERNEL_ADDRESS
)))
234 if ((!dump_kernel_stack
) && (fp
>=VM_MIN_KERNEL_ADDRESS
))
237 /* Check to see if current address will result in a different
238 ppn than previously computed (to avoid recomputation) via
239 (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */
241 if ((((fp
+ FP_LR_OFFSET
) ^ fp_for_ppn
) >> PAGE_SHIFT
) != 0x0U
) {
242 ppn
= pmap_find_phys(pmap
, fp
+ FP_LR_OFFSET
);
243 fp_for_ppn
= fp
+ (is_64_bit
? FP_LR_OFFSET64
: FP_LR_OFFSET
);
245 if (ppn
!= (ppnum_t
)NULL
) {
247 lr
= ml_phys_read_double_64(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | ((fp
+ FP_LR_OFFSET64
) & PAGE_MASK
));
249 lr
= ml_phys_read_word(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | ((fp
+ FP_LR_OFFSET
) & PAGE_MASK
));
253 paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker
, fp
+ FP_LR_OFFSET64
);
255 paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker
, (uint32_t)(fp
+ FP_LR_OFFSET
));
259 if (((fp
^ fp_for_ppn
) >> PAGE_SHIFT
) != 0x0U
) {
260 ppn
= pmap_find_phys(pmap
, fp
);
263 if (ppn
!= (ppnum_t
)NULL
) {
265 fp
= ml_phys_read_double_64(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | (fp
& PAGE_MASK
));
267 fp
= ml_phys_read_word(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | (fp
& PAGE_MASK
));
271 paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker
, fp
);
273 paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker
, (uint32_t)fp
);
279 paniclog_append_noflush("%s\t0x%016llx\n", cur_marker
, lr
);
281 paniclog_append_noflush("%s\t0x%08x\n", cur_marker
, (uint32_t)lr
);
283 } while ((++i
< FP_MAX_NUM_TO_EVALUATE
) && (fp
!= topfp
));
286 machine_startup(void)
291 if( PE_get_hotkey( kPEControlKey
))
292 halt_in_debugger
= halt_in_debugger
? 0 : 1;
295 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram
, sizeof (commit_paniclog_to_nvram
)))
296 commit_paniclog_to_nvram
= 1;
299 * Entering the debugger will put the CPUs into a "safe"
302 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg
, sizeof (boot_arg
)))
303 pmsafe_debug
= boot_arg
;
305 hw_lock_init(&pbtlock
); /* initialize print backtrace lock */
307 if (PE_parse_boot_argn("preempt", &boot_arg
, sizeof (boot_arg
))) {
308 default_preemption_rate
= boot_arg
;
310 if (PE_parse_boot_argn("unsafe", &boot_arg
, sizeof (boot_arg
))) {
311 max_unsafe_quanta
= boot_arg
;
313 if (PE_parse_boot_argn("poll", &boot_arg
, sizeof (boot_arg
))) {
314 max_poll_quanta
= boot_arg
;
316 if (PE_parse_boot_argn("yield", &boot_arg
, sizeof (boot_arg
))) {
317 sched_poll_yield_shift
= boot_arg
;
319 /* The I/O port to issue a read from, in the event of a panic. Useful for
320 * triggering logic analyzers.
322 if (PE_parse_boot_argn("panic_io_port", &boot_arg
, sizeof (boot_arg
))) {
323 /*I/O ports range from 0 through 0xFFFF */
324 panic_io_port
= boot_arg
& 0xffff;
342 machine_info
.memory_size
= (typeof(machine_info
.memory_size
))mem_size
;
346 extern void *gPEEFIRuntimeServices
;
347 extern void *gPEEFISystemTable
;
350 * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
351 * code or tables extracted from it, as desired without restriction.
353 * First, the polynomial itself and its table of feedback terms. The
355 * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
357 * Note that we take it "backwards" and put the highest-order term in
358 * the lowest-order bit. The X^32 term is "implied"; the LSB is the
359 * X^31 term, etc. The X^0 term (usually shown as "+1") results in
362 * Note that the usual hardware shift register implementation, which
363 * is what we're using (we're merely optimizing it by doing eight-bit
364 * chunks at a time) shifts bits into the lowest-order term. In our
365 * implementation, that means shifting towards the right. Why do we
366 * do it this way? Because the calculated CRC must be transmitted in
367 * order from highest-order term to lowest-order term. UARTs transmit
368 * characters in order from LSB to MSB. By storing the CRC this way
369 * we hand it to the UART in the order low-byte to high-byte; the UART
370 * sends each low-bit to hight-bit; and the result is transmission bit
371 * by bit from highest- to lowest-order term without requiring any bit
372 * shuffling on our part. Reception works similarly
374 * The feedback terms table consists of 256, 32-bit entries. Notes
376 * The table can be generated at runtime if desired; code to do so
377 * is shown later. It might not be obvious, but the feedback
378 * terms simply represent the results of eight shift/xor opera
379 * tions for all combinations of data and CRC register values
381 * The values must be right-shifted by eight bits by the "updcrc
382 * logic; the shift must be unsigned (bring in zeroes). On some
383 * hardware you could probably optimize the shift in assembler by
384 * using byte-swap instructions
385 * polynomial $edb88320
388 * CRC32 code derived from work by Gary S. Brown.
/*
 * Feedback-term table for the reflected CRC-32 polynomial 0xedb88320
 * (IEEE 802.3).  Derived from work by Gary S. Brown.
 */
static uint32_t crc32_tab[] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
	0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
	0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
	0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
	0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
	0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
	0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
	0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
	0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
	0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
	0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
	0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
	0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
	0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
	0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
	0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
	0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
	0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};

/*
 * Compute the standard (IEEE 802.3, reflected) CRC-32 of `size` bytes at
 * `buf`, seeded with `crc` (pass 0 for a fresh checksum; chaining calls
 * with a previous result continues the checksum).  The function body was
 * missing apart from the table-lookup line and has been restored: the
 * pre/post inversion with ~0U is part of the standard algorithm.
 */
static uint32_t
crc32(uint32_t crc, const void *buf, size_t size)
{
	const uint8_t *p = buf;

	crc = crc ^ ~0U;
	while (size--)
		crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);

	return crc ^ ~0U;
}
452 efi_set_tables_64(EFI_SYSTEM_TABLE_64
* system_table
)
454 EFI_RUNTIME_SERVICES_64
*runtime
;
458 DPRINTF("Processing 64-bit EFI tables at %p\n", system_table
);
460 DPRINTF("Header:\n");
461 DPRINTF(" Signature: 0x%016llx\n", system_table
->Hdr
.Signature
);
462 DPRINTF(" Revision: 0x%08x\n", system_table
->Hdr
.Revision
);
463 DPRINTF(" HeaderSize: 0x%08x\n", system_table
->Hdr
.HeaderSize
);
464 DPRINTF(" CRC32: 0x%08x\n", system_table
->Hdr
.CRC32
);
465 DPRINTF("RuntimeServices: 0x%016llx\n", system_table
->RuntimeServices
);
466 if (system_table
->Hdr
.Signature
!= EFI_SYSTEM_TABLE_SIGNATURE
) {
467 kprintf("Bad EFI system table signature\n");
470 // Verify signature of the system table
471 hdr_cksum
= system_table
->Hdr
.CRC32
;
472 system_table
->Hdr
.CRC32
= 0;
473 cksum
= crc32(0L, system_table
, system_table
->Hdr
.HeaderSize
);
475 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
476 system_table
->Hdr
.CRC32
= hdr_cksum
;
477 if (cksum
!= hdr_cksum
) {
478 kprintf("Bad EFI system table checksum\n");
482 gPEEFISystemTable
= system_table
;
484 if(system_table
->RuntimeServices
== 0) {
485 kprintf("No runtime table present\n");
488 DPRINTF("RuntimeServices table at 0x%qx\n", system_table
->RuntimeServices
);
489 // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
490 runtime
= (EFI_RUNTIME_SERVICES_64
*) (uintptr_t)system_table
->RuntimeServices
;
491 DPRINTF("Checking runtime services table %p\n", runtime
);
492 if (runtime
->Hdr
.Signature
!= EFI_RUNTIME_SERVICES_SIGNATURE
) {
493 kprintf("Bad EFI runtime table signature\n");
497 // Verify signature of runtime services table
498 hdr_cksum
= runtime
->Hdr
.CRC32
;
499 runtime
->Hdr
.CRC32
= 0;
500 cksum
= crc32(0L, runtime
, runtime
->Hdr
.HeaderSize
);
502 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
503 runtime
->Hdr
.CRC32
= hdr_cksum
;
504 if (cksum
!= hdr_cksum
) {
505 kprintf("Bad EFI runtime table checksum\n");
509 gPEEFIRuntimeServices
= runtime
;
515 efi_set_tables_32(EFI_SYSTEM_TABLE_32
* system_table
)
517 EFI_RUNTIME_SERVICES_32
*runtime
;
521 DPRINTF("Processing 32-bit EFI tables at %p\n", system_table
);
523 DPRINTF("Header:\n");
524 DPRINTF(" Signature: 0x%016llx\n", system_table
->Hdr
.Signature
);
525 DPRINTF(" Revision: 0x%08x\n", system_table
->Hdr
.Revision
);
526 DPRINTF(" HeaderSize: 0x%08x\n", system_table
->Hdr
.HeaderSize
);
527 DPRINTF(" CRC32: 0x%08x\n", system_table
->Hdr
.CRC32
);
528 DPRINTF("RuntimeServices: 0x%08x\n", system_table
->RuntimeServices
);
529 if (system_table
->Hdr
.Signature
!= EFI_SYSTEM_TABLE_SIGNATURE
) {
530 kprintf("Bad EFI system table signature\n");
533 // Verify signature of the system table
534 hdr_cksum
= system_table
->Hdr
.CRC32
;
535 system_table
->Hdr
.CRC32
= 0;
536 DPRINTF("System table at %p HeaderSize 0x%x\n", system_table
, system_table
->Hdr
.HeaderSize
);
537 cksum
= crc32(0L, system_table
, system_table
->Hdr
.HeaderSize
);
539 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
540 system_table
->Hdr
.CRC32
= hdr_cksum
;
541 if (cksum
!= hdr_cksum
) {
542 kprintf("Bad EFI system table checksum\n");
546 gPEEFISystemTable
= system_table
;
548 if(system_table
->RuntimeServices
== 0) {
549 kprintf("No runtime table present\n");
552 DPRINTF("RuntimeServices table at 0x%x\n", system_table
->RuntimeServices
);
553 // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
554 // For a 64-bit kernel, booter provides a virtual address mod 4G
555 runtime
= (EFI_RUNTIME_SERVICES_32
*)
556 (system_table
->RuntimeServices
| VM_MIN_KERNEL_ADDRESS
);
557 DPRINTF("Runtime table addressed at %p\n", runtime
);
558 if (runtime
->Hdr
.Signature
!= EFI_RUNTIME_SERVICES_SIGNATURE
) {
559 kprintf("Bad EFI runtime table signature\n");
563 // Verify signature of runtime services table
564 hdr_cksum
= runtime
->Hdr
.CRC32
;
565 runtime
->Hdr
.CRC32
= 0;
566 cksum
= crc32(0L, runtime
, runtime
->Hdr
.HeaderSize
);
568 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
569 runtime
->Hdr
.CRC32
= hdr_cksum
;
570 if (cksum
!= hdr_cksum
) {
571 kprintf("Bad EFI runtime table checksum\n");
575 DPRINTF("Runtime functions\n");
576 DPRINTF(" GetTime : 0x%x\n", runtime
->GetTime
);
577 DPRINTF(" SetTime : 0x%x\n", runtime
->SetTime
);
578 DPRINTF(" GetWakeupTime : 0x%x\n", runtime
->GetWakeupTime
);
579 DPRINTF(" SetWakeupTime : 0x%x\n", runtime
->SetWakeupTime
);
580 DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime
->SetVirtualAddressMap
);
581 DPRINTF(" ConvertPointer : 0x%x\n", runtime
->ConvertPointer
);
582 DPRINTF(" GetVariable : 0x%x\n", runtime
->GetVariable
);
583 DPRINTF(" GetNextVariableName : 0x%x\n", runtime
->GetNextVariableName
);
584 DPRINTF(" SetVariable : 0x%x\n", runtime
->SetVariable
);
585 DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime
->GetNextHighMonotonicCount
);
586 DPRINTF(" ResetSystem : 0x%x\n", runtime
->ResetSystem
);
588 gPEEFIRuntimeServices
= runtime
;
594 /* Map in EFI runtime areas. */
598 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
600 kprintf("Initializing EFI runtime services\n");
604 vm_offset_t vm_size
, vm_addr
;
605 vm_map_offset_t phys_addr
;
606 EfiMemoryRange
*mptr
;
607 unsigned int msize
, mcount
;
610 msize
= args
->MemoryMapDescriptorSize
;
611 mcount
= args
->MemoryMapSize
/ msize
;
613 DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
614 args
->kaddr
, args
->ksize
);
615 DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
616 args
->efiSystemTable
,
617 (void *) ml_static_ptovirt(args
->efiSystemTable
));
618 DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
619 args
->efiRuntimeServicesPageStart
);
620 DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
621 args
->efiRuntimeServicesPageCount
);
622 DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
623 args
->efiRuntimeServicesVirtualPageStart
);
624 mptr
= (EfiMemoryRange
*)ml_static_ptovirt(args
->MemoryMap
);
625 for (i
=0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
626 if (((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) ) {
627 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
628 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
629 /* For K64 on EFI32, shadow-map into high KVA */
630 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
)
631 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
632 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
633 DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
635 (void *) (uintptr_t) phys_addr
,
636 (void *) (uintptr_t) mptr
->VirtualStart
,
639 pmap_map_bd(vm_addr
, phys_addr
, phys_addr
+ round_page(vm_size
),
640 (mptr
->Type
== kEfiRuntimeServicesCode
) ? VM_PROT_READ
| VM_PROT_EXECUTE
: VM_PROT_READ
|VM_PROT_WRITE
,
641 (mptr
->Type
== EfiMemoryMappedIO
) ? VM_WIMG_IO
: VM_WIMG_USE_DEFAULT
);
645 if (args
->Version
!= kBootArgsVersion2
)
646 panic("Incompatible boot args version %d revision %d\n", args
->Version
, args
->Revision
);
648 DPRINTF("Boot args version %d revision %d mode %d\n", args
->Version
, args
->Revision
, args
->efiMode
);
649 if (args
->efiMode
== kBootArgsEfiMode64
) {
650 efi_set_tables_64((EFI_SYSTEM_TABLE_64
*) ml_static_ptovirt(args
->efiSystemTable
));
652 efi_set_tables_32((EFI_SYSTEM_TABLE_32
*) ml_static_ptovirt(args
->efiSystemTable
));
660 /* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */
662 efi_valid_page(ppnum_t ppn
)
664 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
665 ppnum_t pstart
= args
->efiRuntimeServicesPageStart
;
666 ppnum_t pend
= pstart
+ args
->efiRuntimeServicesPageCount
;
668 return pstart
<= ppn
&& ppn
< pend
;
671 /* Remap EFI runtime areas. */
673 hibernate_newruntime_map(void * map
, vm_size_t map_size
, uint32_t system_table_offset
)
675 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
677 kprintf("Reinitializing EFI runtime services\n");
681 vm_offset_t vm_size
, vm_addr
;
682 vm_map_offset_t phys_addr
;
683 EfiMemoryRange
*mptr
;
684 unsigned int msize
, mcount
;
687 gPEEFISystemTable
= 0;
688 gPEEFIRuntimeServices
= 0;
690 system_table_offset
+= ptoa_32(args
->efiRuntimeServicesPageStart
);
692 kprintf("Old system table 0x%x, new 0x%x\n",
693 (uint32_t)args
->efiSystemTable
, system_table_offset
);
695 args
->efiSystemTable
= system_table_offset
;
697 kprintf("Old map:\n");
698 msize
= args
->MemoryMapDescriptorSize
;
699 mcount
= args
->MemoryMapSize
/ msize
;
700 mptr
= (EfiMemoryRange
*)ml_static_ptovirt(args
->MemoryMap
);
701 for (i
=0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
702 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) {
704 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
705 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
707 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
)
708 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
709 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
711 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr
->Type
, phys_addr
, (unsigned long)vm_addr
, mptr
->NumberOfPages
);
715 pmap_remove(kernel_pmap
, i386_ptob(args
->efiRuntimeServicesPageStart
),
716 i386_ptob(args
->efiRuntimeServicesPageStart
+ args
->efiRuntimeServicesPageCount
));
718 kprintf("New map:\n");
719 msize
= args
->MemoryMapDescriptorSize
;
720 mcount
= (unsigned int )(map_size
/ msize
);
722 for (i
=0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
723 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) {
725 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
726 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
727 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
)
728 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
729 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
731 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr
->Type
, phys_addr
, (unsigned long)vm_addr
, mptr
->NumberOfPages
);
733 pmap_map(vm_addr
, phys_addr
, phys_addr
+ round_page(vm_size
),
734 (mptr
->Type
== kEfiRuntimeServicesCode
) ? VM_PROT_READ
| VM_PROT_EXECUTE
: VM_PROT_READ
|VM_PROT_WRITE
,
735 (mptr
->Type
== EfiMemoryMappedIO
) ? VM_WIMG_IO
: VM_WIMG_USE_DEFAULT
);
739 if (args
->Version
!= kBootArgsVersion2
)
740 panic("Incompatible boot args version %d revision %d\n", args
->Version
, args
->Revision
);
742 kprintf("Boot args version %d revision %d mode %d\n", args
->Version
, args
->Revision
, args
->efiMode
);
743 if (args
->efiMode
== kBootArgsEfiMode64
) {
744 efi_set_tables_64((EFI_SYSTEM_TABLE_64
*) ml_static_ptovirt(args
->efiSystemTable
));
746 efi_set_tables_32((EFI_SYSTEM_TABLE_32
*) ml_static_ptovirt(args
->efiSystemTable
));
751 kprintf("Done reinitializing EFI runtime services\n");
757 * Find devices. The system is alive.
762 /* Now with VM up, switch to dynamically allocated cpu data */
765 /* Ensure panic buffer is initialized. */
769 * Display CPU identification
771 cpuid_cpu_display("CPU identification");
772 cpuid_feature_display("CPU features");
773 cpuid_extfeature_display("CPU extended features");
776 * Initialize EFI runtime services.
783 * Set up to use floating point.
788 * Configure clock devices.
794 * Initialize MTRR from boot processor.
799 * Set up PAT for boot processor.
805 * Free lowmem pages and complete other setup
807 pmap_lowmem_finalize();
816 halt_all_cpus(FALSE
);
819 int reset_mem_on_reboot
= 1;
822 * Halt the system or reboot.
824 __attribute__((noreturn
))
826 halt_all_cpus(boolean_t reboot
)
829 printf("MACH Reboot\n");
830 PEHaltRestart( kPERestartCPU
);
832 printf("CPU halted\n");
833 PEHaltRestart( kPEHaltCPU
);
839 /* Issue an I/O port read if one has been requested - this is an event logic
840 * analyzers can use as a trigger point.
844 panic_io_port_read(void) {
846 (void)inb(panic_io_port
);
849 /* For use with the MP rendezvous mechanism
852 uint64_t panic_restart_timeout
= ~(0ULL);
854 #define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)
857 * We should always return from this function with the other log offset
858 * set in the panic_info structure.
861 RecordPanicStackshot()
863 int err
= 0, bytes_traced
= 0, bytes_used
= 0, bytes_remaining
= 0;
864 char *stackshot_begin_loc
= NULL
;
866 /* Don't re-enter this code if we panic here */
867 if (begun_panic_stackshot
) {
868 if (panic_info
->mph_other_log_offset
== 0) {
869 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
873 begun_panic_stackshot
= TRUE
;
875 /* The panic log length should have been set before we came to capture a stackshot */
876 if (panic_info
->mph_panic_log_len
== 0) {
877 kdb_printf("Found zero length panic log, skipping capturing panic stackshot\n");
878 if (panic_info
->mph_other_log_offset
== 0) {
879 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
885 * Try to capture an in memory panic_stackshot (enabled during boot
886 * on systems with co-processors).
888 if (extended_debug_log_enabled
) {
889 if (stackshot_active()) {
890 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED
;
891 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
892 kdb_printf("Panicked during stackshot, skipping panic stackshot\n");
895 stackshot_begin_loc
= debug_buf_ptr
;
897 bytes_remaining
= debug_buf_size
- (unsigned int)((uintptr_t)stackshot_begin_loc
- (uintptr_t)debug_buf_base
);
898 err
= kcdata_memory_static_init(&kc_panic_data
, (mach_vm_address_t
)stackshot_begin_loc
,
899 KCDATA_BUFFER_BEGIN_STACKSHOT
, bytes_remaining
, KCFLAG_USE_MEMCOPY
);
900 if (err
!= KERN_SUCCESS
) {
901 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR
;
902 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
903 kdb_printf("Failed to initialize kcdata buffer for in-memory panic stackshot, skipping ...\n");
907 kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc
, bytes_remaining
,
908 (STACKSHOT_SAVE_KEXT_LOADINFO
| STACKSHOT_SAVE_LOADINFO
| STACKSHOT_KCDATA_FORMAT
|
909 STACKSHOT_ENABLE_BT_FAULTING
| STACKSHOT_ENABLE_UUID_FAULTING
| STACKSHOT_FROM_PANIC
|
910 STACKSHOT_NO_IO_STATS
| STACKSHOT_THREAD_WAITINFO
), &kc_panic_data
, 0);
911 err
= do_stackshot(NULL
);
912 bytes_traced
= (int) kdp_stack_snapshot_bytes_traced();
913 bytes_used
= (int) kcdata_memory_get_used_bytes(&kc_panic_data
);
915 if ((err
!= KERN_SUCCESS
) && (bytes_used
> 0)) {
917 * We ran out of space while trying to capture a stackshot, try again without user frames.
918 * It's not safe to log from here, but append a flag to the panic flags.
920 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_KERNEL_ONLY
;
921 panic_stackshot_reset_state();
923 /* Erase the stackshot data (this region is pre-populated with the NULL character) */
924 memset(stackshot_begin_loc
, '\0', bytes_used
);
926 err
= kcdata_memory_static_init(&kc_panic_data
, (mach_vm_address_t
)stackshot_begin_loc
,
927 KCDATA_BUFFER_BEGIN_STACKSHOT
, bytes_remaining
, KCFLAG_USE_MEMCOPY
);
928 if (err
!= KERN_SUCCESS
) {
929 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR
;
930 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
931 kdb_printf("Failed to re-initialize kcdata buffer for kernel only in-memory panic stackshot, skipping ...\n");
935 kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc
, bytes_remaining
, (STACKSHOT_KCDATA_FORMAT
|
936 STACKSHOT_NO_IO_STATS
| STACKSHOT_SAVE_KEXT_LOADINFO
| STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY
|
937 STACKSHOT_FROM_PANIC
| STACKSHOT_THREAD_WAITINFO
), &kc_panic_data
, 0);
938 err
= do_stackshot(NULL
);
939 bytes_traced
= (int) kdp_stack_snapshot_bytes_traced();
940 bytes_used
= (int) kcdata_memory_get_used_bytes(&kc_panic_data
);
943 if (err
== KERN_SUCCESS
) {
944 debug_buf_ptr
+= bytes_traced
;
945 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED
;
946 panic_info
->mph_stackshot_offset
= PE_get_offset_into_panic_region(stackshot_begin_loc
);
947 panic_info
->mph_stackshot_len
= bytes_traced
;
949 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
950 kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced
);
952 if (bytes_used
> 0) {
953 /* Erase the stackshot data (this region is pre-populated with the NULL character) */
954 memset(stackshot_begin_loc
, '\0', bytes_used
);
955 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE
;
957 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
958 kdb_printf("\n** In Memory Panic Stackshot Incomplete ** Bytes Filled %d ** Err %d\n", bytes_used
, err
);
960 bzero(stackshot_begin_loc
, bytes_used
);
961 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR
;
963 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
964 kdb_printf("\n** In Memory Panic Stackshot Failed ** Bytes Traced %d, err %d\n", bytes_traced
, err
);
968 #if DEVELOPMENT || DEBUG
969 if (panic_stackshot_buf
!= 0) {
970 /* We're going to try to take another stackshot, reset the state. */
971 panic_stackshot_reset_state();
973 #endif /* DEVELOPMENT || DEBUG */
975 panic_info
->mph_other_log_offset
= PE_get_offset_into_panic_region(debug_buf_ptr
);
978 #if DEVELOPMENT || DEBUG
980 if (panic_stackshot_buf
== 0) {
981 kdb_printf("No stackshot buffer allocated for file backed panic stackshot, skipping...\n");
985 if (stackshot_active()) {
986 kdb_printf("Panicked during stackshot, skipping file backed panic stackshot\n");
990 err
= kcdata_memory_static_init(&kc_panic_data
, (mach_vm_address_t
)panic_stackshot_buf
, KCDATA_BUFFER_BEGIN_STACKSHOT
,
991 PANIC_STACKSHOT_BUFSIZE
, KCFLAG_USE_MEMCOPY
);
992 if (err
!= KERN_SUCCESS
) {
993 kdb_printf("Failed to initialize kcdata buffer for file backed panic stackshot, skipping ...\n");
997 kdp_snapshot_preflight(-1, (void *) panic_stackshot_buf
, PANIC_STACKSHOT_BUFSIZE
, (STACKSHOT_GET_GLOBAL_MEM_STATS
| STACKSHOT_SAVE_LOADINFO
| STACKSHOT_KCDATA_FORMAT
|
998 STACKSHOT_ENABLE_BT_FAULTING
| STACKSHOT_ENABLE_UUID_FAULTING
| STACKSHOT_FROM_PANIC
| STACKSHOT_NO_IO_STATS
999 | STACKSHOT_THREAD_WAITINFO
), &kc_panic_data
, 0);
1000 err
= do_stackshot(NULL
);
1001 bytes_traced
= (int) kdp_stack_snapshot_bytes_traced();
1002 if (bytes_traced
> 0 && !err
) {
1003 panic_stackshot_len
= bytes_traced
;
1004 kdb_printf("File backed panic stackshot succeeded, length: %u bytes\n", bytes_traced
);
1006 bytes_used
= (int) kcdata_memory_get_used_bytes(&kc_panic_data
);
1007 if (bytes_used
> 0) {
1008 kdb_printf("File backed panic stackshot incomplete, consumed %u bytes, error : %d \n", bytes_used
, err
);
1010 kdb_printf("File backed panic stackshot incomplete, consumed %u bytes, error : %d \n", bytes_used
, err
);
1013 #endif /* DEVELOPMENT || DEBUG */
/*
 * Save panic state on the x86 panic path: emit a backtrace (or launchd
 * crash info for initproc panics), update the macOS panic header's
 * flags/offsets/lengths, flush the panic log, attempt a panic stackshot,
 * then re-flush if the extended debug log is enabled.
 *
 * NOTE(review): this chunk is line-stripped; the function header and
 * several brace-only lines are missing. The truncated parameter list
 * below looks like SavePanicInfo(message, panic_options) -- TODO confirm
 * against the complete file.
 */
1020 __unused
const char *message
, uint64_t panic_options
)
1023 int cn
= cpu_number();
1026 * Issue an I/O port read if one has been requested - this is an event logic
1027 * analyzers can use as a trigger point.
1029 panic_io_port_read();
1031 /* Obtain current frame pointer */
1032 __asm__
volatile("movq %%rbp, %0" : "=m" (stackptr
));
1034 /* Print backtrace - callee is internally synchronized */
1035 if (panic_options
& DEBUGGER_OPTION_INITPROC_PANIC
) {
1036 /* Special handling of launchd died panics */
1037 print_launchd_info();
/* Deeper trace (80 frames) when this CPU took the double fault, else 48. */
1039 panic_i386_backtrace(stackptr
, ((panic_double_fault_cpu
== cn
) ? 80: 48), NULL
, FALSE
, NULL
);
1042 if (panic_options
& DEBUGGER_OPTION_COPROC_INITIATED_PANIC
) {
1043 panic_info
->mph_panic_flags
|= MACOS_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC
;
/*
 * Sanity-check the recorded panic-log offset before computing the log
 * length: a current offset smaller than mph_panic_log_offset means the
 * header was never properly initialized, so record a zero-length log
 * rather than letting the unsigned subtraction below underflow.
 */
1046 if (PE_get_offset_into_panic_region(debug_buf_ptr
) < panic_info
->mph_panic_log_offset
) {
1047 kdb_printf("Invalid panic log offset found (not properly initialized?): debug_buf_ptr : 0x%p, panic_info: 0x%p mph_panic_log_offset: 0x%x\n",
1048 debug_buf_ptr
, panic_info
, panic_info
->mph_panic_log_offset
);
1049 panic_info
->mph_panic_log_len
= 0;
1051 panic_info
->mph_panic_log_len
= PE_get_offset_into_panic_region(debug_buf_ptr
) - panic_info
->mph_panic_log_offset
;
1054 /* Flush the panic log */
1055 paniclog_flush_internal(kPaniclogFlushBase
);
1057 /* Try to take a panic stackshot */
1058 RecordPanicStackshot();
1061 * Flush the panic log again with the stackshot or any relevant logging
1062 * from when we tried to capture it.
1064 if (extended_debug_log_enabled
) {
1065 paniclog_flush_internal(kPaniclogFlushStackshot
);
/*
 * Flush some or all of the panic log to persistent storage.
 *
 * variant selects how much to flush:
 *   kPaniclogFlushBase      - header + base panic log
 *   kPaniclogFlushStackshot - stackshot region, then other log, then header
 *   kPaniclogFlushOtherLog  - other log, then header
 *
 * On co-processor systems the data goes out via the PESavePanicInfoAction
 * panic-sync callbacks; otherwise it is packed and committed to NVRAM
 * (unless that has been disabled).
 *
 * NOTE(review): several closing braces / else lines are missing from this
 * chunk; code below is left byte-identical.
 */
1069 void paniclog_flush_internal(paniclog_flush_type_t variant
)
1071 /* Update the other log offset if we've opened the other log */
1072 if (panic_info
->mph_other_log_offset
!= 0) {
1073 panic_info
->mph_other_log_len
= PE_get_offset_into_panic_region(debug_buf_ptr
) - panic_info
->mph_other_log_offset
;
1077 * If we've detected that we're on a co-processor system, we flush the panic log via the kPEPanicSync
1078 * panic callbacks, otherwise we flush via nvram (unless that has been disabled).
1080 if (coprocessor_paniclog_flush
) {
1081 uint32_t overall_buffer_size
= debug_buf_size
;
1082 uint32_t size_to_flush
= 0, offset_to_flush
= 0;
1083 if (extended_debug_log_enabled
) {
1085 * debug_buf_size for the extended log does not include the length of the header.
1086 * There may be some extra data at the end of the 'basic' log that wouldn't get flushed
1087 * for the non-extended case (this is a concession we make to not shrink the paniclog data
1088 * for non-coprocessor systems that only use the basic log).
1090 overall_buffer_size
= debug_buf_size
+ sizeof(struct macos_panic_header
);
/*
 * Update the CRC. The checksum covers everything from mph_version to the
 * end of the buffer, so it must be recomputed before each flush.
 */
1093 /* Update the CRC */
1094 panic_info
->mph_crc
= crc32(0L, &panic_info
->mph_version
, (overall_buffer_size
- offsetof(struct macos_panic_header
, mph_version
)));
1096 if (variant
== kPaniclogFlushBase
) {
1097 /* Flush the header and base panic log. */
1098 kprintf("Flushing base panic log\n");
1099 size_to_flush
= ROUNDUP((panic_info
->mph_panic_log_offset
+ panic_info
->mph_panic_log_len
), PANIC_FLUSH_BOUNDARY
);
1100 offset_to_flush
= 0;
1101 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
1102 } else if ((variant
== kPaniclogFlushStackshot
) || (variant
== kPaniclogFlushOtherLog
)) {
1103 if (variant
== kPaniclogFlushStackshot
) {
1105 * We flush the stackshot before flushing the updated header because the stackshot
1106 * can take a while to flush. We want the paniclog header to be as consistent as possible even
1107 * if the stackshot isn't flushed completely. Flush starting from the end of the panic log.
1109 kprintf("Flushing panic log stackshot\n");
/* Flush window is rounded to PANIC_FLUSH_BOUNDARY on both ends. */
1110 offset_to_flush
= ROUNDDOWN((panic_info
->mph_panic_log_offset
+ panic_info
->mph_panic_log_len
), PANIC_FLUSH_BOUNDARY
);
1111 size_to_flush
= ROUNDUP((panic_info
->mph_stackshot_len
+ (panic_info
->mph_stackshot_offset
- offset_to_flush
)), PANIC_FLUSH_BOUNDARY
);
1112 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
1115 /* Flush the other log -- everything after the stackshot */
1116 kprintf("Flushing panic 'other' log\n");
1117 offset_to_flush
= ROUNDDOWN((panic_info
->mph_stackshot_offset
+ panic_info
->mph_stackshot_len
), PANIC_FLUSH_BOUNDARY
);
1118 size_to_flush
= ROUNDUP((panic_info
->mph_other_log_len
+ (panic_info
->mph_other_log_offset
- offset_to_flush
)), PANIC_FLUSH_BOUNDARY
);
1119 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
/* Header goes last so it is consistent with the data already flushed. */
1121 /* Flush the header -- everything before the paniclog */
1122 kprintf("Flushing panic log header\n");
1123 size_to_flush
= ROUNDUP(panic_info
->mph_panic_log_offset
, PANIC_FLUSH_BOUNDARY
);
1124 offset_to_flush
= 0;
1125 PESavePanicInfoAction(panic_info
, offset_to_flush
, size_to_flush
);
1127 } else if (commit_paniclog_to_nvram
) {
1128 assert(debug_buf_size
!= 0);
1129 unsigned int bufpos
;
1130 unsigned long pi_size
= 0;
1136 * Now call the compressor
1137 * XXX Consider using the WKdm compressor in the
1138 * future, rather than just packing - would need to
1139 * be co-ordinated with crashreporter, which decodes
1140 * this post-restart. The compressor should be
1141 * capable of in-place compression.
1143 * Don't include the macOS panic header (for co-processor systems only)
/* NOTE(review): trailing packA() argument is on a stripped line. */
1145 bufpos
= packA(debug_buf_base
, (unsigned int) (debug_buf_ptr
- debug_buf_base
),
1148 * If compression was successful, use the compressed length
/* packA() returns 0 on failure; fall back to the uncompressed length. */
1150 pi_size
= bufpos
? bufpos
: (unsigned) (debug_buf_ptr
- debug_buf_base
);
1153 * The following sequence is a workaround for:
1154 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
1155 * any routines that use floating point (MMX in this case) when saving panic
1156 * logs to nvram/flash.
1162 * Save panic log to non-volatile store
1163 * Panic info handler must truncate data that is
1164 * too long for this platform.
1165 * This call must save data synchronously,
1166 * since we can subsequently halt the system.
1168 kprintf("Attempting to commit panic log to NVRAM\n");
1169 pi_size
= PESavePanicInfo((unsigned char *)debug_buf_base
,
1170 (uint32_t)pi_size
);
1174 * Uncompress in-place, to permit examination of
1175 * the panic log by debuggers.
1178 unpackA(debug_buf_base
, bufpos
);
/*
 * Public entry point: flush logging appended to the "other" log region.
 * NOTE(review): the enclosing function header (presumably
 * void paniclog_flush(void)) is on a line stripped from this chunk.
 */
1186 /* Called outside of this file to update logging appended to the "other" log */
1187 paniclog_flush_internal(kPaniclogFlushOtherLog
);
/*
 * Return machine-specific boot info as a string in the caller's buffer.
 * NOTE(review): the return-type line and function body are stripped from
 * this chunk -- only the parameter list survives; size is unused.
 */
1192 machine_boot_info(char *buf
, __unused vm_size_t size
)
1198 /* Routines for address - symbol translation. Not called unless the "keepsyms"
1199 * boot-arg is supplied.
/*
 * Look up the nearest preceding symbol for 'search' within the Mach-O
 * image at 'mh' and append "module : symbol + offset" to the panic log.
 * Walks the load commands to find __TEXT, __LINKEDIT and LC_SYMTAB, then
 * scans the nlist table for the closest non-STAB symbol <= search.
 *
 * NOTE(review): the return-type line, several braces, the `goto`/`return`
 * lines, and (apparently) the initialization of `diff` are stripped from
 * this chunk -- confirm `diff` is initialized in the full file before the
 * `curdiff < diff` comparison below.
 */
1203 panic_print_macho_symbol_name(kernel_mach_header_t
*mh
, vm_address_t search
, const char *module_name
)
1205 kernel_nlist_t
*sym
= NULL
;
1206 struct load_command
*cmd
;
1207 kernel_segment_command_t
*orig_ts
= NULL
, *orig_le
= NULL
;
1208 struct symtab_command
*orig_st
= NULL
;
1210 char *strings
, *bestsym
= NULL
;
1211 vm_address_t bestaddr
= 0, diff
, curdiff
;
1213 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
/* Load commands start immediately after the mach header. */
1215 cmd
= (struct load_command
*) &mh
[1];
1216 for (i
= 0; i
< mh
->ncmds
; i
++) {
1217 if (cmd
->cmd
== LC_SEGMENT_KERNEL
) {
1218 kernel_segment_command_t
*orig_sg
= (kernel_segment_command_t
*) cmd
;
1220 if (strncmp(SEG_TEXT
, orig_sg
->segname
,
1221 sizeof(orig_sg
->segname
)) == 0)
1223 else if (strncmp(SEG_LINKEDIT
, orig_sg
->segname
,
1224 sizeof(orig_sg
->segname
)) == 0)
1226 else if (strncmp("", orig_sg
->segname
,
1227 sizeof(orig_sg
->segname
)) == 0)
1228 orig_ts
= orig_sg
; /* pre-Lion i386 kexts have a single unnamed segment */
1230 else if (cmd
->cmd
== LC_SYMTAB
)
1231 orig_st
= (struct symtab_command
*) cmd
;
/* Advance by cmdsize to the next load command. */
1233 cmd
= (struct load_command
*) ((uintptr_t) cmd
+ cmd
->cmdsize
);
/* Bail out unless text, linkedit and symtab were all found. */
1236 if ((orig_ts
== NULL
) || (orig_st
== NULL
) || (orig_le
== NULL
))
1239 if ((search
< orig_ts
->vmaddr
) ||
1240 (search
>= orig_ts
->vmaddr
+ orig_ts
->vmsize
)) {
1241 /* search out of range for this mach header */
/* Symbol/string tables live in __LINKEDIT: translate file offsets to VM. */
1245 sym
= (kernel_nlist_t
*)(uintptr_t)(orig_le
->vmaddr
+ orig_st
->symoff
- orig_le
->fileoff
);
1246 strings
= (char *)(uintptr_t)(orig_le
->vmaddr
+ orig_st
->stroff
- orig_le
->fileoff
);
1249 for (i
= 0; i
< orig_st
->nsyms
; i
++) {
/* Skip debugger (STAB) symbols. */
1250 if (sym
[i
].n_type
& N_STAB
) continue;
1252 if (sym
[i
].n_value
<= search
) {
1253 curdiff
= search
- (vm_address_t
)sym
[i
].n_value
;
1254 if (curdiff
< diff
) {
1256 bestaddr
= sym
[i
].n_value
;
1257 bestsym
= strings
+ sym
[i
].n_un
.n_strx
;
1262 if (bestsym
!= NULL
) {
1264 paniclog_append_noflush("%s : %s + 0x%lx", module_name
, bestsym
, (unsigned long)diff
);
1266 paniclog_append_noflush("%s : %s", module_name
, bestsym
);
1273 extern kmod_info_t
* kmod
; /* the list of modules */
/*
 * Find the loaded kext whose address range contains 'search' and append a
 * symbolicated (or raw "name + offset") entry to the panic log.
 * NOTE(review): return-type line and loop/function braces are stripped
 * from this chunk.
 */
1276 panic_print_kmod_symbol_name(vm_address_t search
)
1280 if (gLoadedKextSummaries
== NULL
)
1282 for (i
= 0; i
< gLoadedKextSummaries
->numSummaries
; ++i
) {
1283 OSKextLoadedKextSummary
*summary
= gLoadedKextSummaries
->summaries
+ i
;
1285 if ((search
>= summary
->address
) &&
1286 (search
< (summary
->address
+ summary
->size
)))
/* The kext's Mach-O header sits at its load address. */
1288 kernel_mach_header_t
*header
= (kernel_mach_header_t
*)(uintptr_t) summary
->address
;
/* Fall back to "name + offset" if symbol lookup finds nothing. */
1289 if (panic_print_macho_symbol_name(header
, search
, summary
->name
) == 0) {
1290 paniclog_append_noflush("%s + %llu", summary
->name
, (unsigned long)search
- summary
->address
);
/*
 * Symbolicate 'search': try the kernel image first, then the loaded
 * kexts. NOTE(review): the return-type line is stripped from this chunk.
 */
1298 panic_print_symbol_name(vm_address_t search
)
1300 /* try searching in the kernel */
1301 if (panic_print_macho_symbol_name(&_mh_execute_header
, search
, "mach_kernel") == 0) {
1302 /* that failed, now try to search for the right kext */
1303 panic_print_kmod_symbol_name(search
);
1307 /* Generate a backtrace, given a frame pointer - this routine
1308 * should walk the stack safely. The trace is appended to the panic log
1309 * and conditionally, to the console. If the trace contains kernel module
1310 * addresses, display the module name, load address and dependencies.
1313 #define DUMPFRAMES 32
1314 #define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
/*
 * Walk up to 'nframes' stack frames starting at '_frame', validating each
 * frame pointer (alignment + physical mapping) before dereferencing it.
 * Optionally dumps the saved register state first (regdump/regs), and
 * symbolicates return addresses when the "keepsyms" boot-arg is set.
 * Serialized across CPUs via pbtlock/pbtcnt with a TSC-based timeout.
 *
 * NOTE(review): return-type line, some declarations (PC, frame_index) and
 * numerous braces are on lines stripped from this chunk.
 */
1316 panic_i386_backtrace(void *_frame
, int nframes
, const char *msg
, boolean_t regdump
, x86_saved_state_t
*regs
)
1318 cframe_t
*frame
= (cframe_t
*)_frame
;
1319 vm_offset_t raddrs
[DUMPFRAMES
];
1322 volatile uint32_t *ppbtcnt
= &pbtcnt
;
1323 uint64_t bt_tsc_timeout
;
1324 boolean_t keepsyms
= FALSE
;
1325 int cn
= cpu_number();
1326 boolean_t old_doprnt_hide_pointers
= doprnt_hide_pointers
;
/* Announce ourselves as an in-progress backtrace printer. */
1329 hw_atomic_add(&pbtcnt
, 1);
1330 /* Spin on print backtrace lock, which serializes output
1331 * Continue anyway if a timeout occurs.
1333 hw_lock_to(&pbtlock
, ~0U);
1337 if (__improbable(doprnt_hide_pointers
== TRUE
)) {
1338 /* If we're called directly, the Debugger() function will not be called,
1339 * so we need to reset the value in here. */
1340 doprnt_hide_pointers
= FALSE
;
1345 PE_parse_boot_argn("keepsyms", &keepsyms
, sizeof (keepsyms
));
1348 paniclog_append_noflush("%s", msg
);
1351 if ((regdump
== TRUE
) && (regs
!= NULL
)) {
1352 x86_saved_state64_t
*ss64p
= saved_state64(regs
);
1353 paniclog_append_noflush(
1354 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1355 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1356 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1357 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1358 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
1359 ss64p
->rax
, ss64p
->rbx
, ss64p
->rcx
, ss64p
->rdx
,
1360 ss64p
->isf
.rsp
, ss64p
->rbp
, ss64p
->rsi
, ss64p
->rdi
,
1361 ss64p
->r8
, ss64p
->r9
, ss64p
->r10
, ss64p
->r11
,
1362 ss64p
->r12
, ss64p
->r13
, ss64p
->r14
, ss64p
->r15
,
1363 ss64p
->isf
.rflags
, ss64p
->isf
.rip
, ss64p
->isf
.cs
,
/* Remember the faulting PC so it can be attributed to a kmod below. */
1365 PC
= ss64p
->isf
.rip
;
1368 paniclog_append_noflush("Backtrace (CPU %d), "
1369 #if PRINT_ARGS_FROM_STACK_FRAME
1370 "Frame : Return Address (4 potential args on stack)\n", cn
);
1372 "Frame : Return Address\n", cn
);
1375 for (frame_index
= 0; frame_index
< nframes
; frame_index
++) {
1376 vm_offset_t curframep
= (vm_offset_t
) frame
;
/* Validate the frame pointer before touching it: alignment first... */
1381 if (curframep
& 0x3) {
1382 paniclog_append_noflush("Unaligned frame\n");
/* ...then that the whole cframe_t is physically mapped. */
1386 if (!kvtophys(curframep
) ||
1387 !kvtophys(curframep
+ sizeof(cframe_t
) - 1)) {
1388 paniclog_append_noflush("No mapping exists for frame pointer\n");
1392 paniclog_append_noflush("%p : 0x%lx ", frame
, frame
->caller
);
/* Record return addresses (up to DUMPFRAMES) for kmod attribution. */
1393 if (frame_index
< DUMPFRAMES
)
1394 raddrs
[frame_index
] = frame
->caller
;
1396 #if PRINT_ARGS_FROM_STACK_FRAME
1397 if (kvtophys((vm_offset_t
)&(frame
->args
[3])))
1398 paniclog_append_noflush("(0x%x 0x%x 0x%x 0x%x) ",
1399 frame
->args
[0], frame
->args
[1],
1400 frame
->args
[2], frame
->args
[3]);
1403 /* Display address-symbol translation only if the "keepsyms"
1404 * boot-arg is supplied, since we unload LINKEDIT otherwise.
1405 * This routine is potentially unsafe; also, function
1406 * boundary identification is unreliable after a strip -x.
1409 panic_print_symbol_name((vm_address_t
)frame
->caller
);
1411 paniclog_append_noflush("\n");
1413 frame
= frame
->prev
;
1416 if (frame_index
>= nframes
)
1417 paniclog_append_noflush("\tBacktrace continues...\n");
1422 paniclog_append_noflush("Backtrace terminated-invalid frame pointer %p\n",frame
);
1425 /* Identify kernel modules in the backtrace and display their
1426 * load addresses and dependencies. This routine should walk
1427 * the kmod list safely.
1430 kmod_panic_dump((vm_offset_t
*)&raddrs
[0], frame_index
);
1433 kmod_panic_dump(&PC
, 1);
1435 panic_display_system_configuration(FALSE
);
1437 doprnt_hide_pointers
= old_doprnt_hide_pointers
;
1439 /* Release print backtrace lock, to permit other callers in the
1440 * event of panics on multiple processors.
1442 hw_lock_unlock(&pbtlock
);
1443 hw_atomic_sub(&pbtcnt
, 1);
1444 /* Wait for other processors to complete output
1445 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1447 bt_tsc_timeout
= rdtsc64() + PBT_TIMEOUT_CYCLES
;
1448 while(*ppbtcnt
&& (rdtsc64() < bt_tsc_timeout
));
/*
 * Copy 'size' bytes from user address 'uaddr' in pmap 'p' into kernel
 * buffer 'dest', one physical-page-bounded chunk at a time, using
 * physical copies so it is safe in panic context (no faults taken).
 *
 * NOTE(review): the return-type line, the chunking loop header, the
 * declaration of `rem`, and the per-iteration pointer/remainder updates
 * are on lines stripped from this chunk; code below is left byte-identical.
 */
1452 debug_copyin(pmap_t p
, uint64_t uaddr
, void *dest
, size_t size
)
1455 char *kvaddr
= dest
;
/* Translate the user VA to a physical page via its pmap. */
1458 ppnum_t upn
= pmap_find_phys(p
, uaddr
);
1459 uint64_t phys_src
= ptoa_64(upn
) | (uaddr
& PAGE_MASK
);
1460 uint64_t phys_dest
= kvtophys((vm_offset_t
)kvaddr
);
/* Limit each chunk so neither side crosses a page boundary. */
1461 uint64_t src_rem
= PAGE_SIZE
- (phys_src
& PAGE_MASK
);
1462 uint64_t dst_rem
= PAGE_SIZE
- (phys_dest
& PAGE_MASK
);
1463 size_t cur_size
= (uint32_t) MIN(src_rem
, dst_rem
);
1464 cur_size
= MIN(cur_size
, rem
);
/* Only copy if the source page is valid and the dest translated. */
1466 if (upn
&& pmap_valid_page(upn
) && phys_dest
) {
1467 bcopy_phys(phys_src
, phys_dest
, cur_size
);
/*
 * Append the 64-bit saved user register state of 'thread' to the panic
 * log. NOTE(review): the return-type line is stripped from this chunk;
 * only the ss_64 layout is dumped here (no 32-bit path visible).
 */
1479 print_threads_registers(thread_t thread
)
1481 x86_saved_state_t
*savestate
;
1483 savestate
= get_user_regs(thread
);
1484 paniclog_append_noflush(
1485 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1486 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1487 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1488 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1489 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1490 savestate
->ss_64
.rax
, savestate
->ss_64
.rbx
, savestate
->ss_64
.rcx
, savestate
->ss_64
.rdx
,
1491 savestate
->ss_64
.isf
.rsp
, savestate
->ss_64
.rbp
, savestate
->ss_64
.rsi
, savestate
->ss_64
.rdi
,
1492 savestate
->ss_64
.r8
, savestate
->ss_64
.r9
, savestate
->ss_64
.r10
, savestate
->ss_64
.r11
,
1493 savestate
->ss_64
.r12
, savestate
->ss_64
.r13
, savestate
->ss_64
.r14
, savestate
->ss_64
.r15
,
1494 savestate
->ss_64
.isf
.rflags
, savestate
->ss_64
.isf
.rip
, savestate
->ss_64
.isf
.cs
,
1495 savestate
->ss_64
.isf
.ss
);
/*
 * Walk every thread of 'task' and append its RIP plus a user-space
 * backtrace (via print_one_backtrace, starting at the saved RBP) to the
 * panic log. NOTE(review): return-type line and the declarations of
 * j/pmap/rbp are on lines stripped from this chunk.
 */
1499 print_tasks_user_threads(task_t task
)
1501 thread_t thread
= current_thread();
1502 x86_saved_state_t
*savestate
;
1505 const char *cur_marker
= 0;
1508 for (j
= 0, thread
= (thread_t
) queue_first(&task
->threads
); j
< task
->thread_count
;
1509 ++j
, thread
= (thread_t
) queue_next(&thread
->task_threads
)) {
1511 paniclog_append_noflush("Thread %d: %p\n", j
, thread
);
1512 pmap
= get_task_pmap(task
);
1513 savestate
= get_user_regs(thread
);
/* Frame-pointer walk starts from the thread's saved user RBP. */
1514 rbp
= savestate
->ss_64
.rbp
;
1515 paniclog_append_noflush("\t0x%016llx\n", savestate
->ss_64
.isf
.rip
);
1516 print_one_backtrace(pmap
, (vm_offset_t
)rbp
, cur_marker
, TRUE
);
1517 paniclog_append_noflush("\n");
/*
 * Scan 'task''s thread list for the current (crashing) thread and log its
 * ordinal. NOTE(review): return-type line and the declarations of
 * j/thread are on lines stripped from this chunk.
 */
1522 print_thread_num_that_crashed(task_t task
)
1524 thread_t c_thread
= current_thread();
1528 for (j
= 0, thread
= (thread_t
) queue_first(&task
->threads
); j
< task
->thread_count
;
1529 ++j
, thread
= (thread_t
) queue_next(&thread
->task_threads
)) {
1531 if (c_thread
== thread
) {
1532 paniclog_append_noflush("\nThread %d crashed\n", j
);
1538 #define PANICLOG_UUID_BUF_SIZE 256
/*
 * Append the UUID load-info list for 'task' to the panic log. Reads the
 * dyld_all_image_infos structure from task memory (via debug_copyin, so
 * safe at panic time), then copies the UUID array in
 * PANICLOG_UUID_BUF_SIZE-sized chunks, printing each image's load address
 * and 16-byte UUID. Only the 64-bit dyld layout is handled here.
 *
 * NOTE(review): several braces, the declaration of k, and the tail of the
 * debug_copyin() call at 1586 are on lines stripped from this chunk.
 */
1540 void print_uuid_info(task_t task
)
1542 uint32_t uuid_info_count
= 0;
1543 mach_vm_address_t uuid_info_addr
= 0;
/* Validate the task's map and pmap before dereferencing them. */
1544 boolean_t have_map
= (task
->map
!= NULL
) && (ml_validate_nofault((vm_offset_t
)(task
->map
), sizeof(struct _vm_map
)));
1545 boolean_t have_pmap
= have_map
&& (task
->map
->pmap
!= NULL
) && (ml_validate_nofault((vm_offset_t
)(task
->map
->pmap
), sizeof(struct pmap
)));
1546 int task_pid
= pid_from_task(task
);
1547 char uuidbuf
[PANICLOG_UUID_BUF_SIZE
] = {0};
1548 char *uuidbufptr
= uuidbuf
;
1551 if (have_pmap
&& task
->active
&& task_pid
> 0) {
1552 /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
1553 struct user64_dyld_all_image_infos task_image_infos
;
1554 if (debug_copyin(task
->map
->pmap
, task
->all_image_info_addr
,
1555 &task_image_infos
, sizeof(struct user64_dyld_all_image_infos
))) {
1556 uuid_info_count
= (uint32_t)task_image_infos
.uuidArrayCount
;
1557 uuid_info_addr
= task_image_infos
.uuidArray
;
1560 /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
1561 * in the middle of updating this data structure), we zero the
1562 * uuid_info_count so that we won't even try to save load info for this task
1564 if (!uuid_info_addr
) {
1565 uuid_info_count
= 0;
1569 if (task_pid
> 0 && uuid_info_count
> 0) {
1570 uint32_t uuid_info_size
= sizeof(struct user64_dyld_uuid_info
);
1571 uint32_t uuid_array_size
= uuid_info_count
* uuid_info_size
;
1572 uint32_t uuid_copy_size
= 0;
1573 uint32_t uuid_image_count
= 0;
1574 char *current_uuid_buffer
= NULL
;
1575 /* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */
1577 paniclog_append_noflush("\nuuid info:\n");
/* Consume the array in buffer-sized chunks until nothing remains. */
1578 while (uuid_array_size
) {
1579 if (uuid_array_size
<= PANICLOG_UUID_BUF_SIZE
) {
1580 uuid_copy_size
= uuid_array_size
;
1581 uuid_image_count
= uuid_array_size
/uuid_info_size
;
1583 uuid_image_count
= PANICLOG_UUID_BUF_SIZE
/uuid_info_size
;
1584 uuid_copy_size
= uuid_image_count
* uuid_info_size
;
1586 if (have_pmap
&& !debug_copyin(task
->map
->pmap
, uuid_info_addr
, uuidbufptr
,
1588 paniclog_append_noflush("Error!! Failed to copy UUID info for task %p pid %d\n", task
, task_pid
);
1589 uuid_image_count
= 0;
1593 if (uuid_image_count
> 0) {
1594 current_uuid_buffer
= uuidbufptr
;
1595 for (k
= 0; k
< uuid_image_count
; k
++) {
/* Each entry: 8-byte load address followed by a 16-byte UUID. */
1596 paniclog_append_noflush(" %#llx", *(uint64_t *)current_uuid_buffer
);
1597 current_uuid_buffer
+= sizeof(uint64_t);
1598 uint8_t *uuid
= (uint8_t *)current_uuid_buffer
;
1599 paniclog_append_noflush("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
1600 uuid
[0], uuid
[1], uuid
[2], uuid
[3], uuid
[4], uuid
[5], uuid
[6], uuid
[7], uuid
[8],
1601 uuid
[9], uuid
[10], uuid
[11], uuid
[12], uuid
[13], uuid
[14], uuid
[15]);
1602 current_uuid_buffer
+= 16;
/* Clear the scratch buffer before the next chunk. */
1604 bzero(&uuidbuf
, sizeof(uuidbuf
));
1606 uuid_info_addr
+= uuid_copy_size
;
1607 uuid_array_size
-= uuid_copy_size
;
/*
 * Special panic reporting for "launchd died" panics: dump the current
 * task's UUID info, the crashing thread's ordinal and registers, and a
 * user backtrace for every thread. Uses the same pbtlock/pbtcnt
 * serialization and TSC timeout protocol as panic_i386_backtrace.
 */
1612 void print_launchd_info(void)
1614 task_t task
= current_task();
1615 thread_t thread
= current_thread();
1616 volatile uint32_t *ppbtcnt
= &pbtcnt
;
1617 uint64_t bt_tsc_timeout
;
1618 int cn
= cpu_number();
1621 hw_atomic_add(&pbtcnt
, 1);
1622 /* Spin on print backtrace lock, which serializes output
1623 * Continue anyway if a timeout occurs.
1625 hw_lock_to(&pbtlock
, ~0U);
1629 print_uuid_info(task
);
1630 print_thread_num_that_crashed(task
);
1631 print_threads_registers(thread
);
1632 print_tasks_user_threads(task
);
1634 panic_display_system_configuration(TRUE
);
1636 /* Release print backtrace lock, to permit other callers in the
1637 * event of panics on multiple processors.
1639 hw_lock_unlock(&pbtlock
);
1640 hw_atomic_sub(&pbtcnt
, 1);
1641 /* Wait for other processors to complete output
1642 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1644 bt_tsc_timeout
= rdtsc64() + PBT_TIMEOUT_CYCLES
;
1645 while(*ppbtcnt
&& (rdtsc64() < bt_tsc_timeout
));