2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
66 * Basic initialization for I386 - ISA bus machines.
70 #include <mach/i386/vm_param.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_prot.h>
75 #include <mach/machine.h>
76 #include <mach/time_value.h>
77 #include <sys/kdebug.h>
79 #include <kern/assert.h>
80 #include <kern/debug.h>
81 #include <kern/misc_protos.h>
82 #include <kern/startup.h>
83 #include <kern/clock.h>
84 #include <kern/cpu_data.h>
85 #include <kern/machine.h>
86 #include <i386/postcode.h>
87 #include <i386/mp_desc.h>
88 #include <i386/misc_protos.h>
89 #include <i386/thread.h>
90 #include <i386/trap.h>
91 #include <i386/machine_routines.h>
92 #include <i386/mp.h> /* mp_rendezvous_break_lock */
93 #include <i386/cpuid.h>
95 #include <i386/machine_cpu.h>
96 #include <i386/pmap.h>
98 #include <i386/mtrr.h>
100 #include <i386/ucode.h>
101 #include <i386/pmCPU.h>
102 #include <i386/panic_hooks.h>
104 #include <architecture/i386/pio.h> /* inb() */
105 #include <pexpert/i386/boot.h>
107 #include <kdp/kdp_dyld.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_kern.h>
112 #include <IOKit/IOPlatformExpert.h>
113 #include <IOKit/IOHibernatePrivate.h>
115 #include <pexpert/i386/efi.h>
117 #include <kern/thread.h>
118 #include <kern/sched.h>
119 #include <mach-o/loader.h>
120 #include <mach-o/nlist.h>
122 #include <libkern/kernel_mach_header.h>
123 #include <libkern/OSKextLibPrivate.h>
125 #include <mach/branch_predicates.h>
128 #define DPRINTF(x...) kprintf(x)
130 #define DPRINTF(x...)
133 static void machine_conf(void);
134 void panic_print_symbol_name(vm_address_t search
);
136 extern boolean_t init_task_died
;
137 extern const char version
[];
138 extern char osversion
[];
139 extern int max_unsafe_quanta
;
140 extern int max_poll_quanta
;
141 extern unsigned int panic_is_inited
;
143 extern int proc_pid(void *p
);
145 /* Definitions for frame pointers */
146 #define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
147 #define FP_LR_OFFSET ((uint32_t)4)
148 #define FP_LR_OFFSET64 ((uint32_t)8)
149 #define FP_MAX_NUM_TO_EVALUATE (50)
153 volatile int pbtcpu
= -1;
154 hw_lock_data_t pbtlock
; /* backtrace print lock */
157 volatile int panic_double_fault_cpu
= -1;
159 #define PRINT_ARGS_FROM_STACK_FRAME 0
161 typedef struct _cframe_t
{
162 struct _cframe_t
*prev
;
164 #if PRINT_ARGS_FROM_STACK_FRAME
169 static unsigned panic_io_port
;
170 static unsigned commit_paniclog_to_nvram
;
172 unsigned int debug_boot_arg
;
175 * Backtrace a single frame.
178 print_one_backtrace(pmap_t pmap
, vm_offset_t topfp
, const char *cur_marker
,
179 boolean_t is_64_bit
, boolean_t nvram_format
)
186 boolean_t dump_kernel_stack
;
192 if (fp
>= VM_MIN_KERNEL_ADDRESS
)
193 dump_kernel_stack
= TRUE
;
195 dump_kernel_stack
= FALSE
;
198 if ((fp
== 0) || ((fp
& FP_ALIGNMENT_MASK
) != 0))
200 if (dump_kernel_stack
&& ((fp
< VM_MIN_KERNEL_ADDRESS
) || (fp
> VM_MAX_KERNEL_ADDRESS
)))
202 if ((!dump_kernel_stack
) && (fp
>=VM_MIN_KERNEL_ADDRESS
))
205 /* Check to see if current address will result in a different
206 ppn than previously computed (to avoid recomputation) via
207 (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */
209 if ((((fp
+ FP_LR_OFFSET
) ^ fp_for_ppn
) >> PAGE_SHIFT
) != 0x0U
) {
210 ppn
= pmap_find_phys(pmap
, fp
+ FP_LR_OFFSET
);
211 fp_for_ppn
= fp
+ (is_64_bit
? FP_LR_OFFSET64
: FP_LR_OFFSET
);
213 if (ppn
!= (ppnum_t
)NULL
) {
215 lr
= ml_phys_read_double_64(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | ((fp
+ FP_LR_OFFSET64
) & PAGE_MASK
));
217 lr
= ml_phys_read_word(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | ((fp
+ FP_LR_OFFSET
) & PAGE_MASK
));
221 kdb_printf("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker
, fp
+ FP_LR_OFFSET64
);
223 kdb_printf("%s\t Could not read LR from frame at 0x%08x\n", cur_marker
, (uint32_t)(fp
+ FP_LR_OFFSET
));
227 if (((fp
^ fp_for_ppn
) >> PAGE_SHIFT
) != 0x0U
) {
228 ppn
= pmap_find_phys(pmap
, fp
);
231 if (ppn
!= (ppnum_t
)NULL
) {
233 fp
= ml_phys_read_double_64(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | (fp
& PAGE_MASK
));
235 fp
= ml_phys_read_word(((((vm_offset_t
)ppn
) << PAGE_SHIFT
)) | (fp
& PAGE_MASK
));
239 kdb_printf("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker
, fp
);
241 kdb_printf("%s\t Could not read FP from frame at 0x%08x\n", cur_marker
, (uint32_t)fp
);
248 kdb_printf("%s\t0x%016llx\n", cur_marker
, lr
);
250 kdb_printf("%s\t0x%08x\n", cur_marker
, (uint32_t)lr
);
254 kdb_printf("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker
, lr
, fp
);
256 kdb_printf("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker
, (uint32_t)lr
, (uint32_t)fp
);
259 } while ((++i
< FP_MAX_NUM_TO_EVALUATE
) && (fp
!= topfp
));
262 machine_startup(void)
267 if( PE_get_hotkey( kPEControlKey
))
268 halt_in_debugger
= halt_in_debugger
? 0 : 1;
271 if (PE_parse_boot_argn("debug", &debug_boot_arg
, sizeof (debug_boot_arg
))) {
272 panicDebugging
= TRUE
;
273 if (debug_boot_arg
& DB_HALT
) halt_in_debugger
=1;
274 if (debug_boot_arg
& DB_PRT
) disable_debug_output
=FALSE
;
275 if (debug_boot_arg
& DB_SLOG
) systemLogDiags
=TRUE
;
276 if (debug_boot_arg
& DB_LOG_PI_SCRN
) logPanicDataToScreen
=TRUE
;
277 #if KDEBUG_MOJO_TRACE
278 if (debug_boot_arg
& DB_PRT_KDEBUG
) {
279 kdebug_serial
= TRUE
;
280 disable_debug_output
= FALSE
;
287 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram
, sizeof (commit_paniclog_to_nvram
)))
288 commit_paniclog_to_nvram
= 1;
291 * Entering the debugger will put the CPUs into a "safe"
294 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg
, sizeof (boot_arg
)))
295 pmsafe_debug
= boot_arg
;
298 hw_lock_init(&debugger_lock
); /* initialize debugger lock */
300 hw_lock_init(&pbtlock
); /* initialize print backtrace lock */
302 if (PE_parse_boot_argn("preempt", &boot_arg
, sizeof (boot_arg
))) {
303 default_preemption_rate
= boot_arg
;
305 if (PE_parse_boot_argn("unsafe", &boot_arg
, sizeof (boot_arg
))) {
306 max_unsafe_quanta
= boot_arg
;
308 if (PE_parse_boot_argn("poll", &boot_arg
, sizeof (boot_arg
))) {
309 max_poll_quanta
= boot_arg
;
311 if (PE_parse_boot_argn("yield", &boot_arg
, sizeof (boot_arg
))) {
312 sched_poll_yield_shift
= boot_arg
;
314 /* The I/O port to issue a read from, in the event of a panic. Useful for
315 * triggering logic analyzers.
317 if (PE_parse_boot_argn("panic_io_port", &boot_arg
, sizeof (boot_arg
))) {
318 /*I/O ports range from 0 through 0xFFFF */
319 panic_io_port
= boot_arg
& 0xffff;
337 machine_info
.memory_size
= (typeof(machine_info
.memory_size
))mem_size
;
341 extern void *gPEEFIRuntimeServices
;
342 extern void *gPEEFISystemTable
;
345 * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
346 * code or tables extracted from it, as desired without restriction.
348 * First, the polynomial itself and its table of feedback terms. The
350 * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
352 * Note that we take it "backwards" and put the highest-order term in
353 * the lowest-order bit. The X^32 term is "implied"; the LSB is the
354 * X^31 term, etc. The X^0 term (usually shown as "+1") results in
357 * Note that the usual hardware shift register implementation, which
358 * is what we're using (we're merely optimizing it by doing eight-bit
359 * chunks at a time) shifts bits into the lowest-order term. In our
360 * implementation, that means shifting towards the right. Why do we
361 * do it this way? Because the calculated CRC must be transmitted in
362 * order from highest-order term to lowest-order term. UARTs transmit
363 * characters in order from LSB to MSB. By storing the CRC this way
364 * we hand it to the UART in the order low-byte to high-byte; the UART
365 * sends each low-bit to high-bit; and the result is transmission bit
366 * by bit from highest- to lowest-order term without requiring any bit
367 * shuffling on our part. Reception works similarly
369 * The feedback terms table consists of 256, 32-bit entries. Notes
371 * The table can be generated at runtime if desired; code to do so
372 * is shown later. It might not be obvious, but the feedback
373 * terms simply represent the results of eight shift/xor opera
374 * tions for all combinations of data and CRC register values
376 * The values must be right-shifted by eight bits by the "updcrc
377 * logic; the shift must be unsigned (bring in zeroes). On some
378 * hardware you could probably optimize the shift in assembler by
379 * using byte-swap instructions
380 * polynomial $edb88320
383 * CRC32 code derived from work by Gary S. Brown.
/*
 * Standard CRC-32 (polynomial 0xedb88320, reflected), per Gary S. Brown's
 * public-domain implementation.  The table holds the 256 precomputed
 * feedback terms; crc32() folds one byte per iteration, shifting right.
 * const-qualified: the table is read-only and belongs in rodata.
 */
static const uint32_t crc32_tab[] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
	0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
	0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
	0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
	0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
	0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
	0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
	0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
	0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
	0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
	0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
	0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
	0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
	0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
	0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
	0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
	0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
	0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};

/*
 * Compute the CRC-32 of 'size' bytes at 'buf', chaining from 'crc'.
 *
 * crc:  running CRC from a previous call, or 0 to start a new computation
 *       (the pre- and post-inversion with ~0U are handled internally, so
 *       0 — not ~0 — is the correct seed, matching zlib's crc32()).
 * buf:  bytes to checksum (read-only, treated as unsigned bytes).
 * size: number of bytes to process.
 *
 * Returns the updated CRC-32.  Used here to validate the CRC32 field of
 * EFI table headers.
 */
static uint32_t
crc32(uint32_t crc, const void *buf, size_t size)
{
	const uint8_t *p = buf;

	crc = crc ^ ~0U;

	while (size--)
		crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);

	return crc ^ ~0U;
}
447 efi_set_tables_64(EFI_SYSTEM_TABLE_64
* system_table
)
449 EFI_RUNTIME_SERVICES_64
*runtime
;
453 DPRINTF("Processing 64-bit EFI tables at %p\n", system_table
);
455 DPRINTF("Header:\n");
456 DPRINTF(" Signature: 0x%016llx\n", system_table
->Hdr
.Signature
);
457 DPRINTF(" Revision: 0x%08x\n", system_table
->Hdr
.Revision
);
458 DPRINTF(" HeaderSize: 0x%08x\n", system_table
->Hdr
.HeaderSize
);
459 DPRINTF(" CRC32: 0x%08x\n", system_table
->Hdr
.CRC32
);
460 DPRINTF("RuntimeServices: 0x%016llx\n", system_table
->RuntimeServices
);
461 if (system_table
->Hdr
.Signature
!= EFI_SYSTEM_TABLE_SIGNATURE
) {
462 kprintf("Bad EFI system table signature\n");
465 // Verify signature of the system table
466 hdr_cksum
= system_table
->Hdr
.CRC32
;
467 system_table
->Hdr
.CRC32
= 0;
468 cksum
= crc32(0L, system_table
, system_table
->Hdr
.HeaderSize
);
470 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
471 system_table
->Hdr
.CRC32
= hdr_cksum
;
472 if (cksum
!= hdr_cksum
) {
473 kprintf("Bad EFI system table checksum\n");
477 gPEEFISystemTable
= system_table
;
479 if(system_table
->RuntimeServices
== 0) {
480 kprintf("No runtime table present\n");
483 DPRINTF("RuntimeServices table at 0x%qx\n", system_table
->RuntimeServices
);
484 // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
485 runtime
= (EFI_RUNTIME_SERVICES_64
*) (uintptr_t)system_table
->RuntimeServices
;
486 DPRINTF("Checking runtime services table %p\n", runtime
);
487 if (runtime
->Hdr
.Signature
!= EFI_RUNTIME_SERVICES_SIGNATURE
) {
488 kprintf("Bad EFI runtime table signature\n");
492 // Verify signature of runtime services table
493 hdr_cksum
= runtime
->Hdr
.CRC32
;
494 runtime
->Hdr
.CRC32
= 0;
495 cksum
= crc32(0L, runtime
, runtime
->Hdr
.HeaderSize
);
497 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
498 runtime
->Hdr
.CRC32
= hdr_cksum
;
499 if (cksum
!= hdr_cksum
) {
500 kprintf("Bad EFI runtime table checksum\n");
504 gPEEFIRuntimeServices
= runtime
;
510 efi_set_tables_32(EFI_SYSTEM_TABLE_32
* system_table
)
512 EFI_RUNTIME_SERVICES_32
*runtime
;
516 DPRINTF("Processing 32-bit EFI tables at %p\n", system_table
);
518 DPRINTF("Header:\n");
519 DPRINTF(" Signature: 0x%016llx\n", system_table
->Hdr
.Signature
);
520 DPRINTF(" Revision: 0x%08x\n", system_table
->Hdr
.Revision
);
521 DPRINTF(" HeaderSize: 0x%08x\n", system_table
->Hdr
.HeaderSize
);
522 DPRINTF(" CRC32: 0x%08x\n", system_table
->Hdr
.CRC32
);
523 DPRINTF("RuntimeServices: 0x%08x\n", system_table
->RuntimeServices
);
524 if (system_table
->Hdr
.Signature
!= EFI_SYSTEM_TABLE_SIGNATURE
) {
525 kprintf("Bad EFI system table signature\n");
528 // Verify signature of the system table
529 hdr_cksum
= system_table
->Hdr
.CRC32
;
530 system_table
->Hdr
.CRC32
= 0;
531 DPRINTF("System table at %p HeaderSize 0x%x\n", system_table
, system_table
->Hdr
.HeaderSize
);
532 cksum
= crc32(0L, system_table
, system_table
->Hdr
.HeaderSize
);
534 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
535 system_table
->Hdr
.CRC32
= hdr_cksum
;
536 if (cksum
!= hdr_cksum
) {
537 kprintf("Bad EFI system table checksum\n");
541 gPEEFISystemTable
= system_table
;
543 if(system_table
->RuntimeServices
== 0) {
544 kprintf("No runtime table present\n");
547 DPRINTF("RuntimeServices table at 0x%x\n", system_table
->RuntimeServices
);
548 // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
549 // For a 64-bit kernel, booter provides a virtual address mod 4G
550 runtime
= (EFI_RUNTIME_SERVICES_32
*)
551 (system_table
->RuntimeServices
| VM_MIN_KERNEL_ADDRESS
);
552 DPRINTF("Runtime table addressed at %p\n", runtime
);
553 if (runtime
->Hdr
.Signature
!= EFI_RUNTIME_SERVICES_SIGNATURE
) {
554 kprintf("Bad EFI runtime table signature\n");
558 // Verify signature of runtime services table
559 hdr_cksum
= runtime
->Hdr
.CRC32
;
560 runtime
->Hdr
.CRC32
= 0;
561 cksum
= crc32(0L, runtime
, runtime
->Hdr
.HeaderSize
);
563 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum
, hdr_cksum
);
564 runtime
->Hdr
.CRC32
= hdr_cksum
;
565 if (cksum
!= hdr_cksum
) {
566 kprintf("Bad EFI runtime table checksum\n");
570 DPRINTF("Runtime functions\n");
571 DPRINTF(" GetTime : 0x%x\n", runtime
->GetTime
);
572 DPRINTF(" SetTime : 0x%x\n", runtime
->SetTime
);
573 DPRINTF(" GetWakeupTime : 0x%x\n", runtime
->GetWakeupTime
);
574 DPRINTF(" SetWakeupTime : 0x%x\n", runtime
->SetWakeupTime
);
575 DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime
->SetVirtualAddressMap
);
576 DPRINTF(" ConvertPointer : 0x%x\n", runtime
->ConvertPointer
);
577 DPRINTF(" GetVariable : 0x%x\n", runtime
->GetVariable
);
578 DPRINTF(" GetNextVariableName : 0x%x\n", runtime
->GetNextVariableName
);
579 DPRINTF(" SetVariable : 0x%x\n", runtime
->SetVariable
);
580 DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime
->GetNextHighMonotonicCount
);
581 DPRINTF(" ResetSystem : 0x%x\n", runtime
->ResetSystem
);
583 gPEEFIRuntimeServices
= runtime
;
589 /* Map in EFI runtime areas. */
593 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
595 kprintf("Initializing EFI runtime services\n");
599 vm_offset_t vm_size
, vm_addr
;
600 vm_map_offset_t phys_addr
;
601 EfiMemoryRange
*mptr
;
602 unsigned int msize
, mcount
;
605 msize
= args
->MemoryMapDescriptorSize
;
606 mcount
= args
->MemoryMapSize
/ msize
;
608 DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
609 args
->kaddr
, args
->ksize
);
610 DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
611 args
->efiSystemTable
,
612 (void *) ml_static_ptovirt(args
->efiSystemTable
));
613 DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
614 args
->efiRuntimeServicesPageStart
);
615 DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
616 args
->efiRuntimeServicesPageCount
);
617 DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
618 args
->efiRuntimeServicesVirtualPageStart
);
619 mptr
= (EfiMemoryRange
*)ml_static_ptovirt(args
->MemoryMap
);
620 for (i
=0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
621 if (((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) ) {
622 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
623 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
624 /* For K64 on EFI32, shadow-map into high KVA */
625 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
)
626 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
627 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
628 DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
630 (void *) (uintptr_t) phys_addr
,
631 (void *) (uintptr_t) mptr
->VirtualStart
,
634 pmap_map_bd(vm_addr
, phys_addr
, phys_addr
+ round_page(vm_size
),
635 (mptr
->Type
== kEfiRuntimeServicesCode
) ? VM_PROT_READ
| VM_PROT_EXECUTE
: VM_PROT_READ
|VM_PROT_WRITE
,
636 (mptr
->Type
== EfiMemoryMappedIO
) ? VM_WIMG_IO
: VM_WIMG_USE_DEFAULT
);
640 if (args
->Version
!= kBootArgsVersion2
)
641 panic("Incompatible boot args version %d revision %d\n", args
->Version
, args
->Revision
);
643 DPRINTF("Boot args version %d revision %d mode %d\n", args
->Version
, args
->Revision
, args
->efiMode
);
644 if (args
->efiMode
== kBootArgsEfiMode64
) {
645 efi_set_tables_64((EFI_SYSTEM_TABLE_64
*) ml_static_ptovirt(args
->efiSystemTable
));
647 efi_set_tables_32((EFI_SYSTEM_TABLE_32
*) ml_static_ptovirt(args
->efiSystemTable
));
655 /* Remap EFI runtime areas. */
657 hibernate_newruntime_map(void * map
, vm_size_t map_size
, uint32_t system_table_offset
)
659 boot_args
*args
= (boot_args
*)PE_state
.bootArgs
;
661 kprintf("Reinitializing EFI runtime services\n");
665 vm_offset_t vm_size
, vm_addr
;
666 vm_map_offset_t phys_addr
;
667 EfiMemoryRange
*mptr
;
668 unsigned int msize
, mcount
;
671 gPEEFISystemTable
= 0;
672 gPEEFIRuntimeServices
= 0;
674 system_table_offset
+= ptoa_32(args
->efiRuntimeServicesPageStart
);
676 kprintf("Old system table 0x%x, new 0x%x\n",
677 (uint32_t)args
->efiSystemTable
, system_table_offset
);
679 args
->efiSystemTable
= system_table_offset
;
681 kprintf("Old map:\n");
682 msize
= args
->MemoryMapDescriptorSize
;
683 mcount
= args
->MemoryMapSize
/ msize
;
684 mptr
= (EfiMemoryRange
*)ml_static_ptovirt(args
->MemoryMap
);
685 for (i
=0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
686 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) {
688 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
689 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
691 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
)
692 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
693 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
695 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr
->Type
, phys_addr
, (unsigned long)vm_addr
, mptr
->NumberOfPages
);
699 pmap_remove(kernel_pmap
, i386_ptob(args
->efiRuntimeServicesPageStart
),
700 i386_ptob(args
->efiRuntimeServicesPageStart
+ args
->efiRuntimeServicesPageCount
));
702 kprintf("New map:\n");
703 msize
= args
->MemoryMapDescriptorSize
;
704 mcount
= (unsigned int )(map_size
/ msize
);
706 for (i
=0; i
< mcount
; i
++, mptr
= (EfiMemoryRange
*)(((vm_offset_t
)mptr
) + msize
)) {
707 if ((mptr
->Attribute
& EFI_MEMORY_RUNTIME
) == EFI_MEMORY_RUNTIME
) {
709 vm_size
= (vm_offset_t
)i386_ptob((uint32_t)mptr
->NumberOfPages
);
710 vm_addr
= (vm_offset_t
) mptr
->VirtualStart
;
711 if (vm_addr
< VM_MIN_KERNEL_ADDRESS
)
712 vm_addr
|= VM_MIN_KERNEL_ADDRESS
;
713 phys_addr
= (vm_map_offset_t
) mptr
->PhysicalStart
;
715 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr
->Type
, phys_addr
, (unsigned long)vm_addr
, mptr
->NumberOfPages
);
717 pmap_map(vm_addr
, phys_addr
, phys_addr
+ round_page(vm_size
),
718 (mptr
->Type
== kEfiRuntimeServicesCode
) ? VM_PROT_READ
| VM_PROT_EXECUTE
: VM_PROT_READ
|VM_PROT_WRITE
,
719 (mptr
->Type
== EfiMemoryMappedIO
) ? VM_WIMG_IO
: VM_WIMG_USE_DEFAULT
);
723 if (args
->Version
!= kBootArgsVersion2
)
724 panic("Incompatible boot args version %d revision %d\n", args
->Version
, args
->Revision
);
726 kprintf("Boot args version %d revision %d mode %d\n", args
->Version
, args
->Revision
, args
->efiMode
);
727 if (args
->efiMode
== kBootArgsEfiMode64
) {
728 efi_set_tables_64((EFI_SYSTEM_TABLE_64
*) ml_static_ptovirt(args
->efiSystemTable
));
730 efi_set_tables_32((EFI_SYSTEM_TABLE_32
*) ml_static_ptovirt(args
->efiSystemTable
));
735 kprintf("Done reinitializing EFI runtime services\n");
741 * Find devices. The system is alive.
746 /* Now with VM up, switch to dynamically allocated cpu data */
749 /* Ensure panic buffer is initialized. */
753 * Display CPU identification
755 cpuid_cpu_display("CPU identification");
756 cpuid_feature_display("CPU features");
757 cpuid_extfeature_display("CPU extended features");
760 * Initialize EFI runtime services.
767 * Set up to use floating point.
772 * Configure clock devices.
778 * Initialize MTRR from boot processor.
783 * Set up PAT for boot processor.
789 * Free lowmem pages and complete other setup
791 pmap_lowmem_finalize();
800 halt_all_cpus(FALSE
);
803 int reset_mem_on_reboot
= 1;
806 * Halt the system or reboot.
809 halt_all_cpus(boolean_t reboot
)
812 printf("MACH Reboot\n");
813 PEHaltRestart( kPERestartCPU
);
815 printf("CPU halted\n");
816 PEHaltRestart( kPEHaltCPU
);
822 /* Issue an I/O port read if one has been requested - this is an event logic
823 * analyzers can use as a trigger point.
827 panic_io_port_read(void) {
829 (void)inb(panic_io_port
);
832 /* For use with the MP rendezvous mechanism
835 uint64_t panic_restart_timeout
= ~(0ULL);
837 #define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)
840 machine_halt_cpu(void) {
843 panic_io_port_read();
845 /* Halt here forever if we're not rebooting */
846 if (!PE_reboot_on_panic() && panic_restart_timeout
== ~(0ULL)) {
847 pmCPUHalt(PM_HALT_DEBUG
);
851 if (PE_reboot_on_panic())
852 deadline
= mach_absolute_time() + PANIC_RESTART_TIMEOUT
;
854 deadline
= mach_absolute_time() + panic_restart_timeout
;
856 while (mach_absolute_time() < deadline
)
859 kprintf("Invoking PE_halt_restart\n");
860 /* Attempt restart via ACPI RESET_REG; at the time of this
861 * writing, this is routine is chained through AppleSMC->
865 (*PE_halt_restart
)(kPERestartCPU
);
866 pmCPUHalt(PM_HALT_DEBUG
);
869 static int pid_from_task(task_t task
)
874 pid
= proc_pid(task
->bsd_info
);
881 __unused
unsigned int reason
,
892 unsigned long pi_size
= 0;
894 int cn
= cpu_number();
895 task_t task
= current_task();
896 int task_pid
= pid_from_task(task
);
897 boolean_t old_doprnt_hide_pointers
= doprnt_hide_pointers
;
899 hw_atomic_add(&debug_mode
, 1);
900 if (!panic_is_inited
) {
905 doprnt_hide_pointers
= FALSE
;
907 printf("Debugger called: <%s>\n", message
);
908 kprintf("Debugger called: <%s>\n", message
);
911 * Skip the graphical panic box if no panic string.
912 * This is the case if we're being called from
913 * host_reboot(,HOST_REBOOT_DEBUGGER)
914 * as a quiet way into the debugger.
918 disable_preemption();
920 /* Issue an I/O port read if one has been requested - this is an event logic
921 * analyzers can use as a trigger point.
923 panic_io_port_read();
925 /* Obtain current frame pointer */
926 __asm__
volatile("movq %%rbp, %0" : "=m" (stackptr
));
928 /* Print backtrace - callee is internally synchronized */
929 if (task_pid
== 1 && (init_task_died
)) {
930 /* Special handling of launchd died panics */
931 print_launchd_info();
933 panic_i386_backtrace(stackptr
, ((panic_double_fault_cpu
== cn
) ? 80: 48), NULL
, FALSE
, NULL
);
936 /* everything should be printed now so copy to NVRAM
939 if( debug_buf_size
> 0) {
940 /* Optionally sync the panic log, if any, to NVRAM
941 * This is the default.
943 if (commit_paniclog_to_nvram
) {
949 /* Now call the compressor */
950 /* XXX Consider using the WKdm compressor in the
951 * future, rather than just packing - would need to
952 * be co-ordinated with crashreporter, which decodes
953 * this post-restart. The compressor should be
954 * capable of in-place compression.
956 bufpos
= packA(debug_buf
,
957 (unsigned int) (debug_buf_ptr
- debug_buf
), debug_buf_size
);
958 /* If compression was successful,
959 * use the compressed length
961 pi_size
= bufpos
? bufpos
: (unsigned) (debug_buf_ptr
- debug_buf
);
963 /* Save panic log to non-volatile store
964 * Panic info handler must truncate data that is
965 * too long for this platform.
966 * This call must save data synchronously,
967 * since we can subsequently halt the system.
971 /* The following sequence is a workaround for:
972 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
973 * any routines that use floating point (MMX in this case) when saving panic
974 * logs to nvram/flash.
979 kprintf("Attempting to commit panic log to NVRAM\n");
980 pi_size
= PESavePanicInfo((unsigned char *)debug_buf
,
984 /* Uncompress in-place, to permit examination of
985 * the panic log by debuggers.
989 unpackA(debug_buf
, bufpos
);
994 if (!panicDebugging
) {
996 /* Clear the MP rendezvous function lock, in the event
997 * that a panic occurred while in that codepath.
999 mp_rendezvous_break_lock();
1001 /* Non-maskably interrupt all other processors
1002 * If a restart timeout is specified, this processor
1003 * will attempt a restart.
1005 kprintf("Invoking machine_halt_cpu on CPU %d\n", cn
);
1006 for (cnum
= 0; cnum
< real_ncpus
; cnum
++) {
1007 if (cnum
!= (unsigned) cn
) {
1008 cpu_NMI_interrupt(cnum
);
1016 doprnt_hide_pointers
= old_doprnt_hide_pointers
;
1018 hw_atomic_sub(&debug_mode
, 1);
1022 machine_boot_info(char *buf
, __unused vm_size_t size
)
1028 /* Routines for address - symbol translation. Not called unless the "keepsyms"
1029 * boot-arg is supplied.
1033 panic_print_macho_symbol_name(kernel_mach_header_t
*mh
, vm_address_t search
, const char *module_name
)
1035 kernel_nlist_t
*sym
= NULL
;
1036 struct load_command
*cmd
;
1037 kernel_segment_command_t
*orig_ts
= NULL
, *orig_le
= NULL
;
1038 struct symtab_command
*orig_st
= NULL
;
1040 char *strings
, *bestsym
= NULL
;
1041 vm_address_t bestaddr
= 0, diff
, curdiff
;
1043 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
1045 cmd
= (struct load_command
*) &mh
[1];
1046 for (i
= 0; i
< mh
->ncmds
; i
++) {
1047 if (cmd
->cmd
== LC_SEGMENT_KERNEL
) {
1048 kernel_segment_command_t
*orig_sg
= (kernel_segment_command_t
*) cmd
;
1050 if (strncmp(SEG_TEXT
, orig_sg
->segname
,
1051 sizeof(orig_sg
->segname
)) == 0)
1053 else if (strncmp(SEG_LINKEDIT
, orig_sg
->segname
,
1054 sizeof(orig_sg
->segname
)) == 0)
1056 else if (strncmp("", orig_sg
->segname
,
1057 sizeof(orig_sg
->segname
)) == 0)
1058 orig_ts
= orig_sg
; /* pre-Lion i386 kexts have a single unnamed segment */
1060 else if (cmd
->cmd
== LC_SYMTAB
)
1061 orig_st
= (struct symtab_command
*) cmd
;
1063 cmd
= (struct load_command
*) ((uintptr_t) cmd
+ cmd
->cmdsize
);
1066 if ((orig_ts
== NULL
) || (orig_st
== NULL
) || (orig_le
== NULL
))
1069 if ((search
< orig_ts
->vmaddr
) ||
1070 (search
>= orig_ts
->vmaddr
+ orig_ts
->vmsize
)) {
1071 /* search out of range for this mach header */
1075 sym
= (kernel_nlist_t
*)(uintptr_t)(orig_le
->vmaddr
+ orig_st
->symoff
- orig_le
->fileoff
);
1076 strings
= (char *)(uintptr_t)(orig_le
->vmaddr
+ orig_st
->stroff
- orig_le
->fileoff
);
1079 for (i
= 0; i
< orig_st
->nsyms
; i
++) {
1080 if (sym
[i
].n_type
& N_STAB
) continue;
1082 if (sym
[i
].n_value
<= search
) {
1083 curdiff
= search
- (vm_address_t
)sym
[i
].n_value
;
1084 if (curdiff
< diff
) {
1086 bestaddr
= sym
[i
].n_value
;
1087 bestsym
= strings
+ sym
[i
].n_un
.n_strx
;
1092 if (bestsym
!= NULL
) {
1094 kdb_printf("%s : %s + 0x%lx", module_name
, bestsym
, (unsigned long)diff
);
1096 kdb_printf("%s : %s", module_name
, bestsym
);
1103 extern kmod_info_t
* kmod
; /* the list of modules */
1106 panic_print_kmod_symbol_name(vm_address_t search
)
1110 if (gLoadedKextSummaries
== NULL
)
1112 for (i
= 0; i
< gLoadedKextSummaries
->numSummaries
; ++i
) {
1113 OSKextLoadedKextSummary
*summary
= gLoadedKextSummaries
->summaries
+ i
;
1115 if ((search
>= summary
->address
) &&
1116 (search
< (summary
->address
+ summary
->size
)))
1118 kernel_mach_header_t
*header
= (kernel_mach_header_t
*)(uintptr_t) summary
->address
;
1119 if (panic_print_macho_symbol_name(header
, search
, summary
->name
) == 0) {
1120 kdb_printf("%s + %llu", summary
->name
, (unsigned long)search
- summary
->address
);
1128 panic_print_symbol_name(vm_address_t search
)
1130 /* try searching in the kernel */
1131 if (panic_print_macho_symbol_name(&_mh_execute_header
, search
, "mach_kernel") == 0) {
1132 /* that failed, now try to search for the right kext */
1133 panic_print_kmod_symbol_name(search
);
1137 /* Generate a backtrace, given a frame pointer - this routine
1138 * should walk the stack safely. The trace is appended to the panic log
1139 * and conditionally, to the console. If the trace contains kernel module
1140 * addresses, display the module name, load address and dependencies.
1143 #define DUMPFRAMES 32
1144 #define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
1146 panic_i386_backtrace(void *_frame
, int nframes
, const char *msg
, boolean_t regdump
, x86_saved_state_t
*regs
)
1148 cframe_t
*frame
= (cframe_t
*)_frame
;
1149 vm_offset_t raddrs
[DUMPFRAMES
];
1152 volatile uint32_t *ppbtcnt
= &pbtcnt
;
1153 uint64_t bt_tsc_timeout
;
1154 boolean_t keepsyms
= FALSE
;
1155 int cn
= cpu_number();
1156 boolean_t old_doprnt_hide_pointers
= doprnt_hide_pointers
;
1159 hw_atomic_add(&pbtcnt
, 1);
1160 /* Spin on print backtrace lock, which serializes output
1161 * Continue anyway if a timeout occurs.
1163 hw_lock_to(&pbtlock
, ~0U);
1167 if (__improbable(doprnt_hide_pointers
== TRUE
)) {
1168 /* If we're called directly, the Debugger() function will not be called,
1169 * so we need to reset the value in here. */
1170 doprnt_hide_pointers
= FALSE
;
1175 PE_parse_boot_argn("keepsyms", &keepsyms
, sizeof (keepsyms
));
1178 kdb_printf("%s", msg
);
1181 if ((regdump
== TRUE
) && (regs
!= NULL
)) {
1182 x86_saved_state64_t
*ss64p
= saved_state64(regs
);
1184 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1185 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1186 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1187 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1188 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
1189 ss64p
->rax
, ss64p
->rbx
, ss64p
->rcx
, ss64p
->rdx
,
1190 ss64p
->isf
.rsp
, ss64p
->rbp
, ss64p
->rsi
, ss64p
->rdi
,
1191 ss64p
->r8
, ss64p
->r9
, ss64p
->r10
, ss64p
->r11
,
1192 ss64p
->r12
, ss64p
->r13
, ss64p
->r14
, ss64p
->r15
,
1193 ss64p
->isf
.rflags
, ss64p
->isf
.rip
, ss64p
->isf
.cs
,
1195 PC
= ss64p
->isf
.rip
;
1198 kdb_printf("Backtrace (CPU %d), "
1199 #if PRINT_ARGS_FROM_STACK_FRAME
1200 "Frame : Return Address (4 potential args on stack)\n", cn
);
1202 "Frame : Return Address\n", cn
);
1205 for (frame_index
= 0; frame_index
< nframes
; frame_index
++) {
1206 vm_offset_t curframep
= (vm_offset_t
) frame
;
1211 if (curframep
& 0x3) {
1212 kdb_printf("Unaligned frame\n");
1216 if (!kvtophys(curframep
) ||
1217 !kvtophys(curframep
+ sizeof(cframe_t
) - 1)) {
1218 kdb_printf("No mapping exists for frame pointer\n");
1222 kdb_printf("%p : 0x%lx ", frame
, frame
->caller
);
1223 if (frame_index
< DUMPFRAMES
)
1224 raddrs
[frame_index
] = frame
->caller
;
1226 #if PRINT_ARGS_FROM_STACK_FRAME
1227 if (kvtophys((vm_offset_t
)&(frame
->args
[3])))
1228 kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
1229 frame
->args
[0], frame
->args
[1],
1230 frame
->args
[2], frame
->args
[3]);
1233 /* Display address-symbol translation only if the "keepsyms"
1234 * boot-arg is suppplied, since we unload LINKEDIT otherwise.
1235 * This routine is potentially unsafe; also, function
1236 * boundary identification is unreliable after a strip -x.
1239 panic_print_symbol_name((vm_address_t
)frame
->caller
);
1243 frame
= frame
->prev
;
1246 if (frame_index
>= nframes
)
1247 kdb_printf("\tBacktrace continues...\n");
1252 kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame
);
1255 /* Identify kernel modules in the backtrace and display their
1256 * load addresses and dependencies. This routine should walk
1257 * the kmod list safely.
1260 kmod_panic_dump((vm_offset_t
*)&raddrs
[0], frame_index
);
1263 kmod_panic_dump(&PC
, 1);
1265 panic_display_system_configuration();
1267 doprnt_hide_pointers
= old_doprnt_hide_pointers
;
1269 /* Release print backtrace lock, to permit other callers in the
1270 * event of panics on multiple processors.
1272 hw_lock_unlock(&pbtlock
);
1273 hw_atomic_sub(&pbtcnt
, 1);
1274 /* Wait for other processors to complete output
1275 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1277 bt_tsc_timeout
= rdtsc64() + PBT_TIMEOUT_CYCLES
;
1278 while(*ppbtcnt
&& (rdtsc64() < bt_tsc_timeout
));
1282 debug_copyin(pmap_t p
, uint64_t uaddr
, void *dest
, size_t size
)
1285 char *kvaddr
= dest
;
1288 ppnum_t upn
= pmap_find_phys(p
, uaddr
);
1289 uint64_t phys_src
= ptoa_64(upn
) | (uaddr
& PAGE_MASK
);
1290 uint64_t phys_dest
= kvtophys((vm_offset_t
)kvaddr
);
1291 uint64_t src_rem
= PAGE_SIZE
- (phys_src
& PAGE_MASK
);
1292 uint64_t dst_rem
= PAGE_SIZE
- (phys_dest
& PAGE_MASK
);
1293 size_t cur_size
= (uint32_t) MIN(src_rem
, dst_rem
);
1294 cur_size
= MIN(cur_size
, rem
);
1296 if (upn
&& pmap_valid_page(upn
) && phys_dest
) {
1297 bcopy_phys(phys_src
, phys_dest
, cur_size
);
1309 print_threads_registers(thread_t thread
)
1311 x86_saved_state_t
*savestate
;
1313 savestate
= get_user_regs(thread
);
1315 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1316 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1317 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1318 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1319 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1320 savestate
->ss_64
.rax
, savestate
->ss_64
.rbx
, savestate
->ss_64
.rcx
, savestate
->ss_64
.rdx
,
1321 savestate
->ss_64
.isf
.rsp
, savestate
->ss_64
.rbp
, savestate
->ss_64
.rsi
, savestate
->ss_64
.rdi
,
1322 savestate
->ss_64
.r8
, savestate
->ss_64
.r9
, savestate
->ss_64
.r10
, savestate
->ss_64
.r11
,
1323 savestate
->ss_64
.r12
, savestate
->ss_64
.r13
, savestate
->ss_64
.r14
, savestate
->ss_64
.r15
,
1324 savestate
->ss_64
.isf
.rflags
, savestate
->ss_64
.isf
.rip
, savestate
->ss_64
.isf
.cs
,
1325 savestate
->ss_64
.isf
.ss
);
1329 print_tasks_user_threads(task_t task
)
1331 thread_t thread
= current_thread();
1332 x86_saved_state_t
*savestate
;
1335 const char *cur_marker
= 0;
1338 for (j
= 0, thread
= (thread_t
) queue_first(&task
->threads
); j
< task
->thread_count
;
1339 ++j
, thread
= (thread_t
) queue_next(&thread
->task_threads
)) {
1341 kdb_printf("Thread %d: %p\n", j
, thread
);
1342 pmap
= get_task_pmap(task
);
1343 savestate
= get_user_regs(thread
);
1344 rbp
= savestate
->ss_64
.rbp
;
1345 print_one_backtrace(pmap
, (vm_offset_t
)rbp
, cur_marker
, TRUE
, TRUE
);
1351 print_thread_num_that_crashed(task_t task
)
1353 thread_t c_thread
= current_thread();
1357 for (j
= 0, thread
= (thread_t
) queue_first(&task
->threads
); j
< task
->thread_count
;
1358 ++j
, thread
= (thread_t
) queue_next(&thread
->task_threads
)) {
1360 if (c_thread
== thread
) {
1361 kdb_printf("\nThread %d crashed\n", j
);
1367 #define PANICLOG_UUID_BUF_SIZE 256
1369 void print_uuid_info(task_t task
)
1371 uint32_t uuid_info_count
= 0;
1372 mach_vm_address_t uuid_info_addr
= 0;
1373 boolean_t have_map
= (task
->map
!= NULL
) && (ml_validate_nofault((vm_offset_t
)(task
->map
), sizeof(struct _vm_map
)));
1374 boolean_t have_pmap
= have_map
&& (task
->map
->pmap
!= NULL
) && (ml_validate_nofault((vm_offset_t
)(task
->map
->pmap
), sizeof(struct pmap
)));
1375 int task_pid
= pid_from_task(task
);
1376 char uuidbuf
[PANICLOG_UUID_BUF_SIZE
] = {0};
1377 char *uuidbufptr
= uuidbuf
;
1380 if (have_pmap
&& task
->active
&& task_pid
> 0) {
1381 /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
1382 struct user64_dyld_all_image_infos task_image_infos
;
1383 if (debug_copyin(task
->map
->pmap
, task
->all_image_info_addr
,
1384 &task_image_infos
, sizeof(struct user64_dyld_all_image_infos
))) {
1385 uuid_info_count
= (uint32_t)task_image_infos
.uuidArrayCount
;
1386 uuid_info_addr
= task_image_infos
.uuidArray
;
1389 /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
1390 * in the middle of updating this data structure), we zero the
1391 * uuid_info_count so that we won't even try to save load info for this task
1393 if (!uuid_info_addr
) {
1394 uuid_info_count
= 0;
1398 if (task_pid
> 0 && uuid_info_count
> 0) {
1399 uint32_t uuid_info_size
= sizeof(struct user64_dyld_uuid_info
);
1400 uint32_t uuid_array_size
= uuid_info_count
* uuid_info_size
;
1401 uint32_t uuid_copy_size
= 0;
1402 uint32_t uuid_image_count
= 0;
1403 char *current_uuid_buffer
= NULL
;
1404 /* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */
1406 kdb_printf("\nuuid info:\n");
1407 while (uuid_array_size
) {
1408 if (uuid_array_size
<= PANICLOG_UUID_BUF_SIZE
) {
1409 uuid_copy_size
= uuid_array_size
;
1410 uuid_image_count
= uuid_array_size
/uuid_info_size
;
1412 uuid_image_count
= PANICLOG_UUID_BUF_SIZE
/uuid_info_size
;
1413 uuid_copy_size
= uuid_image_count
* uuid_info_size
;
1415 if (have_pmap
&& !debug_copyin(task
->map
->pmap
, uuid_info_addr
, uuidbufptr
,
1417 kdb_printf("Error!! Failed to copy UUID info for task %p pid %d\n", task
, task_pid
);
1418 uuid_image_count
= 0;
1422 if (uuid_image_count
> 0) {
1423 current_uuid_buffer
= uuidbufptr
;
1424 for (k
= 0; k
< uuid_image_count
; k
++) {
1425 kdb_printf(" %#llx", *(uint64_t *)current_uuid_buffer
);
1426 current_uuid_buffer
+= sizeof(uint64_t);
1427 uint8_t *uuid
= (uint8_t *)current_uuid_buffer
;
1428 kdb_printf("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
1429 uuid
[0], uuid
[1], uuid
[2], uuid
[3], uuid
[4], uuid
[5], uuid
[6], uuid
[7], uuid
[8],
1430 uuid
[9], uuid
[10], uuid
[11], uuid
[12], uuid
[13], uuid
[14], uuid
[15]);
1431 current_uuid_buffer
+= 16;
1433 bzero(&uuidbuf
, sizeof(uuidbuf
));
1435 uuid_info_addr
+= uuid_copy_size
;
1436 uuid_array_size
-= uuid_copy_size
;
1441 void print_launchd_info(void)
1443 task_t task
= current_task();
1444 thread_t thread
= current_thread();
1445 volatile uint32_t *ppbtcnt
= &pbtcnt
;
1446 uint64_t bt_tsc_timeout
;
1447 int cn
= cpu_number();
1450 hw_atomic_add(&pbtcnt
, 1);
1451 /* Spin on print backtrace lock, which serializes output
1452 * Continue anyway if a timeout occurs.
1454 hw_lock_to(&pbtlock
, ~0U);
1458 print_uuid_info(task
);
1459 print_thread_num_that_crashed(task
);
1460 print_threads_registers(thread
);
1461 print_tasks_user_threads(task
);
1462 kdb_printf("Mac OS version: %s\n", (osversion
[0] != 0) ? osversion
: "Not yet set");
1463 kdb_printf("Kernel version: %s\n", version
);
1464 panic_display_kernel_uuid();
1465 panic_display_model_name();
1467 /* Release print backtrace lock, to permit other callers in the
1468 * event of panics on multiple processors.
1470 hw_lock_unlock(&pbtlock
);
1471 hw_atomic_sub(&pbtcnt
, 1);
1472 /* Wait for other processors to complete output
1473 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1475 bt_tsc_timeout
= rdtsc64() + PBT_TIMEOUT_CYCLES
;
1476 while(*ppbtcnt
&& (rdtsc64() < bt_tsc_timeout
));