1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 */
59
60 /*
61 * File: model_dep.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 *
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
65 *
66 * Basic initialization for I386 - ISA bus machines.
67 */
68
69
70 #include <mach/i386/vm_param.h>
71
72 #include <string.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_prot.h>
75 #include <mach/machine.h>
76 #include <mach/time_value.h>
77 #include <sys/kdebug.h>
78 #include <kern/spl.h>
79 #include <kern/assert.h>
80 #include <kern/debug.h>
81 #include <kern/misc_protos.h>
82 #include <kern/startup.h>
83 #include <kern/clock.h>
84 #include <kern/cpu_data.h>
85 #include <kern/machine.h>
86 #include <i386/postcode.h>
87 #include <i386/mp_desc.h>
88 #include <i386/misc_protos.h>
89 #include <i386/thread.h>
90 #include <i386/trap.h>
91 #include <i386/machine_routines.h>
92 #include <i386/mp.h> /* mp_rendezvous_break_lock */
93 #include <i386/cpuid.h>
94 #include <i386/fpu.h>
95 #include <i386/machine_cpu.h>
96 #include <i386/pmap.h>
97 #if CONFIG_MTRR
98 #include <i386/mtrr.h>
99 #endif
100 #include <i386/ucode.h>
101 #include <i386/pmCPU.h>
102 #include <i386/panic_hooks.h>
103
104 #include <architecture/i386/pio.h> /* inb() */
105 #include <pexpert/i386/boot.h>
106
107 #include <kdp/kdp_dyld.h>
108 #include <vm/pmap.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_kern.h>
111
112 #include <IOKit/IOPlatformExpert.h>
113 #include <IOKit/IOHibernatePrivate.h>
114
115 #include <pexpert/i386/efi.h>
116
117 #include <kern/thread.h>
118 #include <kern/sched.h>
119 #include <mach-o/loader.h>
120 #include <mach-o/nlist.h>
121
122 #include <libkern/kernel_mach_header.h>
123 #include <libkern/OSKextLibPrivate.h>
124
125 #include <mach/branch_predicates.h>
126
127 #if DEBUG
128 #define DPRINTF(x...) kprintf(x)
129 #else
130 #define DPRINTF(x...)
131 #endif
132
133 static void machine_conf(void);
134 void panic_print_symbol_name(vm_address_t search);
135
136 extern boolean_t init_task_died;
137 extern const char version[];
138 extern char osversion[];
139 extern int max_unsafe_quanta;
140 extern int max_poll_quanta;
141 extern unsigned int panic_is_inited;
142
143 extern int proc_pid(void *p);
144
145 /* Definitions for frame pointers */
146 #define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
147 #define FP_LR_OFFSET ((uint32_t)4)
148 #define FP_LR_OFFSET64 ((uint32_t)8)
149 #define FP_MAX_NUM_TO_EVALUATE (50)
150
151 int db_run_mode;
152
153 volatile int pbtcpu = -1;
154 hw_lock_data_t pbtlock; /* backtrace print lock */
155 uint32_t pbtcnt = 0;
156
157 volatile int panic_double_fault_cpu = -1;
158
159 #define PRINT_ARGS_FROM_STACK_FRAME 0
160
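/*
 * Layout of a saved x86 call frame as walked by the panic backtracer: the
 * saved frame pointer links to the caller's frame, and the return address
 * sits immediately above it on the stack.
 */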
161 typedef struct _cframe_t {
162 struct _cframe_t *prev;
163 uintptr_t caller;
164 #if PRINT_ARGS_FROM_STACK_FRAME
165 unsigned args[0];
166 #endif
167 } cframe_t;
168
169 static unsigned panic_io_port;
170 static unsigned commit_paniclog_to_nvram;
171
172 unsigned int debug_boot_arg;
173
174 /*
175  * Walk and print one backtrace, starting at frame pointer topfp (up to FP_MAX_NUM_TO_EVALUATE frames).
176  */
177 void
178 print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
179 boolean_t is_64_bit, boolean_t nvram_format)
180 {
181 int i = 0;
182 addr64_t lr;
183 addr64_t fp;
184 addr64_t fp_for_ppn;
185 ppnum_t ppn;
186 boolean_t dump_kernel_stack;
187
188 fp = topfp;
189 fp_for_ppn = 0;
190 ppn = (ppnum_t)NULL;
191
192 if (fp >= VM_MIN_KERNEL_ADDRESS)
193 dump_kernel_stack = TRUE;
194 else
195 dump_kernel_stack = FALSE;
196
197 do {
198 if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
199 break;
200 if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
201 break;
202                 if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
203 break;
204
205 /* Check to see if current address will result in a different
206 ppn than previously computed (to avoid recomputation) via
207                    ((addr ^ fp_for_ppn) >> PAGE_SHIFT) */
208
209 if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
210 ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
211 fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
212 }
213 if (ppn != (ppnum_t)NULL) {
214 if (is_64_bit) {
215 lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
216 } else {
217 lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
218 }
219 } else {
220 if (is_64_bit) {
221 kdb_printf("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
222 } else {
223 kdb_printf("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
224 }
225 break;
226 }
227 if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
228 ppn = pmap_find_phys(pmap, fp);
229 fp_for_ppn = fp;
230 }
231 if (ppn != (ppnum_t)NULL) {
232 if (is_64_bit) {
233 fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
234 } else {
235 fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
236 }
237 } else {
238 if (is_64_bit) {
239 kdb_printf("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
240 } else {
241 kdb_printf("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
242 }
243 break;
244 }
245
246 if (nvram_format) {
247 if (is_64_bit) {
248 kdb_printf("%s\t0x%016llx\n", cur_marker, lr);
249 } else {
250 kdb_printf("%s\t0x%08x\n", cur_marker, (uint32_t)lr);
251 }
252 } else {
253 if (is_64_bit) {
254 kdb_printf("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
255 } else {
256 kdb_printf("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
257 }
258 }
259 } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
260 }
261 void
262 machine_startup(void)
263 {
264 int boot_arg;
265
266 #if 0
267 if( PE_get_hotkey( kPEControlKey ))
268 halt_in_debugger = halt_in_debugger ? 0 : 1;
269 #endif
270
271 if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) {
272 panicDebugging = TRUE;
273 if (debug_boot_arg & DB_HALT) halt_in_debugger=1;
274 if (debug_boot_arg & DB_PRT) disable_debug_output=FALSE;
275 if (debug_boot_arg & DB_SLOG) systemLogDiags=TRUE;
276 if (debug_boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
277 #if KDEBUG_MOJO_TRACE
278 if (debug_boot_arg & DB_PRT_KDEBUG) {
279 kdebug_serial = TRUE;
280 disable_debug_output = FALSE;
281 }
282 #endif
283 } else {
284 debug_boot_arg = 0;
285 }
286
287 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
288 commit_paniclog_to_nvram = 1;
289
290 /*
291 * Entering the debugger will put the CPUs into a "safe"
292 * power mode.
293 */
294 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg)))
295 pmsafe_debug = boot_arg;
296
297 #if NOTYET
298 hw_lock_init(&debugger_lock); /* initialize debugger lock */
299 #endif
300 hw_lock_init(&pbtlock); /* initialize print backtrace lock */
301
302 if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
303 default_preemption_rate = boot_arg;
304 }
305 if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
306 max_unsafe_quanta = boot_arg;
307 }
308 if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
309 max_poll_quanta = boot_arg;
310 }
311 if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
312 sched_poll_yield_shift = boot_arg;
313 }
314 /* The I/O port to issue a read from, in the event of a panic. Useful for
315 * triggering logic analyzers.
316 */
317 if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) {
318 /*I/O ports range from 0 through 0xFFFF */
319 panic_io_port = boot_arg & 0xffff;
320 }
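	/* For example, booting with panic_io_port=0x80 (the conventional POST
	 * diagnostic port) gives an analyzer an easily recognizable bus cycle
	 * to trigger on; that value is purely illustrative, not a requirement.
	 */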
321
322 machine_conf();
323
324 panic_hooks_init();
325
326 /*
327 * Start the system.
328 */
329 kernel_bootstrap();
330 /*NOTREACHED*/
331 }
332
333
334 static void
335 machine_conf(void)
336 {
337 machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
338 }
339
340
341 extern void *gPEEFIRuntimeServices;
342 extern void *gPEEFISystemTable;
343
344 /*-
345 * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
346 * code or tables extracted from it, as desired without restriction.
347 *
348 * First, the polynomial itself and its table of feedback terms. The
349 * polynomial is
350 * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
351 *
352 * Note that we take it "backwards" and put the highest-order term in
353 * the lowest-order bit. The X^32 term is "implied"; the LSB is the
354 * X^31 term, etc. The X^0 term (usually shown as "+1") results in
355 * the MSB being 1
356 *
357 * Note that the usual hardware shift register implementation, which
358 * is what we're using (we're merely optimizing it by doing eight-bit
359 * chunks at a time) shifts bits into the lowest-order term. In our
360 * implementation, that means shifting towards the right. Why do we
361 * do it this way? Because the calculated CRC must be transmitted in
362 * order from highest-order term to lowest-order term. UARTs transmit
363 * characters in order from LSB to MSB. By storing the CRC this way
364 * we hand it to the UART in the order low-byte to high-byte; the UART
365  * sends each low-bit to high-bit; and the result is transmission bit
366  * by bit from highest- to lowest-order term without requiring any bit
367  * shuffling on our part.  Reception works similarly.
368 *
369  * The feedback terms table consists of 256 32-bit entries.  Notes:
370  *
371  * The table can be generated at runtime if desired; code to do so
372  * is shown later.  It might not be obvious, but the feedback
373  * terms simply represent the results of eight shift/xor operations
374  * for all combinations of data and CRC register values.
375  *
376  * The values must be right-shifted by eight bits by the "updcrc"
377  * logic; the shift must be unsigned (bring in zeroes).  On some
378  * hardware you could probably optimize the shift in assembler by
379  * using byte-swap instructions.
380 * polynomial $edb88320
381 *
382 *
383 * CRC32 code derived from work by Gary S. Brown.
384 */
385
386 static uint32_t crc32_tab[] = {
387 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
388 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
389 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
390 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
391 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
392 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
393 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
394 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
395 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
396 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
397 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
398 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
399 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
400 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
401 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
402 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
403 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
404 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
405 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
406 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
407 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
408 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
409 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
410 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
411 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
412 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
413 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
414 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
415 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
416 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
417 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
418 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
419 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
420 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
421 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
422 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
423 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
424 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
425 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
426 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
427 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
428 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
429 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
430 };
431
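/*
 * The table above can also be generated at runtime, as the comment block
 * notes ("code to do so is shown later") but does not include here.  A
 * minimal sketch of such a generator, for reference only (not compiled into
 * this file), using the same reflected polynomial 0xedb88320; it reproduces
 * the entries above (for example, tab[1] == 0x77073096):
 *
 *	static void
 *	crc32_init_tab(uint32_t tab[256])
 *	{
 *		uint32_t c;
 *		int n, k;
 *
 *		for (n = 0; n < 256; n++) {
 *			c = (uint32_t)n;
 *			for (k = 0; k < 8; k++)
 *				c = (c & 1) ? (0xedb88320U ^ (c >> 1)) : (c >> 1);
 *			tab[n] = c;
 *		}
 *	}
 */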
432 static uint32_t
433 crc32(uint32_t crc, const void *buf, size_t size)
434 {
435 const uint8_t *p;
436
437 p = buf;
438 crc = crc ^ ~0U;
439
440 while (size--)
441 crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
442
443 return crc ^ ~0U;
444 }
445
446 static void
447 efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
448 {
449 EFI_RUNTIME_SERVICES_64 *runtime;
450 uint32_t hdr_cksum;
451 uint32_t cksum;
452
453 DPRINTF("Processing 64-bit EFI tables at %p\n", system_table);
454 do {
455 DPRINTF("Header:\n");
456 DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
457 DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
458 DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
459 DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
460 DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices);
461 if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
462 kprintf("Bad EFI system table signature\n");
463 break;
464 }
465 // Verify signature of the system table
466 hdr_cksum = system_table->Hdr.CRC32;
467 system_table->Hdr.CRC32 = 0;
468 cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);
469
470 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
471 system_table->Hdr.CRC32 = hdr_cksum;
472 if (cksum != hdr_cksum) {
473 kprintf("Bad EFI system table checksum\n");
474 break;
475 }
476
477 gPEEFISystemTable = system_table;
478
479 if(system_table->RuntimeServices == 0) {
480 kprintf("No runtime table present\n");
481 break;
482 }
483 DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
484 // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
485 runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
486 DPRINTF("Checking runtime services table %p\n", runtime);
487 if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
488 kprintf("Bad EFI runtime table signature\n");
489 break;
490 }
491
492 // Verify signature of runtime services table
493 hdr_cksum = runtime->Hdr.CRC32;
494 runtime->Hdr.CRC32 = 0;
495 cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);
496
497 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
498 runtime->Hdr.CRC32 = hdr_cksum;
499 if (cksum != hdr_cksum) {
500 kprintf("Bad EFI runtime table checksum\n");
501 break;
502 }
503
504 gPEEFIRuntimeServices = runtime;
505 }
506 while (FALSE);
507 }
508
509 static void
510 efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
511 {
512 EFI_RUNTIME_SERVICES_32 *runtime;
513 uint32_t hdr_cksum;
514 uint32_t cksum;
515
516 DPRINTF("Processing 32-bit EFI tables at %p\n", system_table);
517 do {
518 DPRINTF("Header:\n");
519 DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
520 DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
521 DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
522 DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
523 DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices);
524 if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
525 kprintf("Bad EFI system table signature\n");
526 break;
527 }
528 // Verify signature of the system table
529 hdr_cksum = system_table->Hdr.CRC32;
530 system_table->Hdr.CRC32 = 0;
531 DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize);
532 cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);
533
534 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
535 system_table->Hdr.CRC32 = hdr_cksum;
536 if (cksum != hdr_cksum) {
537 kprintf("Bad EFI system table checksum\n");
538 break;
539 }
540
541 gPEEFISystemTable = system_table;
542
543 if(system_table->RuntimeServices == 0) {
544 kprintf("No runtime table present\n");
545 break;
546 }
547 DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
548 // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
549 // For a 64-bit kernel, booter provides a virtual address mod 4G
550 runtime = (EFI_RUNTIME_SERVICES_32 *)
551 (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
552 DPRINTF("Runtime table addressed at %p\n", runtime);
553 if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
554 kprintf("Bad EFI runtime table signature\n");
555 break;
556 }
557
558 // Verify signature of runtime services table
559 hdr_cksum = runtime->Hdr.CRC32;
560 runtime->Hdr.CRC32 = 0;
561 cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);
562
563 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
564 runtime->Hdr.CRC32 = hdr_cksum;
565 if (cksum != hdr_cksum) {
566 kprintf("Bad EFI runtime table checksum\n");
567 break;
568 }
569
570 DPRINTF("Runtime functions\n");
571 DPRINTF(" GetTime : 0x%x\n", runtime->GetTime);
572 DPRINTF(" SetTime : 0x%x\n", runtime->SetTime);
573 DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime);
574 DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime);
575 DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap);
576 DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer);
577 DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable);
578 DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName);
579 DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable);
580 DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount);
581 DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem);
582
583 gPEEFIRuntimeServices = runtime;
584 }
585 while (FALSE);
586 }
587
588
589 /* Map in EFI runtime areas. */
590 static void
591 efi_init(void)
592 {
593 boot_args *args = (boot_args *)PE_state.bootArgs;
594
595 kprintf("Initializing EFI runtime services\n");
596
597 do
598 {
599 vm_offset_t vm_size, vm_addr;
600 vm_map_offset_t phys_addr;
601 EfiMemoryRange *mptr;
602 unsigned int msize, mcount;
603 unsigned int i;
604
605 msize = args->MemoryMapDescriptorSize;
606 mcount = args->MemoryMapSize / msize;
607
608 DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
609 args->kaddr, args->ksize);
610 DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
611 args->efiSystemTable,
612 (void *) ml_static_ptovirt(args->efiSystemTable));
613 DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
614 args->efiRuntimeServicesPageStart);
615 DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
616 args->efiRuntimeServicesPageCount);
617 DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
618 args->efiRuntimeServicesVirtualPageStart);
619 mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
620 for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
621 if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
622 vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
623 vm_addr = (vm_offset_t) mptr->VirtualStart;
624 /* For K64 on EFI32, shadow-map into high KVA */
625 if (vm_addr < VM_MIN_KERNEL_ADDRESS)
626 vm_addr |= VM_MIN_KERNEL_ADDRESS;
627 phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
628 DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
629 mptr->Type,
630 (void *) (uintptr_t) phys_addr,
631 (void *) (uintptr_t) mptr->VirtualStart,
632 (void *) vm_addr,
633 (void *) vm_size);
634 pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size),
635 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
636 (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
637 }
638 }
639
640 if (args->Version != kBootArgsVersion2)
641 panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
642
643 DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
644 if (args->efiMode == kBootArgsEfiMode64) {
645 efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
646 } else {
647 efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
648 }
649 }
650 while (FALSE);
651
652 return;
653 }
654
655 /* Remap EFI runtime areas. */
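/* (Used on the hibernation wake path: the booter can hand the kernel a
 * replacement EFI memory map and a new system-table offset, so the old
 * runtime mappings are torn down and rebuilt from the map supplied here.)
 */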
656 void
657 hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_offset)
658 {
659 boot_args *args = (boot_args *)PE_state.bootArgs;
660
661 kprintf("Reinitializing EFI runtime services\n");
662
663 do
664 {
665 vm_offset_t vm_size, vm_addr;
666 vm_map_offset_t phys_addr;
667 EfiMemoryRange *mptr;
668 unsigned int msize, mcount;
669 unsigned int i;
670
671 gPEEFISystemTable = 0;
672 gPEEFIRuntimeServices = 0;
673
674 system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);
675
676 kprintf("Old system table 0x%x, new 0x%x\n",
677 (uint32_t)args->efiSystemTable, system_table_offset);
678
679 args->efiSystemTable = system_table_offset;
680
681 kprintf("Old map:\n");
682 msize = args->MemoryMapDescriptorSize;
683 mcount = args->MemoryMapSize / msize;
684 mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
685 for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
686 if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
687
688 vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
689 vm_addr = (vm_offset_t) mptr->VirtualStart;
690 /* K64 on EFI32 */
691 if (vm_addr < VM_MIN_KERNEL_ADDRESS)
692 vm_addr |= VM_MIN_KERNEL_ADDRESS;
693 phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
694
695 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
696 }
697 }
698
699 pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart),
700 i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount));
701
702 kprintf("New map:\n");
703 msize = args->MemoryMapDescriptorSize;
704 mcount = (unsigned int )(map_size / msize);
705 mptr = map;
706 for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
707 if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
708
709 vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
710 vm_addr = (vm_offset_t) mptr->VirtualStart;
711 if (vm_addr < VM_MIN_KERNEL_ADDRESS)
712 vm_addr |= VM_MIN_KERNEL_ADDRESS;
713 phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
714
715 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
716
717 pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
718 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
719 (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
720 }
721 }
722
723 if (args->Version != kBootArgsVersion2)
724 panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
725
726 kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
727 if (args->efiMode == kBootArgsEfiMode64) {
728 efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
729 } else {
730 efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
731 }
732 }
733 while (FALSE);
734
735 kprintf("Done reinitializing EFI runtime services\n");
736
737 return;
738 }
739
740 /*
741 * Find devices. The system is alive.
742 */
743 void
744 machine_init(void)
745 {
746 /* Now with VM up, switch to dynamically allocated cpu data */
747 cpu_data_realloc();
748
749 /* Ensure panic buffer is initialized. */
750 debug_log_init();
751
752 /*
753 * Display CPU identification
754 */
755 cpuid_cpu_display("CPU identification");
756 cpuid_feature_display("CPU features");
757 cpuid_extfeature_display("CPU extended features");
758
759 /*
760 * Initialize EFI runtime services.
761 */
762 efi_init();
763
764 smp_init();
765
766 /*
767 * Set up to use floating point.
768 */
769 init_fpu();
770
771 /*
772 * Configure clock devices.
773 */
774 clock_config();
775
776 #if CONFIG_MTRR
777 /*
778 * Initialize MTRR from boot processor.
779 */
780 mtrr_init();
781
782 /*
783 * Set up PAT for boot processor.
784 */
785 pat_init();
786 #endif
787
788 /*
789 * Free lowmem pages and complete other setup
790 */
791 pmap_lowmem_finalize();
792 }
793
794 /*
795 * Halt a cpu.
796 */
797 void
798 halt_cpu(void)
799 {
800 halt_all_cpus(FALSE);
801 }
802
803 int reset_mem_on_reboot = 1;
804
805 /*
806 * Halt the system or reboot.
807 */
808 void
809 halt_all_cpus(boolean_t reboot)
810 {
811 if (reboot) {
812 printf("MACH Reboot\n");
813 PEHaltRestart( kPERestartCPU );
814 } else {
815 printf("CPU halted\n");
816 PEHaltRestart( kPEHaltCPU );
817 }
818 while(1);
819 }
820
821
822 /* Issue an I/O port read if one has been requested - this is an event logic
823 * analyzers can use as a trigger point.
824 */
825
826 void
827 panic_io_port_read(void) {
828 if (panic_io_port)
829 (void)inb(panic_io_port);
830 }
831
832 /* For use with the MP rendezvous mechanism
833 */
834
835 uint64_t panic_restart_timeout = ~(0ULL);
836
837 #define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)
838
839 static void
840 machine_halt_cpu(void) {
841 uint64_t deadline;
842
843 panic_io_port_read();
844
845 /* Halt here forever if we're not rebooting */
846 if (!PE_reboot_on_panic() && panic_restart_timeout == ~(0ULL)) {
847 pmCPUHalt(PM_HALT_DEBUG);
848 return;
849 }
850
851 if (PE_reboot_on_panic())
852 deadline = mach_absolute_time() + PANIC_RESTART_TIMEOUT;
853 else
854 deadline = mach_absolute_time() + panic_restart_timeout;
855
856 while (mach_absolute_time() < deadline)
857 cpu_pause();
858
859 kprintf("Invoking PE_halt_restart\n");
860 /* Attempt restart via ACPI RESET_REG; at the time of this
861  * writing, this routine is chained through AppleSMC->
862 * AppleACPIPlatform
863 */
864 if (PE_halt_restart)
865 (*PE_halt_restart)(kPERestartCPU);
866 pmCPUHalt(PM_HALT_DEBUG);
867 }
868
869 static int pid_from_task(task_t task)
870 {
871 int pid = -1;
872
873 if (task->bsd_info)
874 pid = proc_pid(task->bsd_info);
875
876 return pid;
877 }
878
879 void
880 DebuggerWithContext(
881 __unused unsigned int reason,
882 __unused void *ctx,
883 const char *message)
884 {
885 Debugger(message);
886 }
887
888 void
889 Debugger(
890 const char *message)
891 {
892 unsigned long pi_size = 0;
893 void *stackptr;
894 int cn = cpu_number();
895 task_t task = current_task();
896 int task_pid = pid_from_task(task);
897 boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
898
899 hw_atomic_add(&debug_mode, 1);
900 if (!panic_is_inited) {
901 postcode(PANIC_HLT);
902 asm("hlt");
903 }
904
905 doprnt_hide_pointers = FALSE;
906
907 printf("Debugger called: <%s>\n", message);
908 kprintf("Debugger called: <%s>\n", message);
909
910 /*
911 * Skip the graphical panic box if no panic string.
912 * This is the case if we're being called from
913 * host_reboot(,HOST_REBOOT_DEBUGGER)
914 * as a quiet way into the debugger.
915 */
916
917 if (panicstr) {
918 disable_preemption();
919
920 /* Issue an I/O port read if one has been requested - this is an event logic
921 * analyzers can use as a trigger point.
922 */
923 panic_io_port_read();
924
925 /* Obtain current frame pointer */
926 __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
927
928 /* Print backtrace - callee is internally synchronized */
929 if (task_pid == 1 && (init_task_died)) {
930 /* Special handling of launchd died panics */
931 print_launchd_info();
932 } else {
933 panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80: 48), NULL, FALSE, NULL);
934 }
935
936 /* everything should be printed now so copy to NVRAM
937 */
938
939 if( debug_buf_size > 0) {
940 /* Optionally sync the panic log, if any, to NVRAM
941 * This is the default.
942 */
943 if (commit_paniclog_to_nvram) {
944 unsigned int bufpos;
945 uintptr_t cr0;
946
947 debug_putc(0);
948
949 /* Now call the compressor */
950 /* XXX Consider using the WKdm compressor in the
951 * future, rather than just packing - would need to
952 * be co-ordinated with crashreporter, which decodes
953 * this post-restart. The compressor should be
954 * capable of in-place compression.
955 */
956 bufpos = packA(debug_buf,
957 (unsigned int) (debug_buf_ptr - debug_buf), debug_buf_size);
958 /* If compression was successful,
959 * use the compressed length
960 */
961 pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);
962
963 /* Save panic log to non-volatile store
964 * Panic info handler must truncate data that is
965 * too long for this platform.
966 * This call must save data synchronously,
967 * since we can subsequently halt the system.
968 */
969
970
971 /* The following sequence is a workaround for:
972 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
973 * any routines that use floating point (MMX in this case) when saving panic
974 * logs to nvram/flash.
975 */
976 cr0 = get_cr0();
977 clear_ts();
978
979 kprintf("Attempting to commit panic log to NVRAM\n");
980 pi_size = PESavePanicInfo((unsigned char *)debug_buf,
981 (uint32_t)pi_size );
982 set_cr0(cr0);
983
984 /* Uncompress in-place, to permit examination of
985 * the panic log by debuggers.
986 */
987
988 if (bufpos) {
989 unpackA(debug_buf, bufpos);
990 }
991 }
992 }
993
994 if (!panicDebugging) {
995 unsigned cnum;
996 /* Clear the MP rendezvous function lock, in the event
997 * that a panic occurred while in that codepath.
998 */
999 mp_rendezvous_break_lock();
1000
1001 /* Non-maskably interrupt all other processors
1002 * If a restart timeout is specified, this processor
1003 * will attempt a restart.
1004 */
1005 kprintf("Invoking machine_halt_cpu on CPU %d\n", cn);
1006 for (cnum = 0; cnum < real_ncpus; cnum++) {
1007 if (cnum != (unsigned) cn) {
1008 cpu_NMI_interrupt(cnum);
1009 }
1010 }
1011 machine_halt_cpu();
1012 /* NOT REACHED */
1013 }
1014 }
1015
1016 doprnt_hide_pointers = old_doprnt_hide_pointers;
1017 __asm__("int3");
1018 hw_atomic_sub(&debug_mode, 1);
1019 }
1020
1021 char *
1022 machine_boot_info(char *buf, __unused vm_size_t size)
1023 {
1024 *buf ='\0';
1025 return buf;
1026 }
1027
1028 /* Routines for address-to-symbol translation. Not called unless the "keepsyms"
1029 * boot-arg is supplied.
1030 */
1031
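/*
 * Find the closest preceding symbol to 'search' within one Mach-O image:
 * walk the load commands for the __TEXT and __LINKEDIT segments and the
 * LC_SYMTAB command, then scan the symbol table for the largest n_value
 * that does not exceed 'search'.  Prints "module : symbol + offset" and
 * returns 1 on success, 0 if the address is out of range or unresolved.
 */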
1032 static int
1033 panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name)
1034 {
1035 kernel_nlist_t *sym = NULL;
1036 struct load_command *cmd;
1037 kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL;
1038 struct symtab_command *orig_st = NULL;
1039 unsigned int i;
1040 char *strings, *bestsym = NULL;
1041 vm_address_t bestaddr = 0, diff, curdiff;
1042
1043 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
1044
1045 cmd = (struct load_command *) &mh[1];
1046 for (i = 0; i < mh->ncmds; i++) {
1047 if (cmd->cmd == LC_SEGMENT_KERNEL) {
1048 kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;
1049
1050 if (strncmp(SEG_TEXT, orig_sg->segname,
1051 sizeof(orig_sg->segname)) == 0)
1052 orig_ts = orig_sg;
1053 else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
1054 sizeof(orig_sg->segname)) == 0)
1055 orig_le = orig_sg;
1056 else if (strncmp("", orig_sg->segname,
1057 sizeof(orig_sg->segname)) == 0)
1058 orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */
1059 }
1060 else if (cmd->cmd == LC_SYMTAB)
1061 orig_st = (struct symtab_command *) cmd;
1062
1063 cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
1064 }
1065
1066 if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
1067 return 0;
1068
1069 if ((search < orig_ts->vmaddr) ||
1070 (search >= orig_ts->vmaddr + orig_ts->vmsize)) {
1071 /* search out of range for this mach header */
1072 return 0;
1073 }
1074
1075 sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
1076 strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
1077 diff = search;
1078
1079 for (i = 0; i < orig_st->nsyms; i++) {
1080 if (sym[i].n_type & N_STAB) continue;
1081
1082 if (sym[i].n_value <= search) {
1083 curdiff = search - (vm_address_t)sym[i].n_value;
1084 if (curdiff < diff) {
1085 diff = curdiff;
1086 bestaddr = sym[i].n_value;
1087 bestsym = strings + sym[i].n_un.n_strx;
1088 }
1089 }
1090 }
1091
1092 if (bestsym != NULL) {
1093 if (diff != 0) {
1094 kdb_printf("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff);
1095 } else {
1096 kdb_printf("%s : %s", module_name, bestsym);
1097 }
1098 return 1;
1099 }
1100 return 0;
1101 }
1102
1103 extern kmod_info_t * kmod; /* the list of modules */
1104
1105 static void
1106 panic_print_kmod_symbol_name(vm_address_t search)
1107 {
1108 u_int i;
1109
1110 if (gLoadedKextSummaries == NULL)
1111 return;
1112 for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
1113 OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i;
1114
1115 if ((search >= summary->address) &&
1116 (search < (summary->address + summary->size)))
1117 {
1118 kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address;
1119 if (panic_print_macho_symbol_name(header, search, summary->name) == 0) {
1120 kdb_printf("%s + %llu", summary->name, (unsigned long)search - summary->address);
1121 }
1122 break;
1123 }
1124 }
1125 }
1126
1127 void
1128 panic_print_symbol_name(vm_address_t search)
1129 {
1130 /* try searching in the kernel */
1131 if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) {
1132 /* that failed, now try to search for the right kext */
1133 panic_print_kmod_symbol_name(search);
1134 }
1135 }
1136
1137 /* Generate a backtrace, given a frame pointer - this routine
1138 * should walk the stack safely. The trace is appended to the panic log
1139 * and conditionally, to the console. If the trace contains kernel module
1140 * addresses, display the module name, load address and dependencies.
1141 */
1142
1143 #define DUMPFRAMES 32
1144 #define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
1145 void
1146 panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
1147 {
1148 cframe_t *frame = (cframe_t *)_frame;
1149 vm_offset_t raddrs[DUMPFRAMES];
1150 vm_offset_t PC = 0;
1151 int frame_index;
1152 volatile uint32_t *ppbtcnt = &pbtcnt;
1153 uint64_t bt_tsc_timeout;
1154 boolean_t keepsyms = FALSE;
1155 int cn = cpu_number();
1156 boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
1157
1158 if(pbtcpu != cn) {
1159 hw_atomic_add(&pbtcnt, 1);
1160 /* Spin on print backtrace lock, which serializes output
1161 * Continue anyway if a timeout occurs.
1162 */
1163 hw_lock_to(&pbtlock, ~0U);
1164 pbtcpu = cn;
1165 }
1166
1167 if (__improbable(doprnt_hide_pointers == TRUE)) {
1168 /* If we're called directly, the Debugger() function will not be called,
1169 * so we need to reset the value in here. */
1170 doprnt_hide_pointers = FALSE;
1171 }
1172
1173 panic_check_hook();
1174
1175 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
1176
1177 if (msg != NULL) {
1178 kdb_printf("%s", msg);
1179 }
1180
1181 if ((regdump == TRUE) && (regs != NULL)) {
1182 x86_saved_state64_t *ss64p = saved_state64(regs);
1183 kdb_printf(
1184 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1185 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1186 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1187 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1188 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
1189 ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
1190 ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
1191 ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
1192 ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
1193 ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
1194 ss64p->isf.ss);
1195 PC = ss64p->isf.rip;
1196 }
1197
1198 kdb_printf("Backtrace (CPU %d), "
1199 #if PRINT_ARGS_FROM_STACK_FRAME
1200 "Frame : Return Address (4 potential args on stack)\n", cn);
1201 #else
1202 "Frame : Return Address\n", cn);
1203 #endif
1204
1205 for (frame_index = 0; frame_index < nframes; frame_index++) {
1206 vm_offset_t curframep = (vm_offset_t) frame;
1207
1208 if (!curframep)
1209 break;
1210
1211 if (curframep & 0x3) {
1212 kdb_printf("Unaligned frame\n");
1213 goto invalid;
1214 }
1215
1216 if (!kvtophys(curframep) ||
1217 !kvtophys(curframep + sizeof(cframe_t) - 1)) {
1218 kdb_printf("No mapping exists for frame pointer\n");
1219 goto invalid;
1220 }
1221
1222 kdb_printf("%p : 0x%lx ", frame, frame->caller);
1223 if (frame_index < DUMPFRAMES)
1224 raddrs[frame_index] = frame->caller;
1225
1226 #if PRINT_ARGS_FROM_STACK_FRAME
1227 if (kvtophys((vm_offset_t)&(frame->args[3])))
1228 kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
1229 frame->args[0], frame->args[1],
1230 frame->args[2], frame->args[3]);
1231 #endif
1232
1233 /* Display address-symbol translation only if the "keepsyms"
1234  * boot-arg is supplied, since we unload LINKEDIT otherwise.
1235 * This routine is potentially unsafe; also, function
1236 * boundary identification is unreliable after a strip -x.
1237 */
1238 if (keepsyms)
1239 panic_print_symbol_name((vm_address_t)frame->caller);
1240
1241 kdb_printf("\n");
1242
1243 frame = frame->prev;
1244 }
1245
1246 if (frame_index >= nframes)
1247 kdb_printf("\tBacktrace continues...\n");
1248
1249 goto out;
1250
1251 invalid:
1252 kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame);
1253 out:
1254
1255 /* Identify kernel modules in the backtrace and display their
1256 * load addresses and dependencies. This routine should walk
1257 * the kmod list safely.
1258 */
1259 if (frame_index)
1260 kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);
1261
1262 if (PC != 0)
1263 kmod_panic_dump(&PC, 1);
1264
1265 panic_display_system_configuration();
1266
1267 doprnt_hide_pointers = old_doprnt_hide_pointers;
1268
1269 /* Release print backtrace lock, to permit other callers in the
1270 * event of panics on multiple processors.
1271 */
1272 hw_lock_unlock(&pbtlock);
1273 hw_atomic_sub(&pbtcnt, 1);
1274 /* Wait for other processors to complete output
1275 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1276 */
1277 bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
1278 while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
1279 }
1280
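/*
 * Copy user memory into a kernel buffer by physical address, a page (or
 * less) at a time, so the copy is safe from panic context and never takes
 * a fault.  Returns TRUE only if the entire range was copied.
 */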
1281 static boolean_t
1282 debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
1283 {
1284 size_t rem = size;
1285 char *kvaddr = dest;
1286
1287 while (rem) {
1288 ppnum_t upn = pmap_find_phys(p, uaddr);
1289 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
1290 uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
1291 uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
1292 uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
1293 size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
1294 cur_size = MIN(cur_size, rem);
1295
1296 if (upn && pmap_valid_page(upn) && phys_dest) {
1297 bcopy_phys(phys_src, phys_dest, cur_size);
1298 }
1299 else
1300 break;
1301 uaddr += cur_size;
1302 kvaddr += cur_size;
1303 rem -= cur_size;
1304 }
1305 return (rem == 0);
1306 }
1307
1308 void
1309 print_threads_registers(thread_t thread)
1310 {
1311 x86_saved_state_t *savestate;
1312
1313 savestate = get_user_regs(thread);
1314 kdb_printf(
1315 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1316 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1317 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1318 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1319 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1320 savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx,
1321 savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi,
1322 savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11,
1323 savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15,
1324 savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs,
1325 savestate->ss_64.isf.ss);
1326 }
1327
1328 void
1329 print_tasks_user_threads(task_t task)
1330 {
1331 thread_t thread = current_thread();
1332 x86_saved_state_t *savestate;
1333 pmap_t pmap = 0;
1334 uint64_t rbp;
1335 const char *cur_marker = 0;
1336 int j;
1337
1338 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1339 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
1340
1341 kdb_printf("Thread %d: %p\n", j, thread);
1342 pmap = get_task_pmap(task);
1343 savestate = get_user_regs(thread);
1344 rbp = savestate->ss_64.rbp;
1345 print_one_backtrace(pmap, (vm_offset_t)rbp, cur_marker, TRUE, TRUE);
1346 kdb_printf("\n");
1347 }
1348 }
1349
1350 void
1351 print_thread_num_that_crashed(task_t task)
1352 {
1353 thread_t c_thread = current_thread();
1354 thread_t thread;
1355 int j;
1356
1357 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1358 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
1359
1360 if (c_thread == thread) {
1361 kdb_printf("\nThread %d crashed\n", j);
1362 break;
1363 }
1364 }
1365 }
1366
1367 #define PANICLOG_UUID_BUF_SIZE 256
1368
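/*
 * Print the load addresses and UUIDs of the images dyld has mapped into the
 * task: read the task's dyld_all_image_infos to locate the UUID array, then
 * copy it in PANICLOG_UUID_BUF_SIZE chunks with debug_copyin() and print
 * each (address, UUID) pair.
 */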
1369 void print_uuid_info(task_t task)
1370 {
1371 uint32_t uuid_info_count = 0;
1372 mach_vm_address_t uuid_info_addr = 0;
1373 boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
1374 boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
1375 int task_pid = pid_from_task(task);
1376 char uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0};
1377 char *uuidbufptr = uuidbuf;
1378 uint32_t k;
1379
1380 if (have_pmap && task->active && task_pid > 0) {
1381 /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
1382 struct user64_dyld_all_image_infos task_image_infos;
1383 if (debug_copyin(task->map->pmap, task->all_image_info_addr,
1384 &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
1385 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1386 uuid_info_addr = task_image_infos.uuidArray;
1387 }
1388
1389 /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
1390 * in the middle of updating this data structure), we zero the
1391 * uuid_info_count so that we won't even try to save load info for this task
1392 */
1393 if (!uuid_info_addr) {
1394 uuid_info_count = 0;
1395 }
1396 }
1397
1398 if (task_pid > 0 && uuid_info_count > 0) {
1399 uint32_t uuid_info_size = sizeof(struct user64_dyld_uuid_info);
1400 uint32_t uuid_array_size = uuid_info_count * uuid_info_size;
1401 uint32_t uuid_copy_size = 0;
1402 uint32_t uuid_image_count = 0;
1403 char *current_uuid_buffer = NULL;
1404         /* Copy in the UUID info array. It may be nonresident, in which case just fix up the image count to 0 */
1405
1406 kdb_printf("\nuuid info:\n");
1407 while (uuid_array_size) {
1408 if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) {
1409 uuid_copy_size = uuid_array_size;
1410 uuid_image_count = uuid_array_size/uuid_info_size;
1411 } else {
1412 uuid_image_count = PANICLOG_UUID_BUF_SIZE/uuid_info_size;
1413 uuid_copy_size = uuid_image_count * uuid_info_size;
1414 }
1415 if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr,
1416 uuid_copy_size)) {
1417 kdb_printf("Error!! Failed to copy UUID info for task %p pid %d\n", task, task_pid);
1418 uuid_image_count = 0;
1419 break;
1420 }
1421
1422 if (uuid_image_count > 0) {
1423 current_uuid_buffer = uuidbufptr;
1424 for (k = 0; k < uuid_image_count; k++) {
1425 kdb_printf(" %#llx", *(uint64_t *)current_uuid_buffer);
1426 current_uuid_buffer += sizeof(uint64_t);
1427 uint8_t *uuid = (uint8_t *)current_uuid_buffer;
1428 kdb_printf("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
1429 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8],
1430 uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
1431 current_uuid_buffer += 16;
1432 }
1433 bzero(&uuidbuf, sizeof(uuidbuf));
1434 }
1435 uuid_info_addr += uuid_copy_size;
1436 uuid_array_size -= uuid_copy_size;
1437 }
1438 }
1439 }
1440
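/*
 * Panic-time report for "launchd exited" panics: dump the current task's
 * image UUIDs, identify the crashed thread, print its register state, and
 * print user-space backtraces for all of the task's threads, serialized
 * under the print-backtrace lock.
 */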
1441 void print_launchd_info(void)
1442 {
1443 task_t task = current_task();
1444 thread_t thread = current_thread();
1445 volatile uint32_t *ppbtcnt = &pbtcnt;
1446 uint64_t bt_tsc_timeout;
1447 int cn = cpu_number();
1448
1449 if(pbtcpu != cn) {
1450 hw_atomic_add(&pbtcnt, 1);
1451 /* Spin on print backtrace lock, which serializes output
1452 * Continue anyway if a timeout occurs.
1453 */
1454 hw_lock_to(&pbtlock, ~0U);
1455 pbtcpu = cn;
1456 }
1457
1458 print_uuid_info(task);
1459 print_thread_num_that_crashed(task);
1460 print_threads_registers(thread);
1461 print_tasks_user_threads(task);
1462 kdb_printf("Mac OS version: %s\n", (osversion[0] != 0) ? osversion : "Not yet set");
1463 kdb_printf("Kernel version: %s\n", version);
1464 panic_display_kernel_uuid();
1465 panic_display_model_name();
1466
1467 /* Release print backtrace lock, to permit other callers in the
1468 * event of panics on multiple processors.
1469 */
1470 hw_lock_unlock(&pbtlock);
1471 hw_atomic_sub(&pbtcnt, 1);
1472 /* Wait for other processors to complete output
1473 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1474 */
1475 bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
1476 while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
1477
1478 }