1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 */
59
60 /*
61 * File: model_dep.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 *
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
65 *
66 * Basic initialization for I386 - ISA bus machines.
67 */
68
69
70 #include <mach/i386/vm_param.h>
71
72 #include <string.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_prot.h>
75 #include <mach/machine.h>
76 #include <mach/time_value.h>
77 #include <sys/kdebug.h>
78 #include <kern/spl.h>
79 #include <kern/assert.h>
80 #include <kern/debug.h>
81 #include <kern/misc_protos.h>
82 #include <kern/startup.h>
83 #include <kern/clock.h>
84 #include <kern/cpu_data.h>
85 #include <kern/machine.h>
86 #include <i386/postcode.h>
87 #include <i386/mp_desc.h>
88 #include <i386/misc_protos.h>
89 #include <i386/thread.h>
90 #include <i386/trap.h>
91 #include <i386/machine_routines.h>
92 #include <i386/mp.h> /* mp_rendezvous_break_lock */
93 #include <i386/cpuid.h>
94 #include <i386/fpu.h>
95 #include <i386/machine_cpu.h>
96 #include <i386/pmap.h>
97 #if CONFIG_MTRR
98 #include <i386/mtrr.h>
99 #endif
100 #include <i386/ucode.h>
101 #include <i386/pmCPU.h>
102 #include <i386/panic_hooks.h>
103
104 #include <architecture/i386/pio.h> /* inb() */
105 #include <pexpert/i386/boot.h>
106
107 #include <kdp/kdp_dyld.h>
108 #include <vm/pmap.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_kern.h>
111
112 #include <IOKit/IOPlatformExpert.h>
113 #include <IOKit/IOHibernatePrivate.h>
114
115 #include <pexpert/i386/efi.h>
116
117 #include <kern/thread.h>
118 #include <kern/sched.h>
119 #include <mach-o/loader.h>
120 #include <mach-o/nlist.h>
121
122 #include <libkern/kernel_mach_header.h>
123 #include <libkern/OSKextLibPrivate.h>
124
125 #include <mach/branch_predicates.h>
126
127 #if DEBUG
128 #define DPRINTF(x...) kprintf(x)
129 #else
130 #define DPRINTF(x...)
131 #endif
132
133 static void machine_conf(void);
134 void panic_print_symbol_name(vm_address_t search);
135
136 extern boolean_t init_task_died;
137 extern const char version[];
138 extern char osversion[];
139 extern int max_unsafe_quanta;
140 extern int max_poll_quanta;
141 extern unsigned int panic_is_inited;
142
143 extern int proc_pid(void *p);
144
145 /* Definitions for frame pointers */
146 #define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
147 #define FP_LR_OFFSET ((uint32_t)4)
148 #define FP_LR_OFFSET64 ((uint32_t)8)
149 #define FP_MAX_NUM_TO_EVALUATE (50)
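/*
 * These constants drive the frame-pointer walk in print_one_backtrace():
 * each frame starts with the caller's saved frame pointer, and the return
 * address ("LR", borrowing the ARM name used below) immediately follows it,
 * at +4 for 32-bit frames and +8 for 64-bit frames. FP_ALIGNMENT_MASK
 * rejects unaligned frame pointers, and FP_MAX_NUM_TO_EVALUATE bounds the
 * walk so a corrupted frame chain cannot spin forever.
 */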
150
151 int db_run_mode;
152
153 volatile int pbtcpu = -1;
154 hw_lock_data_t pbtlock; /* backtrace print lock */
155 uint32_t pbtcnt = 0;
156
157 volatile int panic_double_fault_cpu = -1;
158
159 #define PRINT_ARGS_FROM_STACK_FRAME 0
160
161 typedef struct _cframe_t {
162 struct _cframe_t *prev;
163 uintptr_t caller;
164 #if PRINT_ARGS_FROM_STACK_FRAME
165 unsigned args[0];
166 #endif
167 } cframe_t;
168
169 static unsigned panic_io_port;
170 static unsigned commit_paniclog_to_nvram;
171
172 unsigned int debug_boot_arg;
173
174 /*
175 * Print a single backtrace, walking the frame chain that starts at topfp.
176 */
177 void
178 print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
179 boolean_t is_64_bit, boolean_t nvram_format)
180 {
181 int i = 0;
182 addr64_t lr;
183 addr64_t fp;
184 addr64_t fp_for_ppn;
185 ppnum_t ppn;
186 boolean_t dump_kernel_stack;
187
188 fp = topfp;
189 fp_for_ppn = 0;
190 ppn = (ppnum_t)NULL;
191
192 if (fp >= VM_MIN_KERNEL_ADDRESS)
193 dump_kernel_stack = TRUE;
194 else
195 dump_kernel_stack = FALSE;
196
197 do {
198 if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
199 break;
200 if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
201 break;
202 if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
203 break;
204
205 /* Check to see if the current address will result in a different
206 ppn than previously computed (to avoid recomputation) via
207 ((addr ^ fp_for_ppn) >> PAGE_SHIFT) */
208
209 if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
210 ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
211 fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
212 }
213 if (ppn != (ppnum_t)NULL) {
214 if (is_64_bit) {
215 lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
216 } else {
217 lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
218 }
219 } else {
220 if (is_64_bit) {
221 kdb_printf("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
222 } else {
223 kdb_printf("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
224 }
225 break;
226 }
227 if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
228 ppn = pmap_find_phys(pmap, fp);
229 fp_for_ppn = fp;
230 }
231 if (ppn != (ppnum_t)NULL) {
232 if (is_64_bit) {
233 fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
234 } else {
235 fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
236 }
237 } else {
238 if (is_64_bit) {
239 kdb_printf("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
240 } else {
241 kdb_printf("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
242 }
243 break;
244 }
245
246 if (nvram_format) {
247 if (is_64_bit) {
248 kdb_printf("%s\t0x%016llx\n", cur_marker, lr);
249 } else {
250 kdb_printf("%s\t0x%08x\n", cur_marker, (uint32_t)lr);
251 }
252 } else {
253 if (is_64_bit) {
254 kdb_printf("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
255 } else {
256 kdb_printf("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
257 }
258 }
259 } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
260 }
261 void
262 machine_startup(void)
263 {
264 int boot_arg;
265
266 #if 0
267 if( PE_get_hotkey( kPEControlKey ))
268 halt_in_debugger = halt_in_debugger ? 0 : 1;
269 #endif
270
271 if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) {
272 panicDebugging = TRUE;
273 #if DEVELOPMENT || DEBUG
274 if (debug_boot_arg & DB_HALT) halt_in_debugger=1;
275 #endif
276 if (debug_boot_arg & DB_PRT) disable_debug_output=FALSE;
277 if (debug_boot_arg & DB_SLOG) systemLogDiags=TRUE;
278 if (debug_boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
279 #if KDEBUG_MOJO_TRACE
280 if (debug_boot_arg & DB_PRT_KDEBUG) {
281 kdebug_serial = TRUE;
282 disable_debug_output = FALSE;
283 }
284 #endif
285 } else {
286 debug_boot_arg = 0;
287 }
288
289 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
290 commit_paniclog_to_nvram = 1;
291
292 /*
293 * Entering the debugger will put the CPUs into a "safe"
294 * power mode.
295 */
296 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg)))
297 pmsafe_debug = boot_arg;
298
299 #if NOTYET
300 hw_lock_init(&debugger_lock); /* initialize debugger lock */
301 #endif
302 hw_lock_init(&pbtlock); /* initialize print backtrace lock */
303
304 if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
305 default_preemption_rate = boot_arg;
306 }
307 if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
308 max_unsafe_quanta = boot_arg;
309 }
310 if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
311 max_poll_quanta = boot_arg;
312 }
313 if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
314 sched_poll_yield_shift = boot_arg;
315 }
316 /* The I/O port to issue a read from, in the event of a panic. Useful for
317 * triggering logic analyzers.
318 */
319 if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) {
320 /* I/O ports range from 0 through 0xFFFF */
321 panic_io_port = boot_arg & 0xffff;
322 }
323
324 machine_conf();
325
326 panic_hooks_init();
327
328 /*
329 * Start the system.
330 */
331 kernel_bootstrap();
332 /*NOTREACHED*/
333 }
334
335
336 static void
337 machine_conf(void)
338 {
339 machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
340 }
341
342
343 extern void *gPEEFIRuntimeServices;
344 extern void *gPEEFISystemTable;
345
346 /*-
347 * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
348 * code or tables extracted from it, as desired without restriction.
349 *
350 * First, the polynomial itself and its table of feedback terms. The
351 * polynomial is
352 * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
353 *
354 * Note that we take it "backwards" and put the highest-order term in
355 * the lowest-order bit. The X^32 term is "implied"; the LSB is the
356 * X^31 term, etc. The X^0 term (usually shown as "+1") results in
357 * the MSB being 1
358 *
359 * Note that the usual hardware shift register implementation, which
360 * is what we're using (we're merely optimizing it by doing eight-bit
361 * chunks at a time) shifts bits into the lowest-order term. In our
362 * implementation, that means shifting towards the right. Why do we
363 * do it this way? Because the calculated CRC must be transmitted in
364 * order from highest-order term to lowest-order term. UARTs transmit
365 * characters in order from LSB to MSB. By storing the CRC this way
366 * we hand it to the UART in the order low-byte to high-byte; the UART
367 * sends each low-bit to high-bit; and the result is transmission bit
368 * by bit from highest- to lowest-order term without requiring any bit
369 * shuffling on our part. Reception works similarly.
370 *
371 * The feedback terms table consists of 256 32-bit entries. Notes:
372 *
373 * The table can be generated at runtime if desired; code to do so
374 * is shown later. It might not be obvious, but the feedback
375 * terms simply represent the results of eight shift/xor operations
376 * for all combinations of data and CRC register values.
377 *
378 * The values must be right-shifted by eight bits by the "updcrc"
379 * logic; the shift must be unsigned (bring in zeroes). On some
380 * hardware you could probably optimize the shift in assembler by
381 * using byte-swap instructions.
382 * polynomial $edb88320
383 *
384 *
385 * CRC32 code derived from work by Gary S. Brown.
386 */
387
388 static uint32_t crc32_tab[] = {
389 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
390 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
391 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
392 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
393 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
394 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
395 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
396 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
397 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
398 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
399 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
400 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
401 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
402 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
403 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
404 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
405 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
406 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
407 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
408 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
409 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
410 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
411 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
412 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
413 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
414 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
415 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
416 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
417 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
418 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
419 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
420 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
421 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
422 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
423 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
424 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
425 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
426 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
427 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
428 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
429 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
430 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
431 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
432 };
433
434 static uint32_t
435 crc32(uint32_t crc, const void *buf, size_t size)
436 {
437 const uint8_t *p;
438
439 p = buf;
440 crc = crc ^ ~0U;
441
442 while (size--)
443 crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
444
445 return crc ^ ~0U;
446 }
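/*
 * Illustrative sketch, not part of the build: the table comment above
 * notes that the feedback terms can be generated at runtime instead of
 * being stored statically. A minimal generator for the reflected
 * polynomial 0xedb88320 would look like this; it reproduces crc32_tab[].
 */
#if 0
static void
crc32_generate_table(uint32_t tab[256])
{
	uint32_t c;
	unsigned int n, k;

	for (n = 0; n < 256; n++) {
		c = (uint32_t)n;
		/* eight shift/xor steps per possible input byte value */
		for (k = 0; k < 8; k++)
			c = (c & 1) ? (0xedb88320U ^ (c >> 1)) : (c >> 1);
		tab[n] = c;
	}
}
#endif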
447
448 static void
449 efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
450 {
451 EFI_RUNTIME_SERVICES_64 *runtime;
452 uint32_t hdr_cksum;
453 uint32_t cksum;
454
455 DPRINTF("Processing 64-bit EFI tables at %p\n", system_table);
456 do {
457 DPRINTF("Header:\n");
458 DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
459 DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
460 DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
461 DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
462 DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices);
463 if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
464 kprintf("Bad EFI system table signature\n");
465 break;
466 }
467 // Verify the checksum of the system table
468 hdr_cksum = system_table->Hdr.CRC32;
469 system_table->Hdr.CRC32 = 0;
470 cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);
471
472 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
473 system_table->Hdr.CRC32 = hdr_cksum;
474 if (cksum != hdr_cksum) {
475 kprintf("Bad EFI system table checksum\n");
476 break;
477 }
478
479 gPEEFISystemTable = system_table;
480
481 if(system_table->RuntimeServices == 0) {
482 kprintf("No runtime table present\n");
483 break;
484 }
485 DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
486 // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
487 runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
488 DPRINTF("Checking runtime services table %p\n", runtime);
489 if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
490 kprintf("Bad EFI runtime table signature\n");
491 break;
492 }
493
494 // Verify the checksum of the runtime services table
495 hdr_cksum = runtime->Hdr.CRC32;
496 runtime->Hdr.CRC32 = 0;
497 cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);
498
499 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
500 runtime->Hdr.CRC32 = hdr_cksum;
501 if (cksum != hdr_cksum) {
502 kprintf("Bad EFI runtime table checksum\n");
503 break;
504 }
505
506 gPEEFIRuntimeServices = runtime;
507 }
508 while (FALSE);
509 }
510
511 static void
512 efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
513 {
514 EFI_RUNTIME_SERVICES_32 *runtime;
515 uint32_t hdr_cksum;
516 uint32_t cksum;
517
518 DPRINTF("Processing 32-bit EFI tables at %p\n", system_table);
519 do {
520 DPRINTF("Header:\n");
521 DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
522 DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
523 DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
524 DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
525 DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices);
526 if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
527 kprintf("Bad EFI system table signature\n");
528 break;
529 }
530 // Verify the checksum of the system table
531 hdr_cksum = system_table->Hdr.CRC32;
532 system_table->Hdr.CRC32 = 0;
533 DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize);
534 cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);
535
536 DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
537 system_table->Hdr.CRC32 = hdr_cksum;
538 if (cksum != hdr_cksum) {
539 kprintf("Bad EFI system table checksum\n");
540 break;
541 }
542
543 gPEEFISystemTable = system_table;
544
545 if(system_table->RuntimeServices == 0) {
546 kprintf("No runtime table present\n");
547 break;
548 }
549 DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
550 // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
551 // For a 64-bit kernel, booter provides a virtual address mod 4G
552 runtime = (EFI_RUNTIME_SERVICES_32 *)
553 (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
554 DPRINTF("Runtime table addressed at %p\n", runtime);
555 if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
556 kprintf("Bad EFI runtime table signature\n");
557 break;
558 }
559
560 // Verify the checksum of the runtime services table
561 hdr_cksum = runtime->Hdr.CRC32;
562 runtime->Hdr.CRC32 = 0;
563 cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);
564
565 DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
566 runtime->Hdr.CRC32 = hdr_cksum;
567 if (cksum != hdr_cksum) {
568 kprintf("Bad EFI runtime table checksum\n");
569 break;
570 }
571
572 DPRINTF("Runtime functions\n");
573 DPRINTF(" GetTime : 0x%x\n", runtime->GetTime);
574 DPRINTF(" SetTime : 0x%x\n", runtime->SetTime);
575 DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime);
576 DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime);
577 DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap);
578 DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer);
579 DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable);
580 DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName);
581 DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable);
582 DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount);
583 DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem);
584
585 gPEEFIRuntimeServices = runtime;
586 }
587 while (FALSE);
588 }
589
590
591 /* Map in EFI runtime areas. */
592 static void
593 efi_init(void)
594 {
595 boot_args *args = (boot_args *)PE_state.bootArgs;
596
597 kprintf("Initializing EFI runtime services\n");
598
599 do
600 {
601 vm_offset_t vm_size, vm_addr;
602 vm_map_offset_t phys_addr;
603 EfiMemoryRange *mptr;
604 unsigned int msize, mcount;
605 unsigned int i;
606
607 msize = args->MemoryMapDescriptorSize;
608 mcount = args->MemoryMapSize / msize;
609
610 DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
611 args->kaddr, args->ksize);
612 DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
613 args->efiSystemTable,
614 (void *) ml_static_ptovirt(args->efiSystemTable));
615 DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
616 args->efiRuntimeServicesPageStart);
617 DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
618 args->efiRuntimeServicesPageCount);
619 DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
620 args->efiRuntimeServicesVirtualPageStart);
621 mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
622 for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
623 if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
624 vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
625 vm_addr = (vm_offset_t) mptr->VirtualStart;
626 /* For K64 on EFI32, shadow-map into high KVA */
627 if (vm_addr < VM_MIN_KERNEL_ADDRESS)
628 vm_addr |= VM_MIN_KERNEL_ADDRESS;
629 phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
630 DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
631 mptr->Type,
632 (void *) (uintptr_t) phys_addr,
633 (void *) (uintptr_t) mptr->VirtualStart,
634 (void *) vm_addr,
635 (void *) vm_size);
636 pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size),
637 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
638 (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
639 }
640 }
641
642 if (args->Version != kBootArgsVersion2)
643 panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
644
645 DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
646 if (args->efiMode == kBootArgsEfiMode64) {
647 efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
648 } else {
649 efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
650 }
651 }
652 while (FALSE);
653
654 return;
655 }
656
657 /* Remap EFI runtime areas. */
658 void
659 hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_offset)
660 {
661 boot_args *args = (boot_args *)PE_state.bootArgs;
662
663 kprintf("Reinitializing EFI runtime services\n");
664
665 do
666 {
667 vm_offset_t vm_size, vm_addr;
668 vm_map_offset_t phys_addr;
669 EfiMemoryRange *mptr;
670 unsigned int msize, mcount;
671 unsigned int i;
672
673 gPEEFISystemTable = 0;
674 gPEEFIRuntimeServices = 0;
675
676 system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);
677
678 kprintf("Old system table 0x%x, new 0x%x\n",
679 (uint32_t)args->efiSystemTable, system_table_offset);
680
681 args->efiSystemTable = system_table_offset;
682
683 kprintf("Old map:\n");
684 msize = args->MemoryMapDescriptorSize;
685 mcount = args->MemoryMapSize / msize;
686 mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
687 for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
688 if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
689
690 vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
691 vm_addr = (vm_offset_t) mptr->VirtualStart;
692 /* K64 on EFI32 */
693 if (vm_addr < VM_MIN_KERNEL_ADDRESS)
694 vm_addr |= VM_MIN_KERNEL_ADDRESS;
695 phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
696
697 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
698 }
699 }
700
701 pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart),
702 i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount));
703
704 kprintf("New map:\n");
705 msize = args->MemoryMapDescriptorSize;
706 mcount = (unsigned int )(map_size / msize);
707 mptr = map;
708 for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
709 if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
710
711 vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
712 vm_addr = (vm_offset_t) mptr->VirtualStart;
713 if (vm_addr < VM_MIN_KERNEL_ADDRESS)
714 vm_addr |= VM_MIN_KERNEL_ADDRESS;
715 phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
716
717 kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
718
719 pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
720 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
721 (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
722 }
723 }
724
725 if (args->Version != kBootArgsVersion2)
726 panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
727
728 kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
729 if (args->efiMode == kBootArgsEfiMode64) {
730 efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
731 } else {
732 efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
733 }
734 }
735 while (FALSE);
736
737 kprintf("Done reinitializing EFI runtime services\n");
738
739 return;
740 }
741
742 /*
743 * Find devices. The system is alive.
744 */
745 void
746 machine_init(void)
747 {
748 /* Now with VM up, switch to dynamically allocated cpu data */
749 cpu_data_realloc();
750
751 /* Ensure panic buffer is initialized. */
752 debug_log_init();
753
754 /*
755 * Display CPU identification
756 */
757 cpuid_cpu_display("CPU identification");
758 cpuid_feature_display("CPU features");
759 cpuid_extfeature_display("CPU extended features");
760
761 /*
762 * Initialize EFI runtime services.
763 */
764 efi_init();
765
766 smp_init();
767
768 /*
769 * Set up to use floating point.
770 */
771 init_fpu();
772
773 /*
774 * Configure clock devices.
775 */
776 clock_config();
777
778 #if CONFIG_MTRR
779 /*
780 * Initialize MTRR from boot processor.
781 */
782 mtrr_init();
783
784 /*
785 * Set up PAT for boot processor.
786 */
787 pat_init();
788 #endif
789
790 /*
791 * Free lowmem pages and complete other setup
792 */
793 pmap_lowmem_finalize();
794 }
795
796 /*
797 * Halt a cpu.
798 */
799 void
800 halt_cpu(void)
801 {
802 halt_all_cpus(FALSE);
803 }
804
805 int reset_mem_on_reboot = 1;
806
807 /*
808 * Halt the system or reboot.
809 */
810 void
811 halt_all_cpus(boolean_t reboot)
812 {
813 if (reboot) {
814 printf("MACH Reboot\n");
815 PEHaltRestart( kPERestartCPU );
816 } else {
817 printf("CPU halted\n");
818 PEHaltRestart( kPEHaltCPU );
819 }
820 while(1);
821 }
822
823
824 /* Issue an I/O port read if one has been requested - this is an event logic
825 * analyzers can use as a trigger point.
826 */
827
828 void
829 panic_io_port_read(void) {
830 if (panic_io_port)
831 (void)inb(panic_io_port);
832 }
833
834 /* For use with the MP rendezvous mechanism
835 */
836
837 uint64_t panic_restart_timeout = ~(0ULL);
838
839 #define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)
840
841 static void
842 machine_halt_cpu(void) {
843 uint64_t deadline;
844
845 panic_io_port_read();
846
847 /* Halt here forever if we're not rebooting */
848 if (!PE_reboot_on_panic() && panic_restart_timeout == ~(0ULL)) {
849 pmCPUHalt(PM_HALT_DEBUG);
850 return;
851 }
852
853 if (PE_reboot_on_panic())
854 deadline = mach_absolute_time() + PANIC_RESTART_TIMEOUT;
855 else
856 deadline = mach_absolute_time() + panic_restart_timeout;
857
858 while (mach_absolute_time() < deadline)
859 cpu_pause();
860
861 kprintf("Invoking PE_halt_restart\n");
862 /* Attempt restart via ACPI RESET_REG; at the time of this
863 * writing, this routine is chained through AppleSMC->
864 * AppleACPIPlatform
865 */
866 if (PE_halt_restart)
867 (*PE_halt_restart)(kPERestartCPU);
868 pmCPUHalt(PM_HALT_DEBUG);
869 }
870
871 static int pid_from_task(task_t task)
872 {
873 int pid = -1;
874
875 if (task->bsd_info)
876 pid = proc_pid(task->bsd_info);
877
878 return pid;
879 }
880
881 void
882 DebuggerWithContext(
883 __unused unsigned int reason,
884 __unused void *ctx,
885 const char *message)
886 {
887 Debugger(message);
888 }
889
890 void
891 Debugger(
892 const char *message)
893 {
894 unsigned long pi_size = 0;
895 void *stackptr;
896 int cn = cpu_number();
897 task_t task = current_task();
898 int task_pid = pid_from_task(task);
899 boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
900
901 hw_atomic_add(&debug_mode, 1);
902 if (!panic_is_inited) {
903 postcode(PANIC_HLT);
904 asm("hlt");
905 }
906
907 doprnt_hide_pointers = FALSE;
908
909 printf("Debugger called: <%s>\n", message);
910 kprintf("Debugger called: <%s>\n", message);
911
912 /*
913 * Skip the graphical panic box if no panic string.
914 * This is the case if we're being called from
915 * host_reboot(,HOST_REBOOT_DEBUGGER)
916 * as a quiet way into the debugger.
917 */
918
919 if (panicstr) {
920 disable_preemption();
921
922 /* Issue an I/O port read if one has been requested - this is an event logic
923 * analyzers can use as a trigger point.
924 */
925 panic_io_port_read();
926
927 /* Obtain current frame pointer */
928 __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
929
930 /* Print backtrace - callee is internally synchronized */
931 if (task_pid == 1 && (init_task_died)) {
932 /* Special handling of launchd died panics */
933 print_launchd_info();
934 } else {
935 panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80: 48), NULL, FALSE, NULL);
936 }
937
938 /* everything should be printed now so copy to NVRAM
939 */
940
941 if( debug_buf_size > 0) {
942 /* Optionally sync the panic log, if any, to NVRAM
943 * This is the default.
944 */
945 if (commit_paniclog_to_nvram) {
946 unsigned int bufpos;
947 uintptr_t cr0;
948
949 debug_putc(0);
950
951 /* Now call the compressor */
952 /* XXX Consider using the WKdm compressor in the
953 * future, rather than just packing - would need to
954 * be co-ordinated with crashreporter, which decodes
955 * this post-restart. The compressor should be
956 * capable of in-place compression.
957 */
958 bufpos = packA(debug_buf,
959 (unsigned int) (debug_buf_ptr - debug_buf), debug_buf_size);
960 /* If compression was successful,
961 * use the compressed length
962 */
963 pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);
964
965 /* Save panic log to non-volatile store
966 * Panic info handler must truncate data that is
967 * too long for this platform.
968 * This call must save data synchronously,
969 * since we can subsequently halt the system.
970 */
971
972
973 /* The following sequence is a workaround for:
974 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
975 * any routines that use floating point (MMX in this case) when saving panic
976 * logs to nvram/flash.
977 */
978 cr0 = get_cr0();
979 clear_ts();
980
981 kprintf("Attempting to commit panic log to NVRAM\n");
982 pi_size = PESavePanicInfo((unsigned char *)debug_buf,
983 (uint32_t)pi_size );
984 set_cr0(cr0);
985
986 /* Uncompress in-place, to permit examination of
987 * the panic log by debuggers.
988 */
989
990 if (bufpos) {
991 unpackA(debug_buf, bufpos);
992 }
993 }
994 }
995
996 if (!panicDebugging) {
997 unsigned cnum;
998 /* Clear the MP rendezvous function lock, in the event
999 * that a panic occurred while in that codepath.
1000 */
1001 mp_rendezvous_break_lock();
1002
1003 /* Non-maskably interrupt all other processors
1004 * If a restart timeout is specified, this processor
1005 * will attempt a restart.
1006 */
1007 kprintf("Invoking machine_halt_cpu on CPU %d\n", cn);
1008 for (cnum = 0; cnum < real_ncpus; cnum++) {
1009 if (cnum != (unsigned) cn) {
1010 cpu_NMI_interrupt(cnum);
1011 }
1012 }
1013 machine_halt_cpu();
1014 /* NOT REACHED */
1015 }
1016 }
1017
1018 doprnt_hide_pointers = old_doprnt_hide_pointers;
1019 __asm__("int3");
1020 hw_atomic_sub(&debug_mode, 1);
1021 }
1022
1023 char *
1024 machine_boot_info(char *buf, __unused vm_size_t size)
1025 {
1026 *buf ='\0';
1027 return buf;
1028 }
1029
1030 /* Routines for address - symbol translation. Not called unless the "keepsyms"
1031 * boot-arg is supplied.
1032 */
1033
1034 static int
1035 panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name)
1036 {
1037 kernel_nlist_t *sym = NULL;
1038 struct load_command *cmd;
1039 kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL;
1040 struct symtab_command *orig_st = NULL;
1041 unsigned int i;
1042 char *strings, *bestsym = NULL;
1043 vm_address_t bestaddr = 0, diff, curdiff;
1044
1045 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
1046
1047 cmd = (struct load_command *) &mh[1];
1048 for (i = 0; i < mh->ncmds; i++) {
1049 if (cmd->cmd == LC_SEGMENT_KERNEL) {
1050 kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;
1051
1052 if (strncmp(SEG_TEXT, orig_sg->segname,
1053 sizeof(orig_sg->segname)) == 0)
1054 orig_ts = orig_sg;
1055 else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
1056 sizeof(orig_sg->segname)) == 0)
1057 orig_le = orig_sg;
1058 else if (strncmp("", orig_sg->segname,
1059 sizeof(orig_sg->segname)) == 0)
1060 orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */
1061 }
1062 else if (cmd->cmd == LC_SYMTAB)
1063 orig_st = (struct symtab_command *) cmd;
1064
1065 cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
1066 }
1067
1068 if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
1069 return 0;
1070
1071 if ((search < orig_ts->vmaddr) ||
1072 (search >= orig_ts->vmaddr + orig_ts->vmsize)) {
1073 /* search out of range for this mach header */
1074 return 0;
1075 }
1076
1077 sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
1078 strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
1079 diff = search;
1080
1081 for (i = 0; i < orig_st->nsyms; i++) {
1082 if (sym[i].n_type & N_STAB) continue;
1083
1084 if (sym[i].n_value <= search) {
1085 curdiff = search - (vm_address_t)sym[i].n_value;
1086 if (curdiff < diff) {
1087 diff = curdiff;
1088 bestaddr = sym[i].n_value;
1089 bestsym = strings + sym[i].n_un.n_strx;
1090 }
1091 }
1092 }
1093
1094 if (bestsym != NULL) {
1095 if (diff != 0) {
1096 kdb_printf("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff);
1097 } else {
1098 kdb_printf("%s : %s", module_name, bestsym);
1099 }
1100 return 1;
1101 }
1102 return 0;
1103 }
1104
1105 extern kmod_info_t * kmod; /* the list of modules */
1106
1107 static void
1108 panic_print_kmod_symbol_name(vm_address_t search)
1109 {
1110 u_int i;
1111
1112 if (gLoadedKextSummaries == NULL)
1113 return;
1114 for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
1115 OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i;
1116
1117 if ((search >= summary->address) &&
1118 (search < (summary->address + summary->size)))
1119 {
1120 kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address;
1121 if (panic_print_macho_symbol_name(header, search, summary->name) == 0) {
1122 kdb_printf("%s + %llu", summary->name, (unsigned long)search - summary->address);
1123 }
1124 break;
1125 }
1126 }
1127 }
1128
1129 void
1130 panic_print_symbol_name(vm_address_t search)
1131 {
1132 /* try searching in the kernel */
1133 if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) {
1134 /* that failed, now try to search for the right kext */
1135 panic_print_kmod_symbol_name(search);
1136 }
1137 }
1138
1139 /* Generate a backtrace, given a frame pointer - this routine
1140 * should walk the stack safely. The trace is appended to the panic log
1141 * and conditionally, to the console. If the trace contains kernel module
1142 * addresses, display the module name, load address and dependencies.
1143 */
1144
1145 #define DUMPFRAMES 32
1146 #define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
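/*
 * PBT_TIMEOUT_CYCLES is expressed in TSC ticks (roughly a couple of
 * seconds on contemporary parts). It bounds the spin at the end of
 * panic_i386_backtrace() and print_launchd_info() that waits for other
 * processors to finish emitting their own backtraces.
 */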
1147 void
1148 panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
1149 {
1150 cframe_t *frame = (cframe_t *)_frame;
1151 vm_offset_t raddrs[DUMPFRAMES];
1152 vm_offset_t PC = 0;
1153 int frame_index;
1154 volatile uint32_t *ppbtcnt = &pbtcnt;
1155 uint64_t bt_tsc_timeout;
1156 boolean_t keepsyms = FALSE;
1157 int cn = cpu_number();
1158 boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
1159
1160 if(pbtcpu != cn) {
1161 hw_atomic_add(&pbtcnt, 1);
1162 /* Spin on print backtrace lock, which serializes output
1163 * Continue anyway if a timeout occurs.
1164 */
1165 hw_lock_to(&pbtlock, ~0U);
1166 pbtcpu = cn;
1167 }
1168
1169 if (__improbable(doprnt_hide_pointers == TRUE)) {
1170 /* If we're called directly, the Debugger() function will not be called,
1171 * so we need to reset the value in here. */
1172 doprnt_hide_pointers = FALSE;
1173 }
1174
1175 panic_check_hook();
1176
1177 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
1178
1179 if (msg != NULL) {
1180 kdb_printf("%s", msg);
1181 }
1182
1183 if ((regdump == TRUE) && (regs != NULL)) {
1184 x86_saved_state64_t *ss64p = saved_state64(regs);
1185 kdb_printf(
1186 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1187 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1188 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1189 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1190 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
1191 ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
1192 ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
1193 ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
1194 ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
1195 ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
1196 ss64p->isf.ss);
1197 PC = ss64p->isf.rip;
1198 }
1199
1200 kdb_printf("Backtrace (CPU %d), "
1201 #if PRINT_ARGS_FROM_STACK_FRAME
1202 "Frame : Return Address (4 potential args on stack)\n", cn);
1203 #else
1204 "Frame : Return Address\n", cn);
1205 #endif
1206
1207 for (frame_index = 0; frame_index < nframes; frame_index++) {
1208 vm_offset_t curframep = (vm_offset_t) frame;
1209
1210 if (!curframep)
1211 break;
1212
1213 if (curframep & 0x3) {
1214 kdb_printf("Unaligned frame\n");
1215 goto invalid;
1216 }
1217
1218 if (!kvtophys(curframep) ||
1219 !kvtophys(curframep + sizeof(cframe_t) - 1)) {
1220 kdb_printf("No mapping exists for frame pointer\n");
1221 goto invalid;
1222 }
1223
1224 kdb_printf("%p : 0x%lx ", frame, frame->caller);
1225 if (frame_index < DUMPFRAMES)
1226 raddrs[frame_index] = frame->caller;
1227
1228 #if PRINT_ARGS_FROM_STACK_FRAME
1229 if (kvtophys((vm_offset_t)&(frame->args[3])))
1230 kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
1231 frame->args[0], frame->args[1],
1232 frame->args[2], frame->args[3]);
1233 #endif
1234
1235 /* Display address-symbol translation only if the "keepsyms"
1236 * boot-arg is supplied, since we unload LINKEDIT otherwise.
1237 * This routine is potentially unsafe; also, function
1238 * boundary identification is unreliable after a strip -x.
1239 */
1240 if (keepsyms)
1241 panic_print_symbol_name((vm_address_t)frame->caller);
1242
1243 kdb_printf("\n");
1244
1245 frame = frame->prev;
1246 }
1247
1248 if (frame_index >= nframes)
1249 kdb_printf("\tBacktrace continues...\n");
1250
1251 goto out;
1252
1253 invalid:
1254 kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame);
1255 out:
1256
1257 /* Identify kernel modules in the backtrace and display their
1258 * load addresses and dependencies. This routine should walk
1259 * the kmod list safely.
1260 */
1261 if (frame_index)
1262 kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);
1263
1264 if (PC != 0)
1265 kmod_panic_dump(&PC, 1);
1266
1267 panic_display_system_configuration();
1268
1269 doprnt_hide_pointers = old_doprnt_hide_pointers;
1270
1271 /* Release print backtrace lock, to permit other callers in the
1272 * event of panics on multiple processors.
1273 */
1274 hw_lock_unlock(&pbtlock);
1275 hw_atomic_sub(&pbtcnt, 1);
1276 /* Wait for other processors to complete output
1277 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1278 */
1279 bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
1280 while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
1281 }
1282
1283 static boolean_t
1284 debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
1285 {
1286 size_t rem = size;
1287 char *kvaddr = dest;
1288
1289 while (rem) {
1290 ppnum_t upn = pmap_find_phys(p, uaddr);
1291 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
1292 uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
1293 uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
1294 uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
1295 size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
1296 cur_size = MIN(cur_size, rem);
1297
1298 if (upn && pmap_valid_page(upn) && phys_dest) {
1299 bcopy_phys(phys_src, phys_dest, cur_size);
1300 }
1301 else
1302 break;
1303 uaddr += cur_size;
1304 kvaddr += cur_size;
1305 rem -= cur_size;
1306 }
1307 return (rem == 0);
1308 }
1309
1310 void
1311 print_threads_registers(thread_t thread)
1312 {
1313 x86_saved_state_t *savestate;
1314
1315 savestate = get_user_regs(thread);
1316 kdb_printf(
1317 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1318 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1319 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1320 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1321 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1322 savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx,
1323 savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi,
1324 savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11,
1325 savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15,
1326 savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs,
1327 savestate->ss_64.isf.ss);
1328 }
1329
1330 void
1331 print_tasks_user_threads(task_t task)
1332 {
1333 thread_t thread = current_thread();
1334 x86_saved_state_t *savestate;
1335 pmap_t pmap = 0;
1336 uint64_t rbp;
1337 const char *cur_marker = 0;
1338 int j;
1339
1340 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1341 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
1342
1343 kdb_printf("Thread %d: %p\n", j, thread);
1344 pmap = get_task_pmap(task);
1345 savestate = get_user_regs(thread);
1346 rbp = savestate->ss_64.rbp;
1347 print_one_backtrace(pmap, (vm_offset_t)rbp, cur_marker, TRUE, TRUE);
1348 kdb_printf("\n");
1349 }
1350 }
1351
1352 void
1353 print_thread_num_that_crashed(task_t task)
1354 {
1355 thread_t c_thread = current_thread();
1356 thread_t thread;
1357 int j;
1358
1359 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1360 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
1361
1362 if (c_thread == thread) {
1363 kdb_printf("\nThread %d crashed\n", j);
1364 break;
1365 }
1366 }
1367 }
1368
1369 #define PANICLOG_UUID_BUF_SIZE 256
1370
1371 void print_uuid_info(task_t task)
1372 {
1373 uint32_t uuid_info_count = 0;
1374 mach_vm_address_t uuid_info_addr = 0;
1375 boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
1376 boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
1377 int task_pid = pid_from_task(task);
1378 char uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0};
1379 char *uuidbufptr = uuidbuf;
1380 uint32_t k;
1381
1382 if (have_pmap && task->active && task_pid > 0) {
1383 /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
1384 struct user64_dyld_all_image_infos task_image_infos;
1385 if (debug_copyin(task->map->pmap, task->all_image_info_addr,
1386 &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
1387 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1388 uuid_info_addr = task_image_infos.uuidArray;
1389 }
1390
1391 /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
1392 * in the middle of updating this data structure), we zero the
1393 * uuid_info_count so that we won't even try to save load info for this task
1394 */
1395 if (!uuid_info_addr) {
1396 uuid_info_count = 0;
1397 }
1398 }
1399
1400 if (task_pid > 0 && uuid_info_count > 0) {
1401 uint32_t uuid_info_size = sizeof(struct user64_dyld_uuid_info);
1402 uint32_t uuid_array_size = uuid_info_count * uuid_info_size;
1403 uint32_t uuid_copy_size = 0;
1404 uint32_t uuid_image_count = 0;
1405 char *current_uuid_buffer = NULL;
1406 /* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */
1407
1408 kdb_printf("\nuuid info:\n");
1409 while (uuid_array_size) {
1410 if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) {
1411 uuid_copy_size = uuid_array_size;
1412 uuid_image_count = uuid_array_size/uuid_info_size;
1413 } else {
1414 uuid_image_count = PANICLOG_UUID_BUF_SIZE/uuid_info_size;
1415 uuid_copy_size = uuid_image_count * uuid_info_size;
1416 }
1417 if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr,
1418 uuid_copy_size)) {
1419 kdb_printf("Error!! Failed to copy UUID info for task %p pid %d\n", task, task_pid);
1420 uuid_image_count = 0;
1421 break;
1422 }
1423
1424 if (uuid_image_count > 0) {
1425 current_uuid_buffer = uuidbufptr;
1426 for (k = 0; k < uuid_image_count; k++) {
1427 kdb_printf(" %#llx", *(uint64_t *)current_uuid_buffer);
1428 current_uuid_buffer += sizeof(uint64_t);
1429 uint8_t *uuid = (uint8_t *)current_uuid_buffer;
1430 kdb_printf("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
1431 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8],
1432 uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
1433 current_uuid_buffer += 16;
1434 }
1435 bzero(&uuidbuf, sizeof(uuidbuf));
1436 }
1437 uuid_info_addr += uuid_copy_size;
1438 uuid_array_size -= uuid_copy_size;
1439 }
1440 }
1441 }
1442
1443 void print_launchd_info(void)
1444 {
1445 task_t task = current_task();
1446 thread_t thread = current_thread();
1447 volatile uint32_t *ppbtcnt = &pbtcnt;
1448 uint64_t bt_tsc_timeout;
1449 int cn = cpu_number();
1450
1451 if(pbtcpu != cn) {
1452 hw_atomic_add(&pbtcnt, 1);
1453 /* Spin on print backtrace lock, which serializes output
1454 * Continue anyway if a timeout occurs.
1455 */
1456 hw_lock_to(&pbtlock, ~0U);
1457 pbtcpu = cn;
1458 }
1459
1460 print_uuid_info(task);
1461 print_thread_num_that_crashed(task);
1462 print_threads_registers(thread);
1463 print_tasks_user_threads(task);
1464 kdb_printf("Mac OS version: %s\n", (osversion[0] != 0) ? osversion : "Not yet set");
1465 kdb_printf("Kernel version: %s\n", version);
1466 panic_display_kernel_uuid();
1467 panic_display_model_name();
1468
1469 /* Release print backtrace lock, to permit other callers in the
1470 * event of panics on multiple processors.
1471 */
1472 hw_lock_unlock(&pbtlock);
1473 hw_atomic_sub(&pbtcnt, 1);
1474 /* Wait for other processors to complete output
1475 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1476 */
1477 bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
1478 while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
1479
1480 }