]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/AT386/model_dep.c
xnu-3789.70.16.tar.gz
[apple/xnu.git] / osfmk / i386 / AT386 / model_dep.c
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 */
59
60 /*
61 * File: model_dep.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 *
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
65 *
66 * Basic initialization for I386 - ISA bus machines.
67 */
68
69
70 #include <mach/i386/vm_param.h>
71
72 #include <string.h>
73 #include <mach/vm_param.h>
74 #include <mach/vm_prot.h>
75 #include <mach/machine.h>
76 #include <mach/time_value.h>
77 #include <sys/kdebug.h>
78 #include <kern/spl.h>
79 #include <kern/assert.h>
80 #include <kern/debug.h>
81 #include <kern/misc_protos.h>
82 #include <kern/startup.h>
83 #include <kern/clock.h>
84 #include <kern/cpu_data.h>
85 #include <kern/machine.h>
86 #include <i386/postcode.h>
87 #include <i386/mp_desc.h>
88 #include <i386/misc_protos.h>
89 #include <i386/thread.h>
90 #include <i386/trap.h>
91 #include <i386/machine_routines.h>
92 #include <i386/mp.h> /* mp_rendezvous_break_lock */
93 #include <i386/cpuid.h>
94 #include <i386/fpu.h>
95 #include <i386/machine_cpu.h>
96 #include <i386/pmap.h>
97 #if CONFIG_MTRR
98 #include <i386/mtrr.h>
99 #endif
100 #include <i386/ucode.h>
101 #include <i386/pmCPU.h>
102 #include <i386/panic_hooks.h>
103
104 #include <architecture/i386/pio.h> /* inb() */
105 #include <pexpert/i386/boot.h>
106
107 #include <kdp/kdp_dyld.h>
108 #include <kdp/kdp_core.h>
109 #include <vm/pmap.h>
110 #include <vm/vm_map.h>
111 #include <vm/vm_kern.h>
112
113 #include <IOKit/IOPlatformExpert.h>
114 #include <IOKit/IOHibernatePrivate.h>
115
116 #include <pexpert/i386/efi.h>
117
118 #include <kern/thread.h>
119 #include <kern/sched.h>
120 #include <mach-o/loader.h>
121 #include <mach-o/nlist.h>
122
123 #include <libkern/kernel_mach_header.h>
124 #include <libkern/OSKextLibPrivate.h>
125
126 #include <mach/branch_predicates.h>
127 #include <libkern/section_keywords.h>
128
/* Debug-only kprintf wrapper; compiles to nothing on release kernels. */
#if DEBUG
#define DPRINTF(x...)	kprintf(x)
#else
#define DPRINTF(x...)
#endif

static void machine_conf(void);
void panic_print_symbol_name(vm_address_t search);

/* Kernel version strings and scheduler/panic tunables defined elsewhere. */
extern const char	version[];
extern char	osversion[];
extern int		max_unsafe_quanta;
extern int		max_poll_quanta;
extern unsigned int	panic_is_inited;

extern int	proc_pid(void *p);

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))	/* frame pointers must be 4-byte aligned */
#define FP_LR_OFFSET ((uint32_t)4)		/* return address offset in a 32-bit frame */
#define FP_LR_OFFSET64 ((uint32_t)8)		/* return address offset in a 64-bit frame */
#define FP_MAX_NUM_TO_EVALUATE (50)		/* cap on frames walked per backtrace */

int db_run_mode;

/* Panic backtrace serialization state. */
volatile int pbtcpu = -1;			/* CPU currently printing a backtrace */
hw_lock_data_t pbtlock;		/* backtrace print lock */
uint32_t pbtcnt = 0;			/* count of CPUs waiting to backtrace */

volatile int panic_double_fault_cpu = -1;	/* CPU that took a double fault, if any */

#define PRINT_ARGS_FROM_STACK_FRAME 0

/* Minimal view of a stack frame: saved frame pointer followed by return address. */
typedef struct _cframe_t {
	struct _cframe_t	*prev;
	uintptr_t	caller;
#if PRINT_ARGS_FROM_STACK_FRAME
	unsigned	args[0];
#endif
} cframe_t;

static unsigned panic_io_port;			/* optional I/O port read at panic time ("panic_io_port" boot-arg) */
static unsigned	commit_paniclog_to_nvram;	/* nonzero => save panic log to NVRAM ("nvram_paniclog" boot-arg) */

/* "debug" boot-arg value; locked down read-only after early boot. */
SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg;
175 /*
176 * Backtrace a single frame.
177 */
void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
	boolean_t is_64_bit, boolean_t nvram_format)
{
	int		    i = 0;
	addr64_t	lr;		/* return address read from the current frame */
	addr64_t	fp;		/* current frame pointer being walked */
	addr64_t	fp_for_ppn;	/* VA whose page number is cached in ppn */
	ppnum_t		ppn;		/* cached physical page for fp_for_ppn's page */
	boolean_t	dump_kernel_stack;

	fp = topfp;
	fp_for_ppn = 0;
	ppn = (ppnum_t)NULL;

	/* Decide once whether this is a kernel- or user-space walk; frames
	 * must stay on that side of the boundary. */
	if (fp >= VM_MIN_KERNEL_ADDRESS)
		dump_kernel_stack = TRUE;
	else
		dump_kernel_stack = FALSE;

	do {
		/* Stop on a NULL or misaligned frame pointer. */
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
			break;
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
			break;
		if ((!dump_kernel_stack) && (fp >=VM_MIN_KERNEL_ADDRESS))
			break;

		/* Check to see if current address will result in a different
		   ppn than previously computed (to avoid recomputation) via
		   (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */

		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		/* Read the saved return address via its physical address so a
		 * bad stack cannot fault the panic path. */
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				kdb_printf("%s\t  Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				kdb_printf("%s\t  Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		/* Re-resolve the physical page if the saved FP lives on a
		 * different page than the one just probed. */
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		/* Advance to the previous frame (fp = *fp), again via physical reads. */
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				kdb_printf("%s\t  Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				kdb_printf("%s\t  Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		/* Emit either the bare return address (NVRAM/compact format)
		 * or an annotated lr/fp pair. */
		if (nvram_format) {
			if (is_64_bit) {
				kdb_printf("%s\t0x%016llx\n", cur_marker, lr);
			} else {
				kdb_printf("%s\t0x%08x\n", cur_marker, (uint32_t)lr);
			}
		} else {
			if (is_64_bit) {
				kdb_printf("%s\t  lr: 0x%016llx  fp: 0x%016llx\n", cur_marker, lr, fp);
			} else {
				kdb_printf("%s\t  lr: 0x%08x  fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
			}
		}
		/* Stop after FP_MAX_NUM_TO_EVALUATE frames or if the chain loops
		 * back to the starting frame. */
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}
262 void
263 machine_startup(void)
264 {
265 int boot_arg;
266
267 #if 0
268 if( PE_get_hotkey( kPEControlKey ))
269 halt_in_debugger = halt_in_debugger ? 0 : 1;
270 #endif
271
272 if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) {
273 panicDebugging = TRUE;
274 #if DEVELOPMENT || DEBUG
275 if (debug_boot_arg & DB_HALT) halt_in_debugger=1;
276 #endif
277 if (debug_boot_arg & DB_PRT) disable_debug_output=FALSE;
278 if (debug_boot_arg & DB_SLOG) systemLogDiags=TRUE;
279 if (debug_boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
280 #if KDEBUG_MOJO_TRACE
281 if (debug_boot_arg & DB_PRT_KDEBUG) {
282 kdebug_serial = TRUE;
283 disable_debug_output = FALSE;
284 }
285 #endif
286 } else {
287 debug_boot_arg = 0;
288 }
289
290 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
291 commit_paniclog_to_nvram = 1;
292
293 /*
294 * Entering the debugger will put the CPUs into a "safe"
295 * power mode.
296 */
297 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg)))
298 pmsafe_debug = boot_arg;
299
300 #if NOTYET
301 hw_lock_init(&debugger_lock); /* initialize debugger lock */
302 #endif
303 hw_lock_init(&pbtlock); /* initialize print backtrace lock */
304
305 if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
306 default_preemption_rate = boot_arg;
307 }
308 if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
309 max_unsafe_quanta = boot_arg;
310 }
311 if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
312 max_poll_quanta = boot_arg;
313 }
314 if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
315 sched_poll_yield_shift = boot_arg;
316 }
317 /* The I/O port to issue a read from, in the event of a panic. Useful for
318 * triggering logic analyzers.
319 */
320 if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) {
321 /*I/O ports range from 0 through 0xFFFF */
322 panic_io_port = boot_arg & 0xffff;
323 }
324
325 machine_conf();
326
327 panic_hooks_init();
328
329 /*
330 * Start the system.
331 */
332 kernel_bootstrap();
333 /*NOTREACHED*/
334 }
335
336
337 static void
338 machine_conf(void)
339 {
340 machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
341 }
342
343
/* Validated EFI table pointers published for the platform expert (set below). */
extern void *gPEEFIRuntimeServices;
extern void *gPEEFISystemTable;
346
347 /*-
348 * COPYRIGHT (C) 1986 Gary S. Brown. You may use this program, or
349 * code or tables extracted from it, as desired without restriction.
350 *
351 * First, the polynomial itself and its table of feedback terms. The
352 * polynomial is
353 * X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
354 *
355 * Note that we take it "backwards" and put the highest-order term in
356 * the lowest-order bit. The X^32 term is "implied"; the LSB is the
357 * X^31 term, etc. The X^0 term (usually shown as "+1") results in
358 * the MSB being 1
359 *
360 * Note that the usual hardware shift register implementation, which
361 * is what we're using (we're merely optimizing it by doing eight-bit
362 * chunks at a time) shifts bits into the lowest-order term. In our
363 * implementation, that means shifting towards the right. Why do we
364 * do it this way? Because the calculated CRC must be transmitted in
365 * order from highest-order term to lowest-order term. UARTs transmit
366 * characters in order from LSB to MSB. By storing the CRC this way
367 * we hand it to the UART in the order low-byte to high-byte; the UART
 * sends each low-bit to high-bit; and the result is transmission bit
369 * by bit from highest- to lowest-order term without requiring any bit
370 * shuffling on our part. Reception works similarly
371 *
372 * The feedback terms table consists of 256, 32-bit entries. Notes
373 *
374 * The table can be generated at runtime if desired; code to do so
375 * is shown later. It might not be obvious, but the feedback
376 * terms simply represent the results of eight shift/xor opera
377 * tions for all combinations of data and CRC register values
378 *
 * The values must be right-shifted by eight bits by the "updcrc"
380 * logic; the shift must be unsigned (bring in zeroes). On some
381 * hardware you could probably optimize the shift in assembler by
382 * using byte-swap instructions
383 * polynomial $edb88320
384 *
385 *
386 * CRC32 code derived from work by Gary S. Brown.
387 */
388
/*
 * Feedback-term table for the byte-at-a-time, reflected CRC-32
 * (polynomial 0xedb88320).  Declared const: it is never written,
 * so it belongs in read-only data.
 */
static const uint32_t crc32_tab[] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
	0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
	0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
	0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
	0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
	0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
	0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
	0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
	0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
	0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
	0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
	0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
	0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
	0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
	0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
	0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
	0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
	0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};

/*
 * crc32 - compute/continue a standard (IEEE 802.3, reflected) CRC-32.
 *
 * crc:  running CRC; pass 0 to start a new checksum.  Because each call
 *       pre- and post-inverts, calls may be chained zlib-style:
 *       crc32(crc32(0, a, la), b, lb) == crc32(0, a||b, la+lb).
 * buf:  bytes to checksum (may be any data; read as unsigned chars).
 * size: number of bytes in buf.
 *
 * Returns the finalized CRC-32 of the data seen so far.
 */
static uint32_t
crc32(uint32_t crc, const void *buf, size_t size)
{
	const uint8_t *p;

	p = buf;
	crc = crc ^ ~0U;		/* undo prior finalization / set initial value */

	while (size--)
		crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);

	return crc ^ ~0U;		/* final inversion */
}
448
/*
 * efi_set_tables_64 - validate a 64-bit EFI system table and publish it.
 *
 * Checks the system table's signature and CRC32, then does the same for
 * the runtime services table it points to.  On success, stores both
 * pointers in gPEEFISystemTable / gPEEFIRuntimeServices; on any failure,
 * logs and leaves the corresponding global(s) unset.
 *
 * NOTE: the CRC check temporarily zeroes Hdr.CRC32 in the live table
 * (the field is defined to be zero while the checksum is computed) and
 * restores it afterward — the zero/compute/restore ordering must be kept.
 */
static void
efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
{
    EFI_RUNTIME_SERVICES_64 *runtime;
    uint32_t hdr_cksum;
    uint32_t cksum;

    DPRINTF("Processing 64-bit EFI tables at %p\n", system_table);
    do {
	DPRINTF("Header:\n");
	DPRINTF("  Signature:   0x%016llx\n", system_table->Hdr.Signature);
	DPRINTF("  Revision:    0x%08x\n", system_table->Hdr.Revision);
	DPRINTF("  HeaderSize:  0x%08x\n", system_table->Hdr.HeaderSize);
	DPRINTF("  CRC32:       0x%08x\n", system_table->Hdr.CRC32);
	DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices);
	if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
	    kprintf("Bad EFI system table signature\n");
	    break;
	}
	// Verify signature of the system table
	hdr_cksum = system_table->Hdr.CRC32;
	system_table->Hdr.CRC32 = 0;	/* CRC field must be zero while checksumming */
	cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);

	DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
	system_table->Hdr.CRC32 = hdr_cksum;	/* restore the original value */
	if (cksum != hdr_cksum) {
	    kprintf("Bad EFI system table checksum\n");
	    break;
	}

	gPEEFISystemTable = system_table;

	if(system_table->RuntimeServices == 0) {
	    kprintf("No runtime table present\n");
	    break;
	}
	DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
	// 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
	runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
	DPRINTF("Checking runtime services table %p\n", runtime);
	if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
	    kprintf("Bad EFI runtime table signature\n");
	    break;
	}

	// Verify signature of runtime services table
	hdr_cksum = runtime->Hdr.CRC32;
	runtime->Hdr.CRC32 = 0;		/* same zero-while-checksumming rule as above */
	cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);

	DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
	runtime->Hdr.CRC32 = hdr_cksum;
	if (cksum != hdr_cksum) {
	    kprintf("Bad EFI runtime table checksum\n");
	    break;
	}

	gPEEFIRuntimeServices = runtime;
    }
    while (FALSE);
}
511
/*
 * efi_set_tables_32 - validate a 32-bit EFI system table and publish it.
 *
 * 32-bit counterpart of efi_set_tables_64(): verifies signature and CRC32
 * of the system table and its runtime services table, then publishes both
 * via gPEEFISystemTable / gPEEFIRuntimeServices.  The same "zero the CRC
 * field, checksum, restore" ordering applies.
 */
static void
efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
{
    EFI_RUNTIME_SERVICES_32 *runtime;
    uint32_t hdr_cksum;
    uint32_t cksum;

    DPRINTF("Processing 32-bit EFI tables at %p\n", system_table);
    do {
	DPRINTF("Header:\n");
	DPRINTF("  Signature:   0x%016llx\n", system_table->Hdr.Signature);
	DPRINTF("  Revision:    0x%08x\n", system_table->Hdr.Revision);
	DPRINTF("  HeaderSize:  0x%08x\n", system_table->Hdr.HeaderSize);
	DPRINTF("  CRC32:       0x%08x\n", system_table->Hdr.CRC32);
	DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices);
	if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
	    kprintf("Bad EFI system table signature\n");
	    break;
	}
	// Verify signature of the system table
	hdr_cksum = system_table->Hdr.CRC32;
	system_table->Hdr.CRC32 = 0;	/* CRC field must be zero while checksumming */
	DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize);
	cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);

	DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
	system_table->Hdr.CRC32 = hdr_cksum;	/* restore the original value */
	if (cksum != hdr_cksum) {
	    kprintf("Bad EFI system table checksum\n");
	    break;
	}

	gPEEFISystemTable = system_table;

	if(system_table->RuntimeServices == 0) {
	    kprintf("No runtime table present\n");
	    break;
	}
	DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
	// 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
	// For a 64-bit kernel, booter provides a virtual address mod 4G
	runtime = (EFI_RUNTIME_SERVICES_32 *)
			(system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
	DPRINTF("Runtime table addressed at %p\n", runtime);
	if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
	    kprintf("Bad EFI runtime table signature\n");
	    break;
	}

	// Verify signature of runtime services table
	hdr_cksum = runtime->Hdr.CRC32;
	runtime->Hdr.CRC32 = 0;		/* same zero-while-checksumming rule as above */
	cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);

	DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
	runtime->Hdr.CRC32 = hdr_cksum;
	if (cksum != hdr_cksum) {
	    kprintf("Bad EFI runtime table checksum\n");
	    break;
	}

	DPRINTF("Runtime functions\n");
	DPRINTF("  GetTime                  : 0x%x\n", runtime->GetTime);
	DPRINTF("  SetTime                  : 0x%x\n", runtime->SetTime);
	DPRINTF("  GetWakeupTime            : 0x%x\n", runtime->GetWakeupTime);
	DPRINTF("  SetWakeupTime            : 0x%x\n", runtime->SetWakeupTime);
	DPRINTF("  SetVirtualAddressMap     : 0x%x\n", runtime->SetVirtualAddressMap);
	DPRINTF("  ConvertPointer           : 0x%x\n", runtime->ConvertPointer);
	DPRINTF("  GetVariable              : 0x%x\n", runtime->GetVariable);
	DPRINTF("  GetNextVariableName      : 0x%x\n", runtime->GetNextVariableName);
	DPRINTF("  SetVariable              : 0x%x\n", runtime->SetVariable);
	DPRINTF("  GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount);
	DPRINTF("  ResetSystem              : 0x%x\n", runtime->ResetSystem);

	gPEEFIRuntimeServices = runtime;
    }
    while (FALSE);
}
590
591
592 /* Map in EFI runtime areas. */
/* Map in EFI runtime areas. */
/*
 * efi_init - map the EFI runtime memory ranges into the kernel and
 * validate/publish the EFI system table.
 *
 * Walks the boot-args EFI memory map, maps every EFI_MEMORY_RUNTIME range
 * with pmap_map_bd() (executable for runtime-services code, writable
 * otherwise; uncached for memory-mapped I/O), then hands the system table
 * to efi_set_tables_64() or efi_set_tables_32() depending on the
 * firmware's mode.  Panics on an unsupported boot-args version.
 */
static void
efi_init(void)
{
    boot_args *args = (boot_args *)PE_state.bootArgs;

    kprintf("Initializing EFI runtime services\n");

    do
    {
	vm_offset_t vm_size, vm_addr;
	vm_map_offset_t phys_addr;
	EfiMemoryRange *mptr;
	unsigned int msize, mcount;
	unsigned int i;

	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
		args->kaddr, args->ksize);
	DPRINTF("           efiSystemTable physical: 0x%x virtual: %p\n",
		args->efiSystemTable,
		(void *) ml_static_ptovirt(args->efiSystemTable));
	DPRINTF("           efiRuntimeServicesPageStart: 0x%x\n",
		args->efiRuntimeServicesPageStart);
	DPRINTF("           efiRuntimeServicesPageCount: 0x%x\n",
		args->efiRuntimeServicesPageCount);
	DPRINTF("           efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
		args->efiRuntimeServicesVirtualPageStart);
	mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
	/* Descriptors are msize bytes apart, not sizeof(EfiMemoryRange). */
	for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	    if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
		vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
		vm_addr =   (vm_offset_t) mptr->VirtualStart;
		/* For K64 on EFI32, shadow-map into high KVA */
		if (vm_addr < VM_MIN_KERNEL_ADDRESS)
			vm_addr |= VM_MIN_KERNEL_ADDRESS;
		phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
		DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
			mptr->Type,
			(void *) (uintptr_t) phys_addr,
			(void *) (uintptr_t) mptr->VirtualStart,
			(void *) vm_addr,
			(void *) vm_size);
		pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size),
		     (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
		     (mptr->Type == EfiMemoryMappedIO)       ? VM_WIMG_IO   : VM_WIMG_USE_DEFAULT);
	    }
	}

	if (args->Version != kBootArgsVersion2)
	    panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);

	DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
	if (args->efiMode == kBootArgsEfiMode64) {
	    efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
	} else {
	    efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
	}
    }
    while (FALSE);

    return;
}
657
658 /* Remap EFI runtime areas. */
/* Remap EFI runtime areas. */
/*
 * hibernate_newruntime_map - rebuild EFI runtime mappings after a
 * hibernation resume, using the saved memory map.
 *
 * map / map_size describe the replacement EFI memory map;
 * system_table_offset is the system table's offset within the runtime
 * services region and is converted to a physical address below.
 *
 * Invalidates the published table pointers, logs the old ranges, removes
 * the previous runtime mappings, installs mappings from the new map, and
 * finally re-validates and re-publishes the tables.  The remove-then-remap
 * ordering must be preserved.
 */
void
hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_offset)
{
    boot_args *args = (boot_args *)PE_state.bootArgs;

    kprintf("Reinitializing EFI runtime services\n");

    do
    {
	vm_offset_t vm_size, vm_addr;
	vm_map_offset_t phys_addr;
	EfiMemoryRange *mptr;
	unsigned int msize, mcount;
	unsigned int i;

	/* Drop the stale pointers until re-validation succeeds. */
	gPEEFISystemTable     = 0;
	gPEEFIRuntimeServices = 0;

	system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);

	kprintf("Old system table 0x%x, new 0x%x\n",
	    (uint32_t)args->efiSystemTable,    system_table_offset);

	args->efiSystemTable    = system_table_offset;

	kprintf("Old map:\n");
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;
	mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
	for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	    if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {

		vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
		vm_addr =   (vm_offset_t) mptr->VirtualStart;
		/* K64 on EFI32 */
		if (vm_addr < VM_MIN_KERNEL_ADDRESS)
			vm_addr |= VM_MIN_KERNEL_ADDRESS;
		phys_addr = (vm_map_offset_t) mptr->PhysicalStart;

		kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
	    }
	}

	/* Tear down the previous runtime-services mappings. */
	pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart),
				 i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount));

	kprintf("New map:\n");
	msize = args->MemoryMapDescriptorSize;
	mcount = (unsigned int )(map_size / msize);
	mptr = map;
	for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	    if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {

		vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
		vm_addr =   (vm_offset_t) mptr->VirtualStart;
		if (vm_addr < VM_MIN_KERNEL_ADDRESS)
			vm_addr |= VM_MIN_KERNEL_ADDRESS;
		phys_addr = (vm_map_offset_t) mptr->PhysicalStart;

		kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);

		pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
			 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
			 (mptr->Type == EfiMemoryMappedIO)       ? VM_WIMG_IO   : VM_WIMG_USE_DEFAULT);
	    }
	}

	if (args->Version != kBootArgsVersion2)
	    panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);

	kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
	if (args->efiMode == kBootArgsEfiMode64) {
	    efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
	} else {
	    efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
	}
    }
    while (FALSE);

    kprintf("Done reinitializing EFI runtime services\n");

    return;
}
742
/*
 * Find devices.  The system is alive.
 */
/*
 * machine_init - machine-dependent initialization once VM is up.
 *
 * Runs in a fixed order: reallocate per-CPU data, initialize the debug
 * log, display CPU identification, set up EFI runtime services, bring up
 * SMP, FPU, clocks, MTRR/PAT (if configured), then finalize low memory.
 */
void
machine_init(void)
{
	/* Now with VM up, switch to dynamically allocated cpu data */
	cpu_data_realloc();

	/* Ensure panic buffer is initialized. */
	debug_log_init();

	/*
	 * Display CPU identification
	 */
	cpuid_cpu_display("CPU identification");
	cpuid_feature_display("CPU features");
	cpuid_extfeature_display("CPU extended features");

	/*
	 * Initialize EFI runtime services.
	 */
	efi_init();

	smp_init();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

	/*
	 * Configure clock devices.
	 */
	clock_config();

#if CONFIG_MTRR
	/*
	 * Initialize MTRR from boot processor.
	 */
	mtrr_init();

	/*
	 * Set up PAT for boot processor.
	 */
	pat_init();
#endif

	/*
	 * Free lowmem pages and complete other setup
	 */
	pmap_lowmem_finalize();
}
796
/*
 * Halt a cpu.
 */
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);	/* FALSE => halt, do not reboot */
}
805
806 int reset_mem_on_reboot = 1;
807
808 /*
809 * Halt the system or reboot.
810 */
811 __attribute__((noreturn))
812 void
813 halt_all_cpus(boolean_t reboot)
814 {
815 if (reboot) {
816 printf("MACH Reboot\n");
817 PEHaltRestart( kPERestartCPU );
818 } else {
819 printf("CPU halted\n");
820 PEHaltRestart( kPEHaltCPU );
821 }
822 while(1);
823 }
824
825
826 /* Issue an I/O port read if one has been requested - this is an event logic
827 * analyzers can use as a trigger point.
828 */
829
830 void
831 panic_io_port_read(void) {
832 if (panic_io_port)
833 (void)inb(panic_io_port);
834 }
835
/* For use with the MP rendezvous mechanism
 */

/* Optional restart delay after panic; ~0ULL means "no timeout configured"
 * (halt forever unless PE_reboot_on_panic() is set). */
uint64_t panic_restart_timeout = ~(0ULL);

#define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)

/*
 * machine_halt_cpu - final halt/restart step of the panic path.
 *
 * Performs the optional panic I/O port read, then either halts this CPU
 * permanently or, when a reboot-on-panic policy or restart timeout is in
 * effect, spins until the deadline and attempts a restart through
 * PE_halt_restart (ACPI RESET_REG), falling back to a halt.
 */
static void
machine_halt_cpu(void) {
	uint64_t deadline;

	panic_io_port_read();

	/* Halt here forever if we're not rebooting */
	if (!PE_reboot_on_panic() && panic_restart_timeout == ~(0ULL)) {
		pmCPUHalt(PM_HALT_DEBUG);
		return;
	}

	/* Reboot-on-panic uses the fixed 3-second deadline; otherwise the
	 * operator-supplied timeout. NOTE: deadline arithmetic mixes
	 * mach_absolute_time() units with nanosecond constants — TODO confirm
	 * the intended conversion on this platform. */
	if (PE_reboot_on_panic())
		deadline = mach_absolute_time() + PANIC_RESTART_TIMEOUT;
	else
		deadline = mach_absolute_time() + panic_restart_timeout;

	while (mach_absolute_time() < deadline)
		cpu_pause();

	kprintf("Invoking PE_halt_restart\n");
	/* Attempt restart via ACPI RESET_REG; at the time of this
	 * writing, this is routine is chained through AppleSMC->
	 * AppleACPIPlatform
	 */
	if (PE_halt_restart)
		(*PE_halt_restart)(kPERestartCPU);
	pmCPUHalt(PM_HALT_DEBUG);
}
872
/*
 * DebuggerWithContext - enter the debugger, discarding the trap context.
 *
 * reason/ctx are unused on this platform; any non-default option bits in
 * debugger_options_mask are logged as unsupported before delegating to
 * Debugger().
 */
void
DebuggerWithContext(
	__unused unsigned int	reason,
	__unused void 		*ctx,
	const char		*message,
	uint64_t		debugger_options_mask)
{
	if (debugger_options_mask != DEBUGGER_OPTION_NONE) {
		kprintf("debugger options (%llx) not supported for desktop.\n", debugger_options_mask);
	}

	Debugger(message);
}
886
/*
 * Debugger: x86 kernel debugger/panic entry point.
 *
 * Prints the supplied message to both the system log and the serial
 * console.  If a panic is in progress (panicstr is set) it additionally
 * prints a backtrace (or launchd crash info), optionally commits the
 * panic log to NVRAM and -- unless kernel debugging or a polled-mode
 * corefile is configured -- NMIs the other CPUs and halts.  Otherwise
 * it executes an int3 so an attached debugger can take control.
 */
void
Debugger(
	const char	*message)
{
	unsigned long pi_size = 0;
	void *stackptr;
	int cn = cpu_number();

	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

	/* Note (nestably) that this CPU has entered debugger context. */
	hw_atomic_add(&debug_mode, 1);
	if (!panic_is_inited) {
		/* Panic machinery not yet initialized: emit a POST code and halt hard. */
		postcode(PANIC_HLT);
		asm("hlt");
	}

	/* Panic output must show real pointer values, not redacted ones. */
	doprnt_hide_pointers = FALSE;

	printf("Debugger called: <%s>\n", message);
	kprintf("Debugger called: <%s>\n", message);

	/*
	 * Skip the graphical panic box if no panic string.
	 * This is the case if we're being called from
	 *   host_reboot(,HOST_REBOOT_DEBUGGER)
	 * as a quiet way into the debugger.
	 */

	if (panicstr) {
		disable_preemption();

		/* Issue an I/O port read if one has been requested - this is an event logic
		 * analyzers can use as a trigger point.
		 */
		panic_io_port_read();

		/* Obtain current frame pointer */
		__asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));

		/* Print backtrace - callee is internally synchronized */
		if (strncmp(panicstr, LAUNCHD_CRASHED_PREFIX, strlen(LAUNCHD_CRASHED_PREFIX)) == 0) {
			/* Special handling of launchd died panics */
			print_launchd_info();
		} else {
			/* Deeper trace (80 frames vs. 48) on the CPU that double-faulted. */
			panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80: 48), NULL, FALSE, NULL);
		}

		/* everything should be printed now so copy to NVRAM
		 */

		if( debug_buf_size > 0) {
			/* Optionally sync the panic log, if any, to NVRAM
			 * This is the default.
			 */
			if (commit_paniclog_to_nvram) {
				unsigned int bufpos;
				uintptr_t cr0;

				debug_putc(0);

				/* Now call the compressor */
				/* XXX Consider using the WKdm compressor in the
				 * future, rather than just packing - would need to
				 * be co-ordinated with crashreporter, which decodes
				 * this post-restart. The compressor should be
				 * capable of in-place compression.
				 */
				bufpos = packA(debug_buf,
				    (unsigned int) (debug_buf_ptr - debug_buf), debug_buf_size);
				/* If compression was successful,
				 * use the compressed length
				 */
				pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);

				/* Save panic log to non-volatile store
				 * Panic info handler must truncate data that is
				 * too long for this platform.
				 * This call must save data synchronously,
				 * since we can subsequently halt the system.
				 */


				/* The following sequence is a workaround for:
				 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
				 * any routines that use floating point (MMX in this case) when saving panic
				 * logs to nvram/flash.
				 */
				cr0 = get_cr0();
				clear_ts();

				kprintf("Attempting to commit panic log to NVRAM\n");
				pi_size = PESavePanicInfo((unsigned char *)debug_buf,
						(uint32_t)pi_size );
				/* Restore CR0 (in particular the TS bit cleared above). */
				set_cr0(cr0);

				/* Uncompress in-place, to permit examination of
				 * the panic log by debuggers.
				 */

				if (bufpos) {
					unpackA(debug_buf, bufpos);
				}
			}
		}

		if (!panicDebugging && !kdp_has_polled_corefile()) {
			unsigned cnum;
			/* Clear the MP rendezvous function lock, in the event
			 * that a panic occurred while in that codepath.
			 */
			mp_rendezvous_break_lock();

			/* Non-maskably interrupt all other processors
			 * If a restart timeout is specified, this processor
			 * will attempt a restart.
			 */
			kprintf("Invoking machine_halt_cpu on CPU %d\n", cn);
			for (cnum = 0; cnum < real_ncpus; cnum++) {
				if (cnum != (unsigned) cn) {
					cpu_NMI_interrupt(cnum);
				}
			}
			machine_halt_cpu();
			/* NOT REACHED */
		}
	}

	/* Not halting: restore pointer redaction and trap into any attached debugger. */
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	__asm__("int3");
	hw_atomic_sub(&debug_mode, 1);
}
1018
1019 char *
1020 machine_boot_info(char *buf, __unused vm_size_t size)
1021 {
1022 *buf ='\0';
1023 return buf;
1024 }
1025
1026 /* Routines for address - symbol translation. Not called unless the "keepsyms"
1027 * boot-arg is supplied.
1028 */
1029
1030 static int
1031 panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name)
1032 {
1033 kernel_nlist_t *sym = NULL;
1034 struct load_command *cmd;
1035 kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL;
1036 struct symtab_command *orig_st = NULL;
1037 unsigned int i;
1038 char *strings, *bestsym = NULL;
1039 vm_address_t bestaddr = 0, diff, curdiff;
1040
1041 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
1042
1043 cmd = (struct load_command *) &mh[1];
1044 for (i = 0; i < mh->ncmds; i++) {
1045 if (cmd->cmd == LC_SEGMENT_KERNEL) {
1046 kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;
1047
1048 if (strncmp(SEG_TEXT, orig_sg->segname,
1049 sizeof(orig_sg->segname)) == 0)
1050 orig_ts = orig_sg;
1051 else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
1052 sizeof(orig_sg->segname)) == 0)
1053 orig_le = orig_sg;
1054 else if (strncmp("", orig_sg->segname,
1055 sizeof(orig_sg->segname)) == 0)
1056 orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */
1057 }
1058 else if (cmd->cmd == LC_SYMTAB)
1059 orig_st = (struct symtab_command *) cmd;
1060
1061 cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
1062 }
1063
1064 if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
1065 return 0;
1066
1067 if ((search < orig_ts->vmaddr) ||
1068 (search >= orig_ts->vmaddr + orig_ts->vmsize)) {
1069 /* search out of range for this mach header */
1070 return 0;
1071 }
1072
1073 sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
1074 strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
1075 diff = search;
1076
1077 for (i = 0; i < orig_st->nsyms; i++) {
1078 if (sym[i].n_type & N_STAB) continue;
1079
1080 if (sym[i].n_value <= search) {
1081 curdiff = search - (vm_address_t)sym[i].n_value;
1082 if (curdiff < diff) {
1083 diff = curdiff;
1084 bestaddr = sym[i].n_value;
1085 bestsym = strings + sym[i].n_un.n_strx;
1086 }
1087 }
1088 }
1089
1090 if (bestsym != NULL) {
1091 if (diff != 0) {
1092 kdb_printf("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff);
1093 } else {
1094 kdb_printf("%s : %s", module_name, bestsym);
1095 }
1096 return 1;
1097 }
1098 return 0;
1099 }
1100
1101 extern kmod_info_t * kmod; /* the list of modules */
1102
1103 static void
1104 panic_print_kmod_symbol_name(vm_address_t search)
1105 {
1106 u_int i;
1107
1108 if (gLoadedKextSummaries == NULL)
1109 return;
1110 for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
1111 OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i;
1112
1113 if ((search >= summary->address) &&
1114 (search < (summary->address + summary->size)))
1115 {
1116 kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address;
1117 if (panic_print_macho_symbol_name(header, search, summary->name) == 0) {
1118 kdb_printf("%s + %llu", summary->name, (unsigned long)search - summary->address);
1119 }
1120 break;
1121 }
1122 }
1123 }
1124
1125 void
1126 panic_print_symbol_name(vm_address_t search)
1127 {
1128 /* try searching in the kernel */
1129 if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) {
1130 /* that failed, now try to search for the right kext */
1131 panic_print_kmod_symbol_name(search);
1132 }
1133 }
1134
1135 /* Generate a backtrace, given a frame pointer - this routine
1136 * should walk the stack safely. The trace is appended to the panic log
1137 * and conditionally, to the console. If the trace contains kernel module
1138 * addresses, display the module name, load address and dependencies.
1139 */
1140
#define DUMPFRAMES 32
#define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
/*
 * Walk and print a kernel backtrace starting at frame pointer `_frame',
 * printing at most `nframes' frames.  Optionally prints `msg' first and,
 * when regdump is TRUE, dumps the 64-bit register state from `regs'.
 * Each frame pointer is validated (alignment plus kvtophys mapping of
 * both ends of the frame) before being dereferenced.  Return addresses
 * are collected so the kexts they fall in can be identified afterwards.
 * Output is serialized across CPUs via pbtlock/pbtcnt.
 */
void
panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
{
	cframe_t	*frame = (cframe_t *)_frame;
	vm_offset_t raddrs[DUMPFRAMES];
	vm_offset_t PC = 0;
	int frame_index;
	volatile uint32_t *ppbtcnt = &pbtcnt;
	uint64_t bt_tsc_timeout;
	boolean_t keepsyms = FALSE;
	int cn = cpu_number();
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

	/* Acquire the print-backtrace lock unless this CPU already holds it. */
	if(pbtcpu != cn) {
		hw_atomic_add(&pbtcnt, 1);
		/* Spin on print backtrace lock, which serializes output
		 * Continue anyway if a timeout occurs.
		 */
		hw_lock_to(&pbtlock, ~0U);
		pbtcpu = cn;
	}

	if (__improbable(doprnt_hide_pointers == TRUE)) {
		/* If we're called directly, the Debugger() function will not be called,
		 * so we need to reset the value in here. */
		doprnt_hide_pointers = FALSE;
	}

	panic_check_hook();

	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));

	if (msg != NULL) {
		kdb_printf("%s", msg);
	}

	/* Optional register dump, and capture RIP for kext identification below. */
	if ((regdump == TRUE) && (regs != NULL)) {
		x86_saved_state64_t	*ss64p = saved_state64(regs);
		kdb_printf(
		    "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
		    "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
		    "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
		    "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
		    "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
		    ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		    ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		    ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		    ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		    ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
		    ss64p->isf.ss);
		PC = ss64p->isf.rip;
	}

	kdb_printf("Backtrace (CPU %d), "
#if PRINT_ARGS_FROM_STACK_FRAME
	"Frame : Return Address (4 potential args on stack)\n", cn);
#else
	"Frame : Return Address\n", cn);
#endif

	for (frame_index = 0; frame_index < nframes; frame_index++) {
		vm_offset_t curframep = (vm_offset_t) frame;

		if (!curframep)
			break;

		if (curframep & 0x3) {
			kdb_printf("Unaligned frame\n");
			goto invalid;
		}

		/* Both ends of the frame must be mapped before we touch it. */
		if (!kvtophys(curframep) ||
		    !kvtophys(curframep + sizeof(cframe_t) - 1)) {
			kdb_printf("No mapping exists for frame pointer\n");
			goto invalid;
		}

		kdb_printf("%p : 0x%lx ", frame, frame->caller);
		if (frame_index < DUMPFRAMES)
			raddrs[frame_index] = frame->caller;

#if PRINT_ARGS_FROM_STACK_FRAME
		if (kvtophys((vm_offset_t)&(frame->args[3])))
			kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
			    frame->args[0], frame->args[1],
			    frame->args[2], frame->args[3]);
#endif

		/* Display address-symbol translation only if the "keepsyms"
		 * boot-arg is suppplied, since we unload LINKEDIT otherwise.
		 * This routine is potentially unsafe; also, function
		 * boundary identification is unreliable after a strip -x.
		 */
		if (keepsyms)
			panic_print_symbol_name((vm_address_t)frame->caller);

		kdb_printf("\n");

		frame = frame->prev;
	}

	if (frame_index >= nframes)
		kdb_printf("\tBacktrace continues...\n");

	goto out;

invalid:
	kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame);
out:

	/* Identify kernel modules in the backtrace and display their
	 * load addresses and dependencies. This routine should walk
	 * the kmod list safely.
	 */
	if (frame_index)
		kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);

	if (PC != 0)
		kmod_panic_dump(&PC, 1);

	panic_display_system_configuration(FALSE);

	doprnt_hide_pointers = old_doprnt_hide_pointers;

	/* Release print backtrace lock, to permit other callers in the
	 * event of panics on multiple processors.
	 */
	hw_lock_unlock(&pbtlock);
	hw_atomic_sub(&pbtcnt, 1);
	/* Wait for other processors to complete output
	 * Timeout and continue after PBT_TIMEOUT_CYCLES.
	 */
	bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
	while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
}
1278
1279 static boolean_t
1280 debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
1281 {
1282 size_t rem = size;
1283 char *kvaddr = dest;
1284
1285 while (rem) {
1286 ppnum_t upn = pmap_find_phys(p, uaddr);
1287 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
1288 uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
1289 uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
1290 uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
1291 size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
1292 cur_size = MIN(cur_size, rem);
1293
1294 if (upn && pmap_valid_page(upn) && phys_dest) {
1295 bcopy_phys(phys_src, phys_dest, cur_size);
1296 }
1297 else
1298 break;
1299 uaddr += cur_size;
1300 kvaddr += cur_size;
1301 rem -= cur_size;
1302 }
1303 return (rem == 0);
1304 }
1305
1306 void
1307 print_threads_registers(thread_t thread)
1308 {
1309 x86_saved_state_t *savestate;
1310
1311 savestate = get_user_regs(thread);
1312 kdb_printf(
1313 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1314 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1315 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1316 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1317 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1318 savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx,
1319 savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi,
1320 savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11,
1321 savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15,
1322 savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs,
1323 savestate->ss_64.isf.ss);
1324 }
1325
1326 void
1327 print_tasks_user_threads(task_t task)
1328 {
1329 thread_t thread = current_thread();
1330 x86_saved_state_t *savestate;
1331 pmap_t pmap = 0;
1332 uint64_t rbp;
1333 const char *cur_marker = 0;
1334 int j;
1335
1336 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1337 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
1338
1339 kdb_printf("Thread %d: %p\n", j, thread);
1340 pmap = get_task_pmap(task);
1341 savestate = get_user_regs(thread);
1342 rbp = savestate->ss_64.rbp;
1343 kdb_printf("\t0x%016llx\n", savestate->ss_64.isf.rip);
1344 print_one_backtrace(pmap, (vm_offset_t)rbp, cur_marker, TRUE, TRUE);
1345 kdb_printf("\n");
1346 }
1347 }
1348
1349 void
1350 print_thread_num_that_crashed(task_t task)
1351 {
1352 thread_t c_thread = current_thread();
1353 thread_t thread;
1354 int j;
1355
1356 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1357 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
1358
1359 if (c_thread == thread) {
1360 kdb_printf("\nThread %d crashed\n", j);
1361 break;
1362 }
1363 }
1364 }
1365
#define PANICLOG_UUID_BUF_SIZE 256

/*
 * Append the load addresses and UUIDs of the images loaded in `task'
 * to the panic log.  Reads dyld's all_image_infos structure out of the
 * task's address space with debug_copyin (physical copies, safe on the
 * panic path), then copies in and prints the UUID array in
 * PANICLOG_UUID_BUF_SIZE-sized chunks.
 * NOTE(review): only the 64-bit dyld layouts
 * (user64_dyld_all_image_infos / user64_dyld_uuid_info) are handled.
 */
void print_uuid_info(task_t task)
{
	uint32_t		uuid_info_count = 0;
	mach_vm_address_t	uuid_info_addr = 0;
	/* Validate the map and pmap pointers before dereferencing them --
	 * this may run during a panic when they could be corrupt.
	 */
	boolean_t		have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
	boolean_t		have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
	int			task_pid = pid_from_task(task);
	char			uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0};
	char			*uuidbufptr = uuidbuf;
	uint32_t		k;

	if (have_pmap && task->active && task_pid > 0) {
		/* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
		struct user64_dyld_all_image_infos task_image_infos;
		if (debug_copyin(task->map->pmap, task->all_image_info_addr,
			&task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}

		/* If we get a NULL uuid_info_addr (which can happen when we catch dyld
		 * in the middle of updating this data structure), we zero the
		 * uuid_info_count so that we won't even try to save load info for this task
		 */
		if (!uuid_info_addr) {
			uuid_info_count = 0;
		}
	}

	if (task_pid > 0 && uuid_info_count > 0) {
		uint32_t uuid_info_size = sizeof(struct user64_dyld_uuid_info);
		uint32_t uuid_array_size = uuid_info_count * uuid_info_size;
		uint32_t uuid_copy_size = 0;
		uint32_t uuid_image_count = 0;
		char *current_uuid_buffer = NULL;
		/* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */

		kdb_printf("\nuuid info:\n");
		while (uuid_array_size) {
			/* Size this chunk: either the whole remainder, or as many
			 * whole entries as fit in the local buffer.
			 */
			if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) {
				uuid_copy_size = uuid_array_size;
				uuid_image_count = uuid_array_size/uuid_info_size;
			} else {
				uuid_image_count = PANICLOG_UUID_BUF_SIZE/uuid_info_size;
				uuid_copy_size = uuid_image_count * uuid_info_size;
			}
			if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr,
				uuid_copy_size)) {
				kdb_printf("Error!! Failed to copy UUID info for task %p pid %d\n", task, task_pid);
				uuid_image_count = 0;
				break;
			}

			if (uuid_image_count > 0) {
				current_uuid_buffer = uuidbufptr;
				for (k = 0; k < uuid_image_count; k++) {
					/* Each entry: a 64-bit load address followed by a 16-byte UUID. */
					kdb_printf(" %#llx", *(uint64_t *)current_uuid_buffer);
					current_uuid_buffer += sizeof(uint64_t);
					uint8_t *uuid = (uint8_t *)current_uuid_buffer;
					kdb_printf("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
					    uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8],
					    uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
					current_uuid_buffer += 16;
				}
				bzero(&uuidbuf, sizeof(uuidbuf));
			}
			uuid_info_addr += uuid_copy_size;
			uuid_array_size -= uuid_copy_size;
		}
	}
}
1439
/*
 * Dump diagnostic information for a "launchd died" panic: the current
 * task's image UUIDs, which of its threads crashed, that thread's
 * register state, and user-space backtraces for all of its threads.
 * Output is serialized across CPUs using the same pbtlock/pbtcnt
 * protocol as panic_i386_backtrace.
 */
void print_launchd_info(void)
{
	task_t		task = current_task();
	thread_t	thread = current_thread();
	volatile uint32_t *ppbtcnt = &pbtcnt;
	uint64_t	bt_tsc_timeout;
	int		cn = cpu_number();

	/* Acquire the print-backtrace lock unless this CPU already holds it. */
	if(pbtcpu != cn) {
		hw_atomic_add(&pbtcnt, 1);
		/* Spin on print backtrace lock, which serializes output
		 * Continue anyway if a timeout occurs.
		 */
		hw_lock_to(&pbtlock, ~0U);
		pbtcpu = cn;
	}

	print_uuid_info(task);
	print_thread_num_that_crashed(task);
	print_threads_registers(thread);
	print_tasks_user_threads(task);

	panic_display_system_configuration(TRUE);

	/* Release print backtrace lock, to permit other callers in the
	 * event of panics on multiple processors.
	 */
	hw_lock_unlock(&pbtlock);
	hw_atomic_sub(&pbtcnt, 1);
	/* Wait for other processors to complete output
	 * Timeout and continue after PBT_TIMEOUT_CYCLES.
	 */
	bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
	while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));

}