/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * File:	model_dep.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Basic initialization for I386 - ISA bus machines.
 */


#define __APPLE_API_PRIVATE 1
#define __APPLE_API_UNSTABLE 1
#include <kern/debug.h>

#include <mach/i386/vm_param.h>

#include <string.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/machine_routines.h>
#include <i386/mp.h>			/* mp_rendezvous_break_lock */
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <i386/machine_cpu.h>
#include <i386/pmap.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#include <i386/ucode.h>
#include <i386/pmCPU.h>
#include <i386/panic_hooks.h>

#include <architecture/i386/pio.h>	/* inb() */
#include <pexpert/i386/boot.h>

#include <kdp/kdp_dyld.h>
#include <kdp/kdp_core.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>

#include <pexpert/i386/efi.h>

#include <kern/thread.h>
#include <kern/sched.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/OSKextLibPrivate.h>

#include <mach/branch_predicates.h>

#if DEBUG || DEVELOPMENT
#define DPRINTF(x...)	kprintf(x)
#else
#define DPRINTF(x...)
#endif

#ifndef ROUNDUP
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#ifndef ROUNDDOWN
#define ROUNDDOWN(x,y) (((x)/(y))*(y))
#endif
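
/*
 * Illustrative sketch (an assumption, not part of the build): ROUNDUP rounds
 * via bit masking and so assumes 'b' is a power of two; ROUNDDOWN uses
 * integer divide/multiply and works for any non-zero 'y'.
 */
#if 0
	assert(ROUNDUP(0x1001, 0x1000) == 0x2000);	/* round up to the next page */
	assert(ROUNDUP(0x1000, 0x1000) == 0x1000);	/* already aligned */
	assert(ROUNDDOWN(0x1fff, 0x1000) == 0x1000);	/* truncate to the page base */
	assert(ROUNDDOWN(10, 3) == 9);			/* any divisor works here */
#endif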

static void machine_conf(void);
void panic_print_symbol_name(vm_address_t search);
void RecordPanicStackshot(void);

typedef enum paniclog_flush_type {
	kPaniclogFlushBase = 1,		/* Flush the initial log and paniclog header */
	kPaniclogFlushStackshot = 2,	/* Flush only the stackshot data, then flush the header */
	kPaniclogFlushOtherLog = 3	/* Flush the other log, then flush the header */
} paniclog_flush_type_t;

void paniclog_flush_internal(paniclog_flush_type_t variant);

extern const char	version[];
extern char		osversion[];
extern int		max_unsafe_quanta;
extern int		max_poll_quanta;
extern unsigned int	panic_is_inited;

extern int		proc_pid(void *p);

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK      ((uint32_t)(0x3))
#define FP_LR_OFFSET           ((uint32_t)4)
#define FP_LR_OFFSET64         ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)

volatile int pbtcpu = -1;
hw_lock_data_t pbtlock;		/* backtrace print lock */
uint32_t pbtcnt = 0;

volatile int panic_double_fault_cpu = -1;

#define PRINT_ARGS_FROM_STACK_FRAME 0

typedef struct _cframe_t {
	struct _cframe_t	*prev;
	uintptr_t		caller;
#if PRINT_ARGS_FROM_STACK_FRAME
	unsigned		args[0];
#endif
} cframe_t;
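
/*
 * Illustrative sketch (an assumption, not part of the build): a backtrace is
 * produced by chasing the saved-RBP chain through cframe_t records, as
 * panic_i386_backtrace() below does with additional validation ('stackptr'
 * here is a hypothetical starting frame pointer):
 */
#if 0
	cframe_t *f;
	for (f = (cframe_t *)stackptr; f != NULL; f = f->prev)
		paniclog_append_noflush("%p : 0x%lx\n", f, f->caller);
#endif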

static unsigned panic_io_port;
static unsigned	commit_paniclog_to_nvram;
boolean_t coprocessor_paniclog_flush = FALSE;

struct kcdata_descriptor kc_panic_data;
static boolean_t begun_panic_stackshot = FALSE;
extern kern_return_t	do_stackshot(void *);

extern void	kdp_snapshot_preflight(int pid, void *tracebuf,
			uint32_t tracebuf_size, uint32_t flags,
			kcdata_descriptor_t data_p,
			boolean_t enable_faulting);
extern int	kdp_stack_snapshot_bytes_traced(void);

#if DEVELOPMENT || DEBUG
vm_offset_t panic_stackshot_buf = 0;
size_t panic_stackshot_len = 0;
#endif

/*
 * Print a backtrace for one thread, walking the frame-pointer chain from
 * topfp (at most FP_MAX_NUM_TO_EVALUATE frames).
 */
void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
	boolean_t is_64_bit)
{
	int		i = 0;
	addr64_t	lr;
	addr64_t	fp;
	addr64_t	fp_for_ppn;
	ppnum_t		ppn;
	boolean_t	dump_kernel_stack;

	fp = topfp;
	fp_for_ppn = 0;
	ppn = (ppnum_t)NULL;

	if (fp >= VM_MIN_KERNEL_ADDRESS)
		dump_kernel_stack = TRUE;
	else
		dump_kernel_stack = FALSE;

	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
			break;
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
			break;
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
			break;

		/* Check to see if current address will result in a different
		   ppn than previously computed (to avoid recomputation) via
		   ((addr ^ fp_for_ppn) >> PAGE_SHIFT) */

		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		if (is_64_bit) {
			paniclog_append_noflush("%s\t0x%016llx\n", cur_marker, lr);
		} else {
			paniclog_append_noflush("%s\t0x%08x\n", cur_marker, (uint32_t)lr);
		}
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}
void
machine_startup(void)
{
	int boot_arg;

#if 0
	if( PE_get_hotkey( kPEControlKey ))
		halt_in_debugger = halt_in_debugger ? 0 : 1;
#endif

	if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
		commit_paniclog_to_nvram = 1;

	/*
	 * Entering the debugger will put the CPUs into a "safe"
	 * power mode.
	 */
	if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg)))
		pmsafe_debug = boot_arg;

	hw_lock_init(&pbtlock);		/* initialize print backtrace lock */

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
		max_unsafe_quanta = boot_arg;
	}
	if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
		max_poll_quanta = boot_arg;
	}
	if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
		sched_poll_yield_shift = boot_arg;
	}
	/* The I/O port to issue a read from, in the event of a panic. Useful for
	 * triggering logic analyzers.
	 */
	if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) {
		/* I/O ports range from 0 through 0xFFFF */
		panic_io_port = boot_arg & 0xffff;
	}
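
	/*
	 * Usage note (hypothetical value): booting with a "panic_io_port=0x80"
	 * boot-arg makes panic_io_port_read() below touch the legacy POST-code
	 * port at panic time, an easy trigger point for an attached analyzer.
	 */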

	machine_conf();

	panic_hooks_init();

	/*
	 * Start the system.
	 */
	kernel_bootstrap();
	/*NOTREACHED*/
}


static void
machine_conf(void)
{
	machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
}


extern void *gPEEFIRuntimeServices;
extern void *gPEEFISystemTable;
/*-
 *  COPYRIGHT (C) 1986 Gary S. Brown.  You may use this program, or
 *  code or tables extracted from it, as desired without restriction.
 *
 *  First, the polynomial itself and its table of feedback terms.  The
 *  polynomial is
 *  X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0
 *
 *  Note that we take it "backwards" and put the highest-order term in
 *  the lowest-order bit.  The X^32 term is "implied"; the LSB is the
 *  X^31 term, etc.  The X^0 term (usually shown as "+1") results in
 *  the MSB being 1.
 *
 *  Note that the usual hardware shift register implementation, which
 *  is what we're using (we're merely optimizing it by doing eight-bit
 *  chunks at a time) shifts bits into the lowest-order term.  In our
 *  implementation, that means shifting towards the right.  Why do we
 *  do it this way?  Because the calculated CRC must be transmitted in
 *  order from highest-order term to lowest-order term.  UARTs transmit
 *  characters in order from LSB to MSB.  By storing the CRC this way,
 *  we hand it to the UART in the order low-byte to high-byte; the UART
 *  sends each low-bit to high-bit; and the result is transmission bit
 *  by bit from highest- to lowest-order term without requiring any bit
 *  shuffling on our part.  Reception works similarly.
 *
 *  The feedback terms table consists of 256, 32-bit entries.  Notes:
 *
 *      The table can be generated at runtime if desired; code to do so
 *      is shown later.  It might not be obvious, but the feedback
 *      terms simply represent the results of eight shift/xor operations
 *      for all combinations of data and CRC register values.
 *
 *      The values must be right-shifted by eight bits by the "updcrc"
 *      logic; the shift must be unsigned (bring in zeroes).  On some
 *      hardware you could probably optimize the shift in assembler by
 *      using byte-swap instructions.
 *      polynomial $edb88320
 *
 *
 * CRC32 code derived from work by Gary S. Brown.
 */

static uint32_t crc32_tab[] = {
	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
	0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
	0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
	0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
	0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
	0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
	0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
	0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
	0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
	0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
	0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
	0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
	0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
	0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
	0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
	0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
	0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
	0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
	0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
	0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
	0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
	0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
	0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
	0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
	0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
	0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
	0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
	0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
	0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
	0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
	0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
	0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
	0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
	0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
	0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
	0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
	0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
	0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
	0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
	0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
	0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
	0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
	0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};

static uint32_t
crc32(uint32_t crc, const void *buf, size_t size)
{
	const uint8_t *p;

	p = buf;
	crc = crc ^ ~0U;

	while (size--)
		crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);

	return crc ^ ~0U;
}
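
/*
 * Illustrative sketch (an assumption, not part of the build): the comment
 * above notes the feedback table can be generated at runtime. Each entry is
 * the result of eight shift/xor steps over the reflected polynomial
 * 0xedb88320, e.g.:
 */
#if 0
static void
crc32_gen_tab(uint32_t tab[256])
{
	uint32_t c;
	int n, k;

	for (n = 0; n < 256; n++) {
		c = (uint32_t)n;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? (0xedb88320U ^ (c >> 1)) : (c >> 1);
		tab[n] = c;	/* matches crc32_tab[n] above */
	}
}
#endif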

static void
efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
{
	EFI_RUNTIME_SERVICES_64 *runtime;
	uint32_t hdr_cksum;
	uint32_t cksum;

	DPRINTF("Processing 64-bit EFI tables at %p\n", system_table);
	do {
		DPRINTF("Header:\n");
		DPRINTF("  Signature:  0x%016llx\n", system_table->Hdr.Signature);
		DPRINTF("  Revision:   0x%08x\n", system_table->Hdr.Revision);
		DPRINTF("  HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
		DPRINTF("  CRC32:      0x%08x\n", system_table->Hdr.CRC32);
		DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices);
		if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
			kprintf("Bad EFI system table signature\n");
			break;
		}
		// Verify checksum of the system table
		hdr_cksum = system_table->Hdr.CRC32;
		system_table->Hdr.CRC32 = 0;
		cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);

		DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		system_table->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI system table checksum\n");
			break;
		}

		gPEEFISystemTable = system_table;

		if (system_table->RuntimeServices == 0) {
			kprintf("No runtime table present\n");
			break;
		}
		DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
		// 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
		runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
		DPRINTF("Checking runtime services table %p\n", runtime);
		if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
			kprintf("Bad EFI runtime table signature\n");
			break;
		}

		// Verify checksum of the runtime services table
		hdr_cksum = runtime->Hdr.CRC32;
		runtime->Hdr.CRC32 = 0;
		cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);

		DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		runtime->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI runtime table checksum\n");
			break;
		}

		gPEEFIRuntimeServices = runtime;
	}
	while (FALSE);
}

static void
efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
{
	EFI_RUNTIME_SERVICES_32 *runtime;
	uint32_t hdr_cksum;
	uint32_t cksum;

	DPRINTF("Processing 32-bit EFI tables at %p\n", system_table);
	do {
		DPRINTF("Header:\n");
		DPRINTF("  Signature:  0x%016llx\n", system_table->Hdr.Signature);
		DPRINTF("  Revision:   0x%08x\n", system_table->Hdr.Revision);
		DPRINTF("  HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
		DPRINTF("  CRC32:      0x%08x\n", system_table->Hdr.CRC32);
		DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices);
		if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
			kprintf("Bad EFI system table signature\n");
			break;
		}
		// Verify checksum of the system table
		hdr_cksum = system_table->Hdr.CRC32;
		system_table->Hdr.CRC32 = 0;
		DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize);
		cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);

		DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		system_table->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI system table checksum\n");
			break;
		}

		gPEEFISystemTable = system_table;

		if (system_table->RuntimeServices == 0) {
			kprintf("No runtime table present\n");
			break;
		}
		DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
		// 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
		// For a 64-bit kernel, the booter provides a virtual address mod 4G.
		runtime = (EFI_RUNTIME_SERVICES_32 *)
			(system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
		DPRINTF("Runtime table addressed at %p\n", runtime);
		if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
			kprintf("Bad EFI runtime table signature\n");
			break;
		}

		// Verify checksum of the runtime services table
		hdr_cksum = runtime->Hdr.CRC32;
		runtime->Hdr.CRC32 = 0;
		cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);

		DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		runtime->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI runtime table checksum\n");
			break;
		}

		DPRINTF("Runtime functions\n");
		DPRINTF("  GetTime                  : 0x%x\n", runtime->GetTime);
		DPRINTF("  SetTime                  : 0x%x\n", runtime->SetTime);
		DPRINTF("  GetWakeupTime            : 0x%x\n", runtime->GetWakeupTime);
		DPRINTF("  SetWakeupTime            : 0x%x\n", runtime->SetWakeupTime);
		DPRINTF("  SetVirtualAddressMap     : 0x%x\n", runtime->SetVirtualAddressMap);
		DPRINTF("  ConvertPointer           : 0x%x\n", runtime->ConvertPointer);
		DPRINTF("  GetVariable              : 0x%x\n", runtime->GetVariable);
		DPRINTF("  GetNextVariableName      : 0x%x\n", runtime->GetNextVariableName);
		DPRINTF("  SetVariable              : 0x%x\n", runtime->SetVariable);
		DPRINTF("  GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount);
		DPRINTF("  ResetSystem              : 0x%x\n", runtime->ResetSystem);

		gPEEFIRuntimeServices = runtime;
	}
	while (FALSE);
}


/* Map in EFI runtime areas. */
static void
efi_init(void)
{
	boot_args *args = (boot_args *)PE_state.bootArgs;

	kprintf("Initializing EFI runtime services\n");

	do
	{
		vm_offset_t vm_size, vm_addr;
		vm_map_offset_t phys_addr;
		EfiMemoryRange *mptr;
		unsigned int msize, mcount;
		unsigned int i;

		msize = args->MemoryMapDescriptorSize;
		mcount = args->MemoryMapSize / msize;

		DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
			args->kaddr, args->ksize);
		DPRINTF("         efiSystemTable physical: 0x%x virtual: %p\n",
			args->efiSystemTable,
			(void *) ml_static_ptovirt(args->efiSystemTable));
		DPRINTF("         efiRuntimeServicesPageStart: 0x%x\n",
			args->efiRuntimeServicesPageStart);
		DPRINTF("         efiRuntimeServicesPageCount: 0x%x\n",
			args->efiRuntimeServicesPageCount);
		DPRINTF("         efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
			args->efiRuntimeServicesVirtualPageStart);
		mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
		for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
			if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
				vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
				vm_addr = (vm_offset_t) mptr->VirtualStart;
				/* For K64 on EFI32, shadow-map into high KVA */
				if (vm_addr < VM_MIN_KERNEL_ADDRESS)
					vm_addr |= VM_MIN_KERNEL_ADDRESS;
				phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
				DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
					mptr->Type,
					(void *) (uintptr_t) phys_addr,
					(void *) (uintptr_t) mptr->VirtualStart,
					(void *) vm_addr,
					(void *) vm_size);
				pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size),
					(mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ | VM_PROT_WRITE,
					(mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
			}
		}

		if (args->Version != kBootArgsVersion2)
			panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);

		DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
		if (args->efiMode == kBootArgsEfiMode64) {
			efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
		} else {
			efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
		}
	}
	while (FALSE);

	return;
}

/* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */
boolean_t
efi_valid_page(ppnum_t ppn)
{
	boot_args *args = (boot_args *)PE_state.bootArgs;
	ppnum_t    pstart = args->efiRuntimeServicesPageStart;
	ppnum_t    pend = pstart + args->efiRuntimeServicesPageCount;

	return pstart <= ppn && ppn < pend;
}

/* Remap EFI runtime areas. */
void
hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_offset)
{
	boot_args *args = (boot_args *)PE_state.bootArgs;

	kprintf("Reinitializing EFI runtime services\n");

	do
	{
		vm_offset_t vm_size, vm_addr;
		vm_map_offset_t phys_addr;
		EfiMemoryRange *mptr;
		unsigned int msize, mcount;
		unsigned int i;

		gPEEFISystemTable     = 0;
		gPEEFIRuntimeServices = 0;

		system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);

		kprintf("Old system table 0x%x, new 0x%x\n",
			(uint32_t)args->efiSystemTable, system_table_offset);

		args->efiSystemTable = system_table_offset;

		kprintf("Old map:\n");
		msize = args->MemoryMapDescriptorSize;
		mcount = args->MemoryMapSize / msize;
		mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
		for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
			if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {

				vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
				vm_addr = (vm_offset_t) mptr->VirtualStart;
				/* K64 on EFI32 */
				if (vm_addr < VM_MIN_KERNEL_ADDRESS)
					vm_addr |= VM_MIN_KERNEL_ADDRESS;
				phys_addr = (vm_map_offset_t) mptr->PhysicalStart;

				kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
			}
		}

		pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart),
			i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount));

		kprintf("New map:\n");
		msize = args->MemoryMapDescriptorSize;
		mcount = (unsigned int)(map_size / msize);
		mptr = map;
		for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
			if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {

				vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
				vm_addr = (vm_offset_t) mptr->VirtualStart;
				if (vm_addr < VM_MIN_KERNEL_ADDRESS)
					vm_addr |= VM_MIN_KERNEL_ADDRESS;
				phys_addr = (vm_map_offset_t) mptr->PhysicalStart;

				kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);

				pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
					(mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ | VM_PROT_WRITE,
					(mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
			}
		}

		if (args->Version != kBootArgsVersion2)
			panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);

		kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
		if (args->efiMode == kBootArgsEfiMode64) {
			efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
		} else {
			efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
		}
	}
	while (FALSE);

	kprintf("Done reinitializing EFI runtime services\n");

	return;
}

/*
 * Find devices.  The system is alive.
 */
void
machine_init(void)
{
	/* Now with VM up, switch to dynamically allocated cpu data */
	cpu_data_realloc();

	/* Ensure panic buffer is initialized. */
	debug_log_init();

	/*
	 * Display CPU identification
	 */
	cpuid_cpu_display("CPU identification");
	cpuid_feature_display("CPU features");
	cpuid_extfeature_display("CPU extended features");

	/*
	 * Initialize EFI runtime services.
	 */
	efi_init();

	smp_init();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

	/*
	 * Configure clock devices.
	 */
	clock_config();

#if CONFIG_MTRR
	/*
	 * Initialize MTRR from boot processor.
	 */
	mtrr_init();

	/*
	 * Set up PAT for boot processor.
	 */
	pat_init();
#endif

	/*
	 * Free lowmem pages and complete other setup
	 */
	pmap_lowmem_finalize();
}

/*
 * Halt a cpu.
 */
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

int reset_mem_on_reboot = 1;

/*
 * Halt the system or reboot.
 */
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart( kPERestartCPU );
	} else {
		printf("CPU halted\n");
		PEHaltRestart( kPEHaltCPU );
	}
	while (1);
}


/* Issue an I/O port read if one has been requested - this is an event logic
 * analyzers can use as a trigger point.
 */

void
panic_io_port_read(void)
{
	if (panic_io_port)
		(void)inb(panic_io_port);
}

/* For use with the MP rendezvous mechanism
 */

uint64_t panic_restart_timeout = ~(0ULL);

#define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)

/*
 * We should always return from this function with the other log offset
 * set in the panic_info structure.
 */
void
RecordPanicStackshot()
{
	int err = 0, bytes_traced = 0, bytes_used = 0, bytes_remaining = 0;
	char *stackshot_begin_loc = NULL;

	/* Don't re-enter this code if we panic here */
	if (begun_panic_stackshot) {
		if (panic_info->mph_other_log_offset == 0) {
			panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		}
		return;
	}
	begun_panic_stackshot = TRUE;

	/* The panic log length should have been set before we came to capture a stackshot */
	if (panic_info->mph_panic_log_len == 0) {
		kdb_printf("Found zero length panic log, skipping capturing panic stackshot\n");
		if (panic_info->mph_other_log_offset == 0) {
			panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		}
		return;
	}

	/*
	 * Try to capture an in memory panic_stackshot (enabled during boot
	 * on systems with co-processors).
	 */
	if (extended_debug_log_enabled) {
		if (stackshot_active()) {
			panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
			panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			kdb_printf("Panicked during stackshot, skipping panic stackshot\n");
			return;
		} else {
			stackshot_begin_loc = debug_buf_ptr;

			bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
			err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)stackshot_begin_loc,
					KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining, KCFLAG_USE_MEMCOPY);
			if (err != KERN_SUCCESS) {
				panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
				panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				kdb_printf("Failed to initialize kcdata buffer for in-memory panic stackshot, skipping ...\n");
				return;
			}

			kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, bytes_remaining,
					(STACKSHOT_KCDATA_FORMAT | STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO |
					 STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | STACKSHOT_FROM_PANIC | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0);
			err = do_stackshot(NULL);
			bytes_traced = (int) kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->mph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->mph_stackshot_len = bytes_traced;

				panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
			} else {
				bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					kdb_printf("\n** In Memory Panic Stackshot Incomplete ** Bytes Filled %d ** Err %d\n", bytes_used, err);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					kdb_printf("\n** In Memory Panic Stackshot Failed ** Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		}
#if DEVELOPMENT || DEBUG
		if (panic_stackshot_buf != 0) {
			// We're going to try to take another stackshot, reset the state.
			panic_stackshot_reset_state();
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
	}

#if DEVELOPMENT || DEBUG

	if (panic_stackshot_buf == 0) {
		kdb_printf("No stackshot buffer allocated for file backed panic stackshot, skipping...\n");
		return;
	}

	if (stackshot_active()) {
		kdb_printf("Panicked during stackshot, skipping file backed panic stackshot\n");
		return;
	}

	err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)panic_stackshot_buf, KCDATA_BUFFER_BEGIN_STACKSHOT,
			PANIC_STACKSHOT_BUFSIZE, KCFLAG_USE_MEMCOPY);
	if (err != KERN_SUCCESS) {
		kdb_printf("Failed to initialize kcdata buffer for file backed panic stackshot, skipping ...\n");
		return;
	}

	kdp_snapshot_preflight(-1, (void *) panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_NO_IO_STATS
			| STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0);
	err = do_stackshot(NULL);
	bytes_traced = (int) kdp_stack_snapshot_bytes_traced();
	if (bytes_traced > 0 && !err) {
		panic_stackshot_len = bytes_traced;
		kdb_printf("File backed panic stackshot succeeded, length: %u bytes\n", bytes_traced);
	} else {
		bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data);
		if (bytes_used > 0) {
			kdb_printf("File backed panic stackshot incomplete, consumed %u bytes, error : %d \n", bytes_used, err);
		} else {
			kdb_printf("File backed panic stackshot failed, consumed %u bytes, error : %d \n", bytes_used, err);
		}
	}
#endif /* DEVELOPMENT || DEBUG */

	return;
}

void
SavePanicInfo(
	__unused const char *message, uint64_t panic_options)
{
	void *stackptr;
	int cn = cpu_number();

	/*
	 * Issue an I/O port read if one has been requested - this is an event logic
	 * analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/* Obtain current frame pointer */
	__asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));

	/* Print backtrace - callee is internally synchronized */
	if (panic_options & DEBUGGER_OPTION_INITPROC_PANIC) {
		/* Special handling of launchd died panics */
		print_launchd_info();
	} else {
		panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80 : 48), NULL, FALSE, NULL);
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

	if (PE_get_offset_into_panic_region(debug_buf_ptr) < panic_info->mph_panic_log_offset) {
		kdb_printf("Invalid panic log offset found (not properly initialized?): debug_buf_ptr : 0x%p, panic_info: 0x%p mph_panic_log_offset: 0x%x\n",
			debug_buf_ptr, panic_info, panic_info->mph_panic_log_offset);
		panic_info->mph_panic_log_len = 0;
	} else {
		panic_info->mph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->mph_panic_log_offset;
	}

	/* Flush the panic log */
	paniclog_flush_internal(kPaniclogFlushBase);

	/* Try to take a panic stackshot */
	RecordPanicStackshot();

	/*
	 * Flush the panic log again with the stackshot or any relevant logging
	 * from when we tried to capture it.
	 */
	if (extended_debug_log_enabled) {
		paniclog_flush_internal(kPaniclogFlushStackshot);
	}
}

void
paniclog_flush_internal(paniclog_flush_type_t variant)
{
	/* Update the other log offset if we've opened the other log */
	if (panic_info->mph_other_log_offset != 0) {
		panic_info->mph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->mph_other_log_offset;
	}

	/*
	 * If we've detected that we're on a co-processor system, we flush the panic log via the kPEPanicSync
	 * panic callbacks, otherwise we flush via nvram (unless that has been disabled).
	 */
	if (coprocessor_paniclog_flush) {
		uint32_t overall_buffer_size = debug_buf_size;
		uint32_t size_to_flush = 0, offset_to_flush = 0;
		if (extended_debug_log_enabled) {
			/*
			 * debug_buf_size for the extended log does not include the length of the header.
			 * There may be some extra data at the end of the 'basic' log that wouldn't get flushed
			 * for the non-extended case (this is a concession we make to not shrink the paniclog data
			 * for non-coprocessor systems that only use the basic log).
			 */
			overall_buffer_size = debug_buf_size + sizeof(struct macos_panic_header);
		}

		/* Update the CRC */
		panic_info->mph_crc = crc32(0L, &panic_info->mph_version, (overall_buffer_size - offsetof(struct macos_panic_header, mph_version)));

		if (variant == kPaniclogFlushBase) {
			/* Flush the header and base panic log. */
			kprintf("Flushing base panic log\n");
			size_to_flush = ROUNDUP((panic_info->mph_panic_log_offset + panic_info->mph_panic_log_len), PANIC_FLUSH_BOUNDARY);
			offset_to_flush = 0;
			PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
		} else if ((variant == kPaniclogFlushStackshot) || (variant == kPaniclogFlushOtherLog)) {
			if (variant == kPaniclogFlushStackshot) {
				/*
				 * We flush the stackshot before flushing the updated header because the stackshot
				 * can take a while to flush. We want the paniclog header to be as consistent as possible even
				 * if the stackshot isn't flushed completely. Flush starting from the end of the panic log.
				 */
				kprintf("Flushing panic log stackshot\n");
				offset_to_flush = ROUNDDOWN((panic_info->mph_panic_log_offset + panic_info->mph_panic_log_len), PANIC_FLUSH_BOUNDARY);
				size_to_flush = ROUNDUP((panic_info->mph_stackshot_len + (panic_info->mph_stackshot_offset - offset_to_flush)), PANIC_FLUSH_BOUNDARY);
				PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
			}

			/* Flush the other log -- everything after the stackshot */
			kprintf("Flushing panic 'other' log\n");
			offset_to_flush = ROUNDDOWN((panic_info->mph_stackshot_offset + panic_info->mph_stackshot_len), PANIC_FLUSH_BOUNDARY);
			size_to_flush = ROUNDUP((panic_info->mph_other_log_len + (panic_info->mph_other_log_offset - offset_to_flush)), PANIC_FLUSH_BOUNDARY);
			PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);

			/* Flush the header -- everything before the paniclog */
			kprintf("Flushing panic log header\n");
			size_to_flush = ROUNDUP(panic_info->mph_panic_log_offset, PANIC_FLUSH_BOUNDARY);
			offset_to_flush = 0;
			PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
		}
	} else if (commit_paniclog_to_nvram) {
		assert(debug_buf_size != 0);
		unsigned int bufpos;
		unsigned long pi_size = 0;
		uintptr_t cr0;

		debug_putc(0);

		/*
		 * Now call the compressor
		 * XXX Consider using the WKdm compressor in the
		 * future, rather than just packing - would need to
		 * be co-ordinated with crashreporter, which decodes
		 * this post-restart. The compressor should be
		 * capable of in-place compression.
		 *
		 * Don't include the macOS panic header (for co-processor systems only)
		 */
		bufpos = packA(debug_buf_base, (unsigned int) (debug_buf_ptr - debug_buf_base),
				debug_buf_size);
		/*
		 * If compression was successful, use the compressed length
		 */
		pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf_base);

		/*
		 * The following sequence is a workaround for:
		 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
		 * any routines that use floating point (MMX in this case) when saving panic
		 * logs to nvram/flash.
		 */
		cr0 = get_cr0();
		clear_ts();

		/*
		 * Save panic log to non-volatile store
		 * Panic info handler must truncate data that is
		 * too long for this platform.
		 * This call must save data synchronously,
		 * since we can subsequently halt the system.
		 */
		kprintf("Attempting to commit panic log to NVRAM\n");
		pi_size = PESavePanicInfo((unsigned char *)debug_buf_base,
				(uint32_t)pi_size );
		set_cr0(cr0);

		/*
		 * Uncompress in-place, to permit examination of
		 * the panic log by debuggers.
		 */
		if (bufpos) {
			unpackA(debug_buf_base, bufpos);
		}
	}
}

void
paniclog_flush()
{
	/* Called outside of this file to update logging appended to the "other" log */
	paniclog_flush_internal(kPaniclogFlushOtherLog);
	return;
}

char *
machine_boot_info(char *buf, __unused vm_size_t size)
{
	*buf = '\0';
	return buf;
}

/* Routines for address - symbol translation. Not called unless the "keepsyms"
 * boot-arg is supplied.
 */

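/*
 * Usage note (an assumption, based on the parse below): symbol names are
 * only produced when the kernel is booted with the "keepsyms=1" boot-arg,
 * since the LINKEDIT segment is otherwise unloaded.
 */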
static int
panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name)
{
	kernel_nlist_t			*sym = NULL;
	struct load_command		*cmd;
	kernel_segment_command_t	*orig_ts = NULL, *orig_le = NULL;
	struct symtab_command		*orig_st = NULL;
	unsigned int			i;
	char				*strings, *bestsym = NULL;
	vm_address_t			bestaddr = 0, diff, curdiff;

	/* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */

	cmd = (struct load_command *) &mh[1];
	for (i = 0; i < mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT_KERNEL) {
			kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;

			if (strncmp(SEG_TEXT, orig_sg->segname,
				    sizeof(orig_sg->segname)) == 0)
				orig_ts = orig_sg;
			else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
				    sizeof(orig_sg->segname)) == 0)
				orig_le = orig_sg;
			else if (strncmp("", orig_sg->segname,
				    sizeof(orig_sg->segname)) == 0)
				orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */
		}
		else if (cmd->cmd == LC_SYMTAB)
			orig_st = (struct symtab_command *) cmd;

		cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
	}

	if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
		return 0;

	if ((search < orig_ts->vmaddr) ||
	    (search >= orig_ts->vmaddr + orig_ts->vmsize)) {
		/* search out of range for this mach header */
		return 0;
	}

	sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
	strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
	diff = search;

	for (i = 0; i < orig_st->nsyms; i++) {
		if (sym[i].n_type & N_STAB) continue;

		if (sym[i].n_value <= search) {
			curdiff = search - (vm_address_t)sym[i].n_value;
			if (curdiff < diff) {
				diff = curdiff;
				bestaddr = sym[i].n_value;
				bestsym = strings + sym[i].n_un.n_strx;
			}
		}
	}

	if (bestsym != NULL) {
		if (diff != 0) {
			paniclog_append_noflush("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff);
		} else {
			paniclog_append_noflush("%s : %s", module_name, bestsym);
		}
		return 1;
	}
	return 0;
}

extern kmod_info_t * kmod; /* the list of modules */

static void
panic_print_kmod_symbol_name(vm_address_t search)
{
	u_int i;

	if (gLoadedKextSummaries == NULL)
		return;
	for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
		OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i;

		if ((search >= summary->address) &&
		    (search < (summary->address + summary->size)))
		{
			kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address;
			if (panic_print_macho_symbol_name(header, search, summary->name) == 0) {
				paniclog_append_noflush("%s + %llu", summary->name, (unsigned long)search - summary->address);
			}
			break;
		}
	}
}

void
panic_print_symbol_name(vm_address_t search)
{
	/* try searching in the kernel */
	if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) {
		/* that failed, now try to search for the right kext */
		panic_print_kmod_symbol_name(search);
	}
}

/* Generate a backtrace, given a frame pointer - this routine
 * should walk the stack safely. The trace is appended to the panic log
 * and conditionally, to the console. If the trace contains kernel module
 * addresses, display the module name, load address and dependencies.
 */

#define DUMPFRAMES 32
#define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
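/*
 * Rough arithmetic (an assumption; the TSC rate varies by machine): 5e9 TSC
 * cycles is about 2 seconds at a hypothetical 2.5 GHz, bounding how long the
 * backtrace lock and output-drain spins below can last.
 */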
void
panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
{
	cframe_t	*frame = (cframe_t *)_frame;
	vm_offset_t	raddrs[DUMPFRAMES];
	vm_offset_t	PC = 0;
	int		frame_index;
	volatile uint32_t *ppbtcnt = &pbtcnt;
	uint64_t	bt_tsc_timeout;
	boolean_t	keepsyms = FALSE;
	int		cn = cpu_number();
	boolean_t	old_doprnt_hide_pointers = doprnt_hide_pointers;

	if (pbtcpu != cn) {
		hw_atomic_add(&pbtcnt, 1);
		/* Spin on print backtrace lock, which serializes output
		 * Continue anyway if a timeout occurs.
		 */
		hw_lock_to(&pbtlock, ~0U);
		pbtcpu = cn;
	}

	if (__improbable(doprnt_hide_pointers == TRUE)) {
		/* If we're called directly, the Debugger() function will not be called,
		 * so we need to reset the value in here. */
		doprnt_hide_pointers = FALSE;
	}

	panic_check_hook();

	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));

	if (msg != NULL) {
		paniclog_append_noflush("%s", msg);
	}

	if ((regdump == TRUE) && (regs != NULL)) {
		x86_saved_state64_t	*ss64p = saved_state64(regs);
		paniclog_append_noflush(
			"RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
			"RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
			"R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
			"R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
			"RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n",
			ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
			ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
			ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
			ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
			ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
			ss64p->isf.ss);
		PC = ss64p->isf.rip;
	}

	paniclog_append_noflush("Backtrace (CPU %d), "
#if PRINT_ARGS_FROM_STACK_FRAME
	"Frame : Return Address (4 potential args on stack)\n", cn);
#else
	"Frame : Return Address\n", cn);
#endif

	for (frame_index = 0; frame_index < nframes; frame_index++) {
		vm_offset_t curframep = (vm_offset_t) frame;

		if (!curframep)
			break;

		if (curframep & 0x3) {
			paniclog_append_noflush("Unaligned frame\n");
			goto invalid;
		}

		if (!kvtophys(curframep) ||
		    !kvtophys(curframep + sizeof(cframe_t) - 1)) {
			paniclog_append_noflush("No mapping exists for frame pointer\n");
			goto invalid;
		}

		paniclog_append_noflush("%p : 0x%lx ", frame, frame->caller);
		if (frame_index < DUMPFRAMES)
			raddrs[frame_index] = frame->caller;

#if PRINT_ARGS_FROM_STACK_FRAME
		if (kvtophys((vm_offset_t)&(frame->args[3])))
			paniclog_append_noflush("(0x%x 0x%x 0x%x 0x%x) ",
			    frame->args[0], frame->args[1],
			    frame->args[2], frame->args[3]);
#endif

		/* Display address-symbol translation only if the "keepsyms"
		 * boot-arg is supplied, since we unload LINKEDIT otherwise.
		 * This routine is potentially unsafe; also, function
		 * boundary identification is unreliable after a strip -x.
		 */
		if (keepsyms)
			panic_print_symbol_name((vm_address_t)frame->caller);

		paniclog_append_noflush("\n");

		frame = frame->prev;
	}

	if (frame_index >= nframes)
		paniclog_append_noflush("\tBacktrace continues...\n");

	goto out;

invalid:
	paniclog_append_noflush("Backtrace terminated - invalid frame pointer %p\n", frame);
out:

	/* Identify kernel modules in the backtrace and display their
	 * load addresses and dependencies. This routine should walk
	 * the kmod list safely.
	 */
	if (frame_index)
		kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);

	if (PC != 0)
		kmod_panic_dump(&PC, 1);

	panic_display_system_configuration(FALSE);

	doprnt_hide_pointers = old_doprnt_hide_pointers;

	/* Release print backtrace lock, to permit other callers in the
	 * event of panics on multiple processors.
	 */
	hw_lock_unlock(&pbtlock);
	hw_atomic_sub(&pbtcnt, 1);
	/* Wait for other processors to complete output
	 * Timeout and continue after PBT_TIMEOUT_CYCLES.
	 */
	bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
	while (*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
}

static boolean_t
debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
{
	size_t rem = size;
	char *kvaddr = dest;

	while (rem) {
		ppnum_t upn = pmap_find_phys(p, uaddr);
		uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (upn && pmap_valid_page(upn) && phys_dest) {
			bcopy_phys(phys_src, phys_dest, cur_size);
		}
		else
			break;
		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}
	return (rem == 0);
}
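
/*
 * Illustrative sketch (an assumption, not part of the build): debug_copyin()
 * is a panic-safe analogue of copyin() -- it resolves each user page through
 * the pmap and copies physically, so it cannot take a fault. Hypothetical
 * usage, reading one word from a user address 'uaddr' of some 'task':
 */
#if 0
	uint64_t word;
	if (debug_copyin(task->map->pmap, uaddr, &word, sizeof(word)))
		paniclog_append_noflush("user word: 0x%llx\n", word);
#endif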

void
print_threads_registers(thread_t thread)
{
	x86_saved_state_t *savestate;

	savestate = get_user_regs(thread);
	paniclog_append_noflush(
		"\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
		"RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
		"R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
		"R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
		"RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n\n",
		savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx,
		savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi,
		savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11,
		savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15,
		savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs,
		savestate->ss_64.isf.ss);
}

void
print_tasks_user_threads(task_t task)
{
	thread_t		thread = current_thread();
	x86_saved_state_t	*savestate;
	pmap_t			pmap = 0;
	uint64_t		rbp;
	const char		*cur_marker = 0;
	int			j;

	for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
	     ++j, thread = (thread_t) queue_next(&thread->task_threads)) {

		paniclog_append_noflush("Thread %d: %p\n", j, thread);
		pmap = get_task_pmap(task);
		savestate = get_user_regs(thread);
		rbp = savestate->ss_64.rbp;
		paniclog_append_noflush("\t0x%016llx\n", savestate->ss_64.isf.rip);
		print_one_backtrace(pmap, (vm_offset_t)rbp, cur_marker, TRUE);
		paniclog_append_noflush("\n");
	}
}

void
print_thread_num_that_crashed(task_t task)
{
	thread_t	c_thread = current_thread();
	thread_t	thread;
	int		j;

	for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
	     ++j, thread = (thread_t) queue_next(&thread->task_threads)) {

		if (c_thread == thread) {
			paniclog_append_noflush("\nThread %d crashed\n", j);
			break;
		}
	}
}

#define PANICLOG_UUID_BUF_SIZE 256

void print_uuid_info(task_t task)
{
	uint32_t		uuid_info_count = 0;
	mach_vm_address_t	uuid_info_addr = 0;
	boolean_t		have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
	boolean_t		have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
	int			task_pid = pid_from_task(task);
	char			uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0};
	char			*uuidbufptr = uuidbuf;
	uint32_t		k;

	if (have_pmap && task->active && task_pid > 0) {
		/* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
		struct user64_dyld_all_image_infos task_image_infos;
		if (debug_copyin(task->map->pmap, task->all_image_info_addr,
			&task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
		}

		/* If we get a NULL uuid_info_addr (which can happen when we catch dyld
		 * in the middle of updating this data structure), we zero the
		 * uuid_info_count so that we won't even try to save load info for this task
		 */
		if (!uuid_info_addr) {
			uuid_info_count = 0;
		}
	}

	if (task_pid > 0 && uuid_info_count > 0) {
		uint32_t uuid_info_size = sizeof(struct user64_dyld_uuid_info);
		uint32_t uuid_array_size = uuid_info_count * uuid_info_size;
		uint32_t uuid_copy_size = 0;
		uint32_t uuid_image_count = 0;
		char *current_uuid_buffer = NULL;
		/* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */

		paniclog_append_noflush("\nuuid info:\n");
		while (uuid_array_size) {
			if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) {
				uuid_copy_size = uuid_array_size;
				uuid_image_count = uuid_array_size / uuid_info_size;
			} else {
				uuid_image_count = PANICLOG_UUID_BUF_SIZE / uuid_info_size;
				uuid_copy_size = uuid_image_count * uuid_info_size;
			}
			if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr,
				uuid_copy_size)) {
				paniclog_append_noflush("Error!! Failed to copy UUID info for task %p pid %d\n", task, task_pid);
				uuid_image_count = 0;
				break;
			}

			if (uuid_image_count > 0) {
				current_uuid_buffer = uuidbufptr;
				for (k = 0; k < uuid_image_count; k++) {
					paniclog_append_noflush(" %#llx", *(uint64_t *)current_uuid_buffer);
					current_uuid_buffer += sizeof(uint64_t);
					uint8_t *uuid = (uint8_t *)current_uuid_buffer;
					paniclog_append_noflush("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
					    uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8],
					    uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
					current_uuid_buffer += 16;
				}
				bzero(&uuidbuf, sizeof(uuidbuf));
			}
			uuid_info_addr += uuid_copy_size;
			uuid_array_size -= uuid_copy_size;
		}
	}
}

void print_launchd_info(void)
{
	task_t		task = current_task();
	thread_t	thread = current_thread();
	volatile uint32_t *ppbtcnt = &pbtcnt;
	uint64_t	bt_tsc_timeout;
	int		cn = cpu_number();

	if (pbtcpu != cn) {
		hw_atomic_add(&pbtcnt, 1);
		/* Spin on print backtrace lock, which serializes output
		 * Continue anyway if a timeout occurs.
		 */
		hw_lock_to(&pbtlock, ~0U);
		pbtcpu = cn;
	}

	print_uuid_info(task);
	print_thread_num_that_crashed(task);
	print_threads_registers(thread);
	print_tasks_user_threads(task);

	panic_display_system_configuration(TRUE);

	/* Release print backtrace lock, to permit other callers in the
	 * event of panics on multiple processors.
	 */
	hw_lock_unlock(&pbtlock);
	hw_atomic_sub(&pbtcnt, 1);
	/* Wait for other processors to complete output
	 * Timeout and continue after PBT_TIMEOUT_CYCLES.
	 */
	bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
	while (*ppbtcnt && (rdtsc64() < bt_tsc_timeout));

}