]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/AT386/model_dep.c
xnu-6153.101.6.tar.gz
[apple/xnu.git] / osfmk / i386 / AT386 / model_dep.c
CommitLineData
1c79356b 1/*
ea3f0419 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
0a7de745 31/*
1c79356b
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
1c79356b
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
1c79356b
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
1c79356b
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
1c79356b
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 */
59
60/*
61 * File: model_dep.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 *
64 * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
65 *
66 * Basic initialization for I386 - ISA bus machines.
67 */
68
1c79356b 69
5ba3f43e
A
70#define __APPLE_API_PRIVATE 1
71#define __APPLE_API_UNSTABLE 1
72#include <kern/debug.h>
73
1c79356b
A
74#include <mach/i386/vm_param.h>
75
76#include <string.h>
77#include <mach/vm_param.h>
78#include <mach/vm_prot.h>
79#include <mach/machine.h>
80#include <mach/time_value.h>
04b8595b 81#include <sys/kdebug.h>
1c79356b
A
82#include <kern/spl.h>
83#include <kern/assert.h>
0a7de745 84#include <kern/lock_group.h>
1c79356b
A
85#include <kern/misc_protos.h>
86#include <kern/startup.h>
87#include <kern/clock.h>
1c79356b 88#include <kern/cpu_data.h>
91447636 89#include <kern/machine.h>
b0d623f7
A
90#include <i386/postcode.h>
91#include <i386/mp_desc.h>
92#include <i386/misc_protos.h>
93#include <i386/thread.h>
94#include <i386/trap.h>
95#include <i386/machine_routines.h>
0a7de745 96#include <i386/mp.h> /* mp_rendezvous_break_lock */
b0d623f7 97#include <i386/cpuid.h>
1c79356b 98#include <i386/fpu.h>
6d2010ae
A
99#include <i386/machine_cpu.h>
100#include <i386/pmap.h>
101#if CONFIG_MTRR
91447636 102#include <i386/mtrr.h>
6d2010ae
A
103#endif
104#include <i386/ucode.h>
0c530ab8 105#include <i386/pmCPU.h>
fe8ab488
A
106#include <i386/panic_hooks.h>
107
0c530ab8 108#include <architecture/i386/pio.h> /* inb() */
55e303ae 109#include <pexpert/i386/boot.h>
1c79356b 110
04b8595b 111#include <kdp/kdp_dyld.h>
39037602 112#include <kdp/kdp_core.h>
0c530ab8
A
113#include <vm/pmap.h>
114#include <vm/vm_map.h>
115#include <vm/vm_kern.h>
116
cb323159 117#include <IOKit/IOBSD.h>
9bccf70c 118#include <IOKit/IOPlatformExpert.h>
0c530ab8
A
119#include <IOKit/IOHibernatePrivate.h>
120
121#include <pexpert/i386/efi.h>
122
123#include <kern/thread.h>
316670eb 124#include <kern/sched.h>
0c530ab8
A
125#include <mach-o/loader.h>
126#include <mach-o/nlist.h>
9bccf70c 127
b0d623f7 128#include <libkern/kernel_mach_header.h>
6d2010ae 129#include <libkern/OSKextLibPrivate.h>
d9a64523 130#include <libkern/crc.h>
3e170ce0 131
0a7de745 132#if DEBUG || DEVELOPMENT
ea3f0419 133#define DPRINTF(x ...) kprintf(x)
7ddcb079 134#else
ea3f0419 135#define DPRINTF(x ...)
7ddcb079 136#endif
91447636 137
5c9f4661
A
138#ifndef ROUNDUP
139#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
140#endif
141
142#ifndef ROUNDDOWN
0a7de745 143#define ROUNDDOWN(x, y) (((x)/(y))*(y))
5c9f4661
A
144#endif
145
55e303ae 146static void machine_conf(void);
04b8595b 147void panic_print_symbol_name(vm_address_t search);
5ba3f43e 148void RecordPanicStackshot(void);
1c79356b 149
5c9f4661 150typedef enum paniclog_flush_type {
0a7de745
A
151 kPaniclogFlushBase = 1,/* Flush the initial log and paniclog header */
152 kPaniclogFlushStackshot = 2,/* Flush only the stackshot data, then flush the header */
153 kPaniclogFlushOtherLog = 3/* Flush the other log, then flush the header */
5c9f4661
A
154} paniclog_flush_type_t;
155
156void paniclog_flush_internal(paniclog_flush_type_t variant);
157
0a7de745
A
158extern const char version[];
159extern char osversion[];
160extern int max_unsafe_quanta;
161extern int max_poll_quanta;
162extern unsigned int panic_is_inited;
91447636 163
0a7de745 164extern int proc_pid(void *p);
04b8595b
A
165
166/* Definitions for frame pointers */
167#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
168#define FP_LR_OFFSET ((uint32_t)4)
169#define FP_LR_OFFSET64 ((uint32_t)8)
170#define FP_MAX_NUM_TO_EVALUATE (50)
171
0c530ab8 172volatile int pbtcpu = -1;
0a7de745 173hw_lock_data_t pbtlock; /* backtrace print lock */
0c530ab8
A
174uint32_t pbtcnt = 0;
175
6d2010ae
A
176volatile int panic_double_fault_cpu = -1;
177
0a7de745 178#define PRINT_ARGS_FROM_STACK_FRAME 0
b0d623f7 179
0c530ab8 180typedef struct _cframe_t {
0a7de745
A
181 struct _cframe_t *prev;
182 uintptr_t caller;
b0d623f7 183#if PRINT_ARGS_FROM_STACK_FRAME
0a7de745 184 unsigned args[0];
b0d623f7 185#endif
0c530ab8
A
186} cframe_t;
187
2d21ac55 188static unsigned panic_io_port;
0a7de745 189static unsigned commit_paniclog_to_nvram;
5ba3f43e
A
190boolean_t coprocessor_paniclog_flush = FALSE;
191
5ba3f43e
A
192struct kcdata_descriptor kc_panic_data;
193static boolean_t begun_panic_stackshot = FALSE;
0a7de745 194extern kern_return_t do_stackshot(void *);
cc8bc92a 195
0a7de745
A
196extern void kdp_snapshot_preflight(int pid, void *tracebuf,
197 uint32_t tracebuf_size, uint32_t flags,
198 kcdata_descriptor_t data_p,
199 boolean_t enable_faulting);
200extern int kdp_stack_snapshot_bytes_traced(void);
0c530ab8 201
cc8bc92a 202vm_offset_t panic_stackshot_buf = 0;
cb323159 203size_t panic_stackshot_buf_len = 0;
c910b4d9 204
cb323159 205size_t panic_stackshot_len = 0;
04b8595b
A
/*
 * Backtrace a single frame.
 *
 * Walk the frame-pointer chain starting at topfp, appending one saved
 * return address (LR) per frame to the panic log via
 * paniclog_append_noflush().  All memory is read through physical
 * addresses (pmap_find_phys() + ml_phys_read_*()) so the walk cannot
 * fault on a suspect virtual mapping during a panic.  The walk stops
 * on a NULL or misaligned frame pointer, a pointer outside the
 * expected (kernel vs. user) address range, an unreadable page, a
 * cycle back to topfp, or after FP_MAX_NUM_TO_EVALUATE frames.
 */
void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit)
{
	int i = 0;
	addr64_t lr;
	addr64_t fp;
	addr64_t fp_for_ppn;    /* address whose physical page is cached in ppn */
	ppnum_t ppn;
	boolean_t dump_kernel_stack;

	fp = topfp;
	fp_for_ppn = 0;
	ppn = (ppnum_t)NULL;

	/* Decide once whether this is a kernel-space or user-space stack. */
	if (fp >= VM_MIN_KERNEL_ADDRESS) {
		dump_kernel_stack = TRUE;
	} else {
		dump_kernel_stack = FALSE;
	}

	do {
		/* Stop on a NULL or misaligned frame pointer. */
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		/* A kernel stack walk must stay within the kernel range... */
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		/* ...and a user stack walk must never wander into kernel space. */
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/* Check to see if current address will result in a different
		 * ppn than previously computed (to avoid recomputation) via
		 * (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */

		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		/* Read the saved return address from the resolved physical page. */
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		/* Re-resolve the physical page if fp itself is on a new page. */
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		/* Follow the chain: load the previous frame pointer. */
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		/* Emit the return address we just read. */
		if (is_64_bit) {
			paniclog_append_noflush("%s\t0x%016llx\n", cur_marker, lr);
		} else {
			paniclog_append_noflush("%s\t0x%08x\n", cur_marker, (uint32_t)lr);
		}
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
}
1c79356b 289void
2d21ac55 290machine_startup(void)
1c79356b 291{
0a7de745 292 int boot_arg;
1c79356b 293
55e303ae 294#if 0
0a7de745
A
295 if (PE_get_hotkey( kPEControlKey )) {
296 halt_in_debugger = halt_in_debugger ? 0 : 1;
297 }
1c79356b
A
298#endif
299
0a7de745 300 if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof(commit_paniclog_to_nvram))) {
2d21ac55 301 commit_paniclog_to_nvram = 1;
0a7de745 302 }
2d21ac55
A
303
304 /*
305 * Entering the debugger will put the CPUs into a "safe"
306 * power mode.
307 */
0a7de745
A
308 if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof(boot_arg))) {
309 pmsafe_debug = boot_arg;
310 }
2d21ac55 311
0a7de745 312 hw_lock_init(&pbtlock); /* initialize print backtrace lock */
1c79356b 313
0a7de745 314 if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) {
55e303ae 315 default_preemption_rate = boot_arg;
1c79356b 316 }
0a7de745 317 if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof(boot_arg))) {
55e303ae
A
318 max_unsafe_quanta = boot_arg;
319 }
0a7de745 320 if (PE_parse_boot_argn("poll", &boot_arg, sizeof(boot_arg))) {
55e303ae
A
321 max_poll_quanta = boot_arg;
322 }
0a7de745 323 if (PE_parse_boot_argn("yield", &boot_arg, sizeof(boot_arg))) {
55e303ae
A
324 sched_poll_yield_shift = boot_arg;
325 }
0c530ab8
A
326/* The I/O port to issue a read from, in the event of a panic. Useful for
327 * triggering logic analyzers.
328 */
0a7de745 329 if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof(boot_arg))) {
0c530ab8
A
330 /*I/O ports range from 0 through 0xFFFF */
331 panic_io_port = boot_arg & 0xffff;
332 }
333
55e303ae
A
334 machine_conf();
335
fe8ab488
A
336 panic_hooks_init();
337
1c79356b
A
338 /*
339 * Start the system.
340 */
91447636
A
341 kernel_bootstrap();
342 /*NOTREACHED*/
1c79356b
A
343}
344
55e303ae
A
345
/*
 * Record the machine configuration: publish the physical memory size
 * (mem_size) into machine_info for consumers such as host_info().
 */
static void
machine_conf(void)
{
	machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
}
351
0c530ab8
A
352extern void *gPEEFIRuntimeServices;
353extern void *gPEEFISystemTable;
354
0c530ab8
A
/*
 * Validate the 64-bit EFI system table and its runtime services table,
 * then publish them via gPEEFISystemTable / gPEEFIRuntimeServices.
 * Each EFI table header carries a CRC32; it is recomputed with the CRC
 * field temporarily zeroed and compared to the stored value.  On any
 * mismatch or missing table the loop breaks and the corresponding
 * global is left unset.
 */
static void
efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
{
	EFI_RUNTIME_SERVICES_64 *runtime;
	uint32_t hdr_cksum;
	uint32_t cksum;

	DPRINTF("Processing 64-bit EFI tables at %p\n", system_table);
	do {
		DPRINTF("Header:\n");
		DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
		DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
		DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
		DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
		DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices);
		if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
			kprintf("Bad EFI system table signature\n");
			break;
		}
		/* Verify the checksum of the system table: the CRC32 is
		 * computed with the CRC field zeroed, then restored. */
		hdr_cksum = system_table->Hdr.CRC32;
		system_table->Hdr.CRC32 = 0;
		cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);

		DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		system_table->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI system table checksum\n");
			break;
		}

		gPEEFISystemTable = system_table;

		if (system_table->RuntimeServices == 0) {
			kprintf("No runtime table present\n");
			break;
		}
		DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
		// 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
		runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
		DPRINTF("Checking runtime services table %p\n", runtime);
		if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
			kprintf("Bad EFI runtime table signature\n");
			break;
		}

		/* Verify the checksum of the runtime services table. */
		hdr_cksum = runtime->Hdr.CRC32;
		runtime->Hdr.CRC32 = 0;
		cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);

		DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		runtime->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI runtime table checksum\n");
			break;
		}

		gPEEFIRuntimeServices = runtime;
	} while (FALSE);
}
416
/*
 * 32-bit counterpart of efi_set_tables_64(): validate the 32-bit EFI
 * system table and runtime services table (header CRC32 recomputed
 * with the CRC field zeroed), then publish them via
 * gPEEFISystemTable / gPEEFIRuntimeServices.  On failure the
 * corresponding global is left unset.
 */
static void
efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
{
	EFI_RUNTIME_SERVICES_32 *runtime;
	uint32_t hdr_cksum;
	uint32_t cksum;

	DPRINTF("Processing 32-bit EFI tables at %p\n", system_table);
	do {
		DPRINTF("Header:\n");
		DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
		DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
		DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
		DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
		DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices);
		if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
			kprintf("Bad EFI system table signature\n");
			break;
		}
		/* Verify the checksum of the system table: CRC32 is computed
		 * with the CRC field zeroed, then restored. */
		hdr_cksum = system_table->Hdr.CRC32;
		system_table->Hdr.CRC32 = 0;
		DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize);
		cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);

		DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		system_table->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI system table checksum\n");
			break;
		}

		gPEEFISystemTable = system_table;

		if (system_table->RuntimeServices == 0) {
			kprintf("No runtime table present\n");
			break;
		}
		DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
		// 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
		// For a 64-bit kernel, booter provides a virtual address mod 4G
		runtime = (EFI_RUNTIME_SERVICES_32 *)
		    (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
		DPRINTF("Runtime table addressed at %p\n", runtime);
		if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
			kprintf("Bad EFI runtime table signature\n");
			break;
		}

		/* Verify the checksum of the runtime services table. */
		hdr_cksum = runtime->Hdr.CRC32;
		runtime->Hdr.CRC32 = 0;
		cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);

		DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
		runtime->Hdr.CRC32 = hdr_cksum;
		if (cksum != hdr_cksum) {
			kprintf("Bad EFI runtime table checksum\n");
			break;
		}

		DPRINTF("Runtime functions\n");
		DPRINTF(" GetTime : 0x%x\n", runtime->GetTime);
		DPRINTF(" SetTime : 0x%x\n", runtime->SetTime);
		DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime);
		DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime);
		DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap);
		DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer);
		DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable);
		DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName);
		DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable);
		DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount);
		DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem);

		gPEEFIRuntimeServices = runtime;
	} while (FALSE);
}
494
495
/* Map in EFI runtime areas. */
/*
 * Walk the boot-args EFI memory map, map every EFI_MEMORY_RUNTIME
 * range into the kernel address space (runtime code R+X, runtime data
 * R+W, MMIO uncached), then validate and publish the EFI system table
 * for the reported EFI mode (32- or 64-bit).  Panics on an
 * incompatible boot-args version.
 */
static void
efi_init(void)
{
	boot_args *args = (boot_args *)PE_state.bootArgs;

	kprintf("Initializing EFI runtime services\n");

	do {
		vm_offset_t vm_size, vm_addr;
		vm_map_offset_t phys_addr;
		EfiMemoryRange *mptr;
		unsigned int msize, mcount;
		unsigned int i;

		msize = args->MemoryMapDescriptorSize;
		mcount = args->MemoryMapSize / msize;

		DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
		    args->kaddr, args->ksize);
		DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
		    args->efiSystemTable,
		    (void *) ml_static_ptovirt(args->efiSystemTable));
		DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
		    args->efiRuntimeServicesPageStart);
		DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
		    args->efiRuntimeServicesPageCount);
		DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
		    args->efiRuntimeServicesVirtualPageStart);
		mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
		/* Descriptors are msize bytes apart; advance by raw byte stride. */
		for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
			if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME)) {
				vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
				vm_addr = (vm_offset_t) mptr->VirtualStart;
				/* For K64 on EFI32, shadow-map into high KVA */
				if (vm_addr < VM_MIN_KERNEL_ADDRESS) {
					vm_addr |= VM_MIN_KERNEL_ADDRESS;
				}
				phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
				DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
				    mptr->Type,
				    (void *) (uintptr_t) phys_addr,
				    (void *) (uintptr_t) mptr->VirtualStart,
				    (void *) vm_addr,
				    (void *) vm_size);
				/* Code executable, data writable; MMIO uncached. */
				pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size),
				    (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ | VM_PROT_WRITE,
				    (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
			}
		}

		if (args->Version != kBootArgsVersion2) {
			panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
		}

		DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
		if (args->efiMode == kBootArgsEfiMode64) {
			efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
		} else {
			efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
		}
	} while (FALSE);

	return;
}
561
0a7de745 562/* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */
5ba3f43e 563boolean_t
4ba76501 564bootloader_valid_page(ppnum_t ppn)
5ba3f43e 565{
0a7de745
A
566 boot_args *args = (boot_args *)PE_state.bootArgs;
567 ppnum_t pstart = args->efiRuntimeServicesPageStart;
568 ppnum_t pend = pstart + args->efiRuntimeServicesPageCount;
5ba3f43e 569
0a7de745 570 return pstart <= ppn && ppn < pend;
5ba3f43e
A
571}
572
0c530ab8
A
/* Remap EFI runtime areas. */
/*
 * Called on resume from hibernation with a replacement EFI memory map:
 * invalidate the published EFI table pointers, update the system-table
 * location in boot-args, log the old map, tear down the stale runtime
 * mappings, install mappings from the new map (code R+X, data R+W,
 * MMIO uncached), and re-validate/re-publish the EFI tables.
 */
void
hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_offset)
{
	boot_args *args = (boot_args *)PE_state.bootArgs;

	kprintf("Reinitializing EFI runtime services\n");

	do {
		vm_offset_t vm_size, vm_addr;
		vm_map_offset_t phys_addr;
		EfiMemoryRange *mptr;
		unsigned int msize, mcount;
		unsigned int i;

		/* Invalidate the published tables until re-verified below. */
		gPEEFISystemTable = 0;
		gPEEFIRuntimeServices = 0;

		/* Offset is relative to the runtime services region start. */
		system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);

		kprintf("Old system table 0x%x, new 0x%x\n",
		    (uint32_t)args->efiSystemTable, system_table_offset);

		args->efiSystemTable = system_table_offset;

		kprintf("Old map:\n");
		msize = args->MemoryMapDescriptorSize;
		mcount = args->MemoryMapSize / msize;
		mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
		for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
			if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
				vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
				vm_addr = (vm_offset_t) mptr->VirtualStart;
				/* K64 on EFI32 */
				if (vm_addr < VM_MIN_KERNEL_ADDRESS) {
					vm_addr |= VM_MIN_KERNEL_ADDRESS;
				}
				phys_addr = (vm_map_offset_t) mptr->PhysicalStart;

				kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
			}
		}

		/* Drop the stale mappings for the old runtime services range. */
		pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart),
		    i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount));

		kprintf("New map:\n");
		msize = args->MemoryMapDescriptorSize;
		mcount = (unsigned int)(map_size / msize);
		mptr = map;
		for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
			if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
				vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
				vm_addr = (vm_offset_t) mptr->VirtualStart;
				if (vm_addr < VM_MIN_KERNEL_ADDRESS) {
					vm_addr |= VM_MIN_KERNEL_ADDRESS;
				}
				phys_addr = (vm_map_offset_t) mptr->PhysicalStart;

				kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);

				/* Code executable, data writable; MMIO uncached. */
				pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
				    (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ | VM_PROT_WRITE,
				    (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
			}
		}

		if (args->Version != kBootArgsVersion2) {
			panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
		}

		kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
		if (args->efiMode == kBootArgsEfiMode64) {
			efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
		} else {
			efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
		}
	} while (FALSE);

	kprintf("Done reinitializing EFI runtime services\n");

	return;
}
656
1c79356b
A
/*
 * Find devices. The system is alive.
 *
 * Later machine-dependent initialization, run once the VM is up:
 * reallocates per-cpu data, initializes the panic log buffer, prints
 * CPU identification, maps EFI runtime services, brings up SMP, the
 * FPU, clocks, MTRR/PAT (when configured), and finalizes low memory.
 */
void
machine_init(void)
{
	/* Now with VM up, switch to dynamically allocated cpu data */
	cpu_data_realloc();

	/* Ensure panic buffer is initialized. */
	debug_log_init();

	/*
	 * Display CPU identification
	 */
	cpuid_cpu_display("CPU identification");
	cpuid_feature_display("CPU features");
	cpuid_extfeature_display("CPU extended features");

	/*
	 * Initialize EFI runtime services.
	 */
	efi_init();

	smp_init();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

	/*
	 * Configure clock devices.
	 */
	clock_config();

#if CONFIG_MTRR
	/*
	 * Initialize MTRR from boot processor.
	 */
	mtrr_init();

	/*
	 * Set up PAT for boot processor.
	 */
	pat_init();
#endif

	/*
	 * Free lowmem pages and complete other setup
	 */
	pmap_lowmem_finalize();
}
710
711/*
712 * Halt a cpu.
713 */
714void
715halt_cpu(void)
716{
717 halt_all_cpus(FALSE);
718}
719
720int reset_mem_on_reboot = 1;
721
722/*
723 * Halt the system or reboot.
724 */
39037602 725__attribute__((noreturn))
1c79356b 726void
9bccf70c 727halt_all_cpus(boolean_t reboot)
1c79356b 728{
55e303ae 729 if (reboot) {
55e303ae
A
730 printf("MACH Reboot\n");
731 PEHaltRestart( kPERestartCPU );
732 } else {
733 printf("CPU halted\n");
734 PEHaltRestart( kPEHaltCPU );
1c79356b 735 }
0a7de745
A
736 while (1) {
737 ;
738 }
1c79356b
A
739}
740
0a7de745 741
0c530ab8
A
742/* Issue an I/O port read if one has been requested - this is an event logic
743 * analyzers can use as a trigger point.
744 */
745
746void
0a7de745
A
747panic_io_port_read(void)
748{
749 if (panic_io_port) {
0c530ab8 750 (void)inb(panic_io_port);
0a7de745 751 }
0c530ab8
A
752}
753
754/* For use with the MP rendezvous mechanism
755 */
756
6d2010ae
A
757uint64_t panic_restart_timeout = ~(0ULL);
758
13f56ec4
A
759#define PANIC_RESTART_TIMEOUT (3ULL * NSEC_PER_SEC)
760
cc8bc92a
A
761/*
762 * We should always return from this function with the other log offset
763 * set in the panic_info structure.
764 */
5ba3f43e
A
765void
766RecordPanicStackshot()
767{
cb323159
A
768 int err = 0;
769 size_t bytes_traced = 0, bytes_used = 0, bytes_remaining = 0;
cc8bc92a
A
770 char *stackshot_begin_loc = NULL;
771
772 /* Don't re-enter this code if we panic here */
5ba3f43e 773 if (begun_panic_stackshot) {
cc8bc92a
A
774 if (panic_info->mph_other_log_offset == 0) {
775 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
776 }
13f56ec4 777 return;
6d2010ae 778 }
5ba3f43e 779 begun_panic_stackshot = TRUE;
13f56ec4 780
cc8bc92a
A
781 /* The panic log length should have been set before we came to capture a stackshot */
782 if (panic_info->mph_panic_log_len == 0) {
783 kdb_printf("Found zero length panic log, skipping capturing panic stackshot\n");
784 if (panic_info->mph_other_log_offset == 0) {
785 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
786 }
787 return;
788 }
789
cb323159
A
790 if (stackshot_active()) {
791 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
cc8bc92a 792 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
cb323159 793 kdb_printf("Panicked during stackshot, skipping panic stackshot\n");
cc8bc92a
A
794 return;
795 }
796
cb323159
A
797 /* Try to capture an in memory panic_stackshot */
798 if (extended_debug_log_enabled) {
799 /* On coprocessor systems we write this into the extended debug log */
800 stackshot_begin_loc = debug_buf_ptr;
801 bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
802 } else if (panic_stackshot_buf != 0) {
803 /* On other systems we use the panic stackshot_buf */
804 stackshot_begin_loc = (char *) panic_stackshot_buf;
805 bytes_remaining = panic_stackshot_buf_len;
806 } else {
807 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
5ba3f43e
A
808 return;
809 }
13f56ec4 810
cb323159
A
811
812 err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)stackshot_begin_loc,
813 KCDATA_BUFFER_BEGIN_STACKSHOT, (unsigned int) bytes_remaining, KCFLAG_USE_MEMCOPY);
5ba3f43e 814 if (err != KERN_SUCCESS) {
cb323159
A
815 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
816 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
817 kdb_printf("Failed to initialize kcdata buffer for in-memory panic stackshot, skipping ...\n");
5ba3f43e
A
818 return;
819 }
1c79356b 820
cb323159
A
821 uint32_t stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
822 STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC |
823 STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO);
824#if DEVELOPMENT
825 /*
826 * Include the shared cache layout in panic stackshots on DEVELOPMENT kernels so that we can symbolicate
827 * panic stackshots from corefiles.
828 */
829 stackshot_flags |= STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT;
830#endif
831
832 kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, (uint32_t) bytes_remaining, stackshot_flags, &kc_panic_data, 0);
5ba3f43e
A
833 err = do_stackshot(NULL);
834 bytes_traced = (int) kdp_stack_snapshot_bytes_traced();
cb323159
A
835 bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data);
836
837 if ((err != KERN_SUCCESS) && (bytes_used > 0)) {
838 /*
839 * We ran out of space while trying to capture a stackshot, try again without user frames.
840 * It's not safe to log from here (in case we're writing in the middle of the debug buffer on coprocessor systems)
841 * but append a flag to the panic flags.
842 */
843 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_KERNEL_ONLY;
844 panic_stackshot_reset_state();
845
846 /* Erase the stackshot data (this region is pre-populated with the NULL character) */
847 memset(stackshot_begin_loc, '\0', bytes_used);
848
849 err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)stackshot_begin_loc,
850 KCDATA_BUFFER_BEGIN_STACKSHOT, (unsigned int) bytes_remaining, KCFLAG_USE_MEMCOPY);
851 if (err != KERN_SUCCESS) {
852 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
853 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
854 kdb_printf("Failed to re-initialize kcdata buffer for kernel only in-memory panic stackshot, skipping ...\n");
855 return;
856 }
857
858 stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_KCDATA_FORMAT | STACKSHOT_FROM_PANIC |
859 STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY);
860#if DEVELOPMENT
861 /*
862 * Include the shared cache layout in panic stackshots on DEVELOPMENT kernels so that we can symbolicate
863 * panic stackshots from corefiles.
864 */
865 stackshot_flags |= STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT;
866#endif
867
868 kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, (uint32_t) bytes_remaining, stackshot_flags, &kc_panic_data, 0);
869 err = do_stackshot(NULL);
870 bytes_traced = (int) kdp_stack_snapshot_bytes_traced();
871 bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data);
872 }
873
874 if (err == KERN_SUCCESS) {
875 if (extended_debug_log_enabled) {
876 debug_buf_ptr += bytes_traced;
877 }
878 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
879 panic_info->mph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
880 panic_info->mph_stackshot_len = (uint32_t) bytes_traced;
881
882 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
883 kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %zu **\n", bytes_traced);
884
885 /* Used by the code that writes the buffer to disk */
886 panic_stackshot_buf = (vm_offset_t) stackshot_begin_loc;
5ba3f43e 887 panic_stackshot_len = bytes_traced;
cb323159
A
888
889 if (!extended_debug_log_enabled && (gIOPolledCoreFileMode == kIOPolledCoreFileModeStackshot)) {
890 /* System configured to write panic stackshot to disk */
891 kern_dump(KERN_DUMP_STACKSHOT_DISK);
892 }
5ba3f43e 893 } else {
5ba3f43e 894 if (bytes_used > 0) {
cb323159
A
895 /* Erase the stackshot data (this region is pre-populated with the NULL character) */
896 memset(stackshot_begin_loc, '\0', bytes_used);
897 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;
898
899 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
900 kdb_printf("\n** In Memory Panic Stackshot Incomplete ** Bytes Filled %zu ** Err %d\n", bytes_used, err);
5ba3f43e 901 } else {
cb323159
A
902 bzero(stackshot_begin_loc, bytes_used);
903 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
904
905 panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
906 kdb_printf("\n** In Memory Panic Stackshot Failed ** Bytes Traced %zu, err %d\n", bytes_traced, err);
5ba3f43e 907 }
39037602 908 }
cc8bc92a 909
5ba3f43e 910 return;
316670eb
A
911}
912
1c79356b 913void
5ba3f43e 914SavePanicInfo(
d9a64523 915 __unused const char *message, void *panic_data, uint64_t panic_options)
1c79356b 916{
d9a64523
A
917 void *stackptr = NULL;
918 thread_t thread_to_trace = (thread_t) panic_data;
919 cframe_t synthetic_stack_frame = { };
920 char *debugger_msg = NULL;
6d2010ae 921 int cn = cpu_number();
39037602 922
0c530ab8 923 /*
5ba3f43e
A
924 * Issue an I/O port read if one has been requested - this is an event logic
925 * analyzers can use as a trigger point.
0c530ab8 926 */
5ba3f43e 927 panic_io_port_read();
0c530ab8 928
d9a64523
A
929 /* Obtain frame pointer for stack to trace */
930 if (panic_options & DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE) {
931 if (!mp_kdp_all_cpus_halted()) {
932 debugger_msg = "Backtracing panicked thread because failed to halt all CPUs\n";
933 } else if (thread_to_trace == THREAD_NULL) {
934 debugger_msg = "Backtracing panicked thread because no thread pointer provided\n";
935 } else if (kvtophys((vm_offset_t)thread_to_trace) == 0ULL) {
936 debugger_msg = "Backtracing panicked thread because unable to access specified thread\n";
937 } else if (thread_to_trace->kernel_stack == 0) {
938 debugger_msg = "Backtracing panicked thread because kernel_stack is NULL for specified thread\n";
939 } else if (kvtophys(STACK_IKS(thread_to_trace->kernel_stack) == 0ULL)) {
940 debugger_msg = "Backtracing panicked thread because unable to access kernel_stack for specified thread\n";
941 } else {
942 debugger_msg = "Backtracing specified thread\n";
943 /* We construct a synthetic stack frame so we can include the current instruction pointer */
944 synthetic_stack_frame.prev = (cframe_t *)STACK_IKS(thread_to_trace->kernel_stack)->k_rbp;
945 synthetic_stack_frame.caller = (uintptr_t) STACK_IKS(thread_to_trace->kernel_stack)->k_rip;
946 stackptr = (void *) &synthetic_stack_frame;
947 }
948 }
949
950 if (stackptr == NULL) {
0a7de745 951 __asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr));
d9a64523 952 }
0c530ab8 953
cc8bc92a 954 /* Print backtrace - callee is internally synchronized */
5ba3f43e
A
955 if (panic_options & DEBUGGER_OPTION_INITPROC_PANIC) {
956 /* Special handling of launchd died panics */
957 print_launchd_info();
958 } else {
ea3f0419 959 panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80 : 48), debugger_msg, FALSE, NULL);
5ba3f43e 960 }
0c530ab8 961
5ba3f43e
A
962 if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
963 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
964 }
0c530ab8 965
cc8bc92a
A
966 if (PE_get_offset_into_panic_region(debug_buf_ptr) < panic_info->mph_panic_log_offset) {
967 kdb_printf("Invalid panic log offset found (not properly initialized?): debug_buf_ptr : 0x%p, panic_info: 0x%p mph_panic_log_offset: 0x%x\n",
0a7de745 968 debug_buf_ptr, panic_info, panic_info->mph_panic_log_offset);
cc8bc92a
A
969 panic_info->mph_panic_log_len = 0;
970 } else {
971 panic_info->mph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->mph_panic_log_offset;
972 }
973
974 /* Flush the panic log */
5c9f4661 975 paniclog_flush_internal(kPaniclogFlushBase);
0c530ab8 976
5ba3f43e
A
977 /* Try to take a panic stackshot */
978 RecordPanicStackshot();
cc8bc92a
A
979
980 /*
981 * Flush the panic log again with the stackshot or any relevant logging
982 * from when we tried to capture it.
983 */
cb323159 984 paniclog_flush_internal(kPaniclogFlushStackshot);
5ba3f43e 985}
6d2010ae 986
0a7de745
A
987void
988paniclog_flush_internal(paniclog_flush_type_t variant)
5ba3f43e 989{
cc8bc92a
A
990 /* Update the other log offset if we've opened the other log */
991 if (panic_info->mph_other_log_offset != 0) {
992 panic_info->mph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->mph_other_log_offset;
993 }
2d21ac55 994
5ba3f43e 995 /*
cc8bc92a 996 * If we've detected that we're on a co-processor system, we flush the panic log via the kPEPanicSync
5ba3f43e
A
997 * panic callbacks, otherwise we flush via nvram (unless that has been disabled).
998 */
999 if (coprocessor_paniclog_flush) {
5c9f4661
A
1000 uint32_t overall_buffer_size = debug_buf_size;
1001 uint32_t size_to_flush = 0, offset_to_flush = 0;
cc8bc92a
A
1002 if (extended_debug_log_enabled) {
1003 /*
1004 * debug_buf_size for the extended log does not include the length of the header.
1005 * There may be some extra data at the end of the 'basic' log that wouldn't get flushed
1006 * for the non-extended case (this is a concession we make to not shrink the paniclog data
1007 * for non-coprocessor systems that only use the basic log).
1008 */
5c9f4661 1009 overall_buffer_size = debug_buf_size + sizeof(struct macos_panic_header);
cc8bc92a
A
1010 }
1011
5c9f4661
A
1012 /* Update the CRC */
1013 panic_info->mph_crc = crc32(0L, &panic_info->mph_version, (overall_buffer_size - offsetof(struct macos_panic_header, mph_version)));
1014
1015 if (variant == kPaniclogFlushBase) {
1016 /* Flush the header and base panic log. */
1017 kprintf("Flushing base panic log\n");
1018 size_to_flush = ROUNDUP((panic_info->mph_panic_log_offset + panic_info->mph_panic_log_len), PANIC_FLUSH_BOUNDARY);
1019 offset_to_flush = 0;
1020 PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
1021 } else if ((variant == kPaniclogFlushStackshot) || (variant == kPaniclogFlushOtherLog)) {
1022 if (variant == kPaniclogFlushStackshot) {
1023 /*
1024 * We flush the stackshot before flushing the updated header because the stackshot
1025 * can take a while to flush. We want the paniclog header to be as consistent as possible even
1026 * if the stackshot isn't flushed completely. Flush starting from the end of the panic log.
1027 */
1028 kprintf("Flushing panic log stackshot\n");
1029 offset_to_flush = ROUNDDOWN((panic_info->mph_panic_log_offset + panic_info->mph_panic_log_len), PANIC_FLUSH_BOUNDARY);
1030 size_to_flush = ROUNDUP((panic_info->mph_stackshot_len + (panic_info->mph_stackshot_offset - offset_to_flush)), PANIC_FLUSH_BOUNDARY);
1031 PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
1032 }
5ba3f43e 1033
5c9f4661
A
1034 /* Flush the other log -- everything after the stackshot */
1035 kprintf("Flushing panic 'other' log\n");
1036 offset_to_flush = ROUNDDOWN((panic_info->mph_stackshot_offset + panic_info->mph_stackshot_len), PANIC_FLUSH_BOUNDARY);
1037 size_to_flush = ROUNDUP((panic_info->mph_other_log_len + (panic_info->mph_other_log_offset - offset_to_flush)), PANIC_FLUSH_BOUNDARY);
1038 PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
1039
1040 /* Flush the header -- everything before the paniclog */
1041 kprintf("Flushing panic log header\n");
1042 size_to_flush = ROUNDUP(panic_info->mph_panic_log_offset, PANIC_FLUSH_BOUNDARY);
1043 offset_to_flush = 0;
1044 PESavePanicInfoAction(panic_info, offset_to_flush, size_to_flush);
1045 }
cc8bc92a 1046 } else if (commit_paniclog_to_nvram) {
5ba3f43e
A
1047 assert(debug_buf_size != 0);
1048 unsigned int bufpos;
5c9f4661 1049 unsigned long pi_size = 0;
5ba3f43e
A
1050 uintptr_t cr0;
1051
1052 debug_putc(0);
1053
5ba3f43e
A
1054 /*
1055 * Now call the compressor
1056 * XXX Consider using the WKdm compressor in the
1057 * future, rather than just packing - would need to
1058 * be co-ordinated with crashreporter, which decodes
1059 * this post-restart. The compressor should be
1060 * capable of in-place compression.
1061 *
1062 * Don't include the macOS panic header (for co-processor systems only)
1063 */
1064 bufpos = packA(debug_buf_base, (unsigned int) (debug_buf_ptr - debug_buf_base),
0a7de745 1065 debug_buf_size);
5ba3f43e
A
1066 /*
1067 * If compression was successful, use the compressed length
1068 */
1069 pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf_base);
b7266188 1070
5ba3f43e
A
1071 /*
1072 * The following sequence is a workaround for:
1073 * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
1074 * any routines that use floating point (MMX in this case) when saving panic
1075 * logs to nvram/flash.
1076 */
1077 cr0 = get_cr0();
1078 clear_ts();
1079
1080 /*
1081 * Save panic log to non-volatile store
1082 * Panic info handler must truncate data that is
1083 * too long for this platform.
1084 * This call must save data synchronously,
1085 * since we can subsequently halt the system.
1086 */
1087 kprintf("Attempting to commit panic log to NVRAM\n");
1088 pi_size = PESavePanicInfo((unsigned char *)debug_buf_base,
0a7de745 1089 (uint32_t)pi_size );
5ba3f43e
A
1090 set_cr0(cr0);
1091
1092 /*
1093 * Uncompress in-place, to permit examination of
1094 * the panic log by debuggers.
1095 */
1096 if (bufpos) {
1097 unpackA(debug_buf_base, bufpos);
0c530ab8 1098 }
5ba3f43e 1099 }
1c79356b
A
1100}
1101
5c9f4661
A
1102void
1103paniclog_flush()
1104{
1105 /* Called outside of this file to update logging appended to the "other" log */
1106 paniclog_flush_internal(kPaniclogFlushOtherLog);
1107 return;
1108}
1109
1c79356b 1110char *
91447636 1111machine_boot_info(char *buf, __unused vm_size_t size)
1c79356b 1112{
0a7de745 1113 *buf = '\0';
1c79356b
A
1114 return buf;
1115}
1116
0c530ab8
A
1117/* Routines for address - symbol translation. Not called unless the "keepsyms"
1118 * boot-arg is supplied.
1119 */
1120
1121static int
6d2010ae 1122panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name)
0c530ab8 1123{
0a7de745
A
1124 kernel_nlist_t *sym = NULL;
1125 struct load_command *cmd;
1126 kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL;
1127 struct symtab_command *orig_st = NULL;
1128 unsigned int i;
1129 char *strings, *bestsym = NULL;
1130 vm_address_t bestaddr = 0, diff, curdiff;
1131
1132 /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
1133
1134 cmd = (struct load_command *) &mh[1];
1135 for (i = 0; i < mh->ncmds; i++) {
1136 if (cmd->cmd == LC_SEGMENT_KERNEL) {
1137 kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;
1138
1139 if (strncmp(SEG_TEXT, orig_sg->segname,
1140 sizeof(orig_sg->segname)) == 0) {
1141 orig_ts = orig_sg;
1142 } else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
1143 sizeof(orig_sg->segname)) == 0) {
1144 orig_le = orig_sg;
1145 } else if (strncmp("", orig_sg->segname,
1146 sizeof(orig_sg->segname)) == 0) {
1147 orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */
1148 }
1149 } else if (cmd->cmd == LC_SYMTAB) {
1150 orig_st = (struct symtab_command *) cmd;
1151 }
1152
1153 cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
1154 }
1155
1156 if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL)) {
1157 return 0;
1158 }
1159
1160 if ((search < orig_ts->vmaddr) ||
1161 (search >= orig_ts->vmaddr + orig_ts->vmsize)) {
1162 /* search out of range for this mach header */
1163 return 0;
1164 }
1165
1166 sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
1167 strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
1168 diff = search;
1169
1170 for (i = 0; i < orig_st->nsyms; i++) {
1171 if (sym[i].n_type & N_STAB) {
1172 continue;
1173 }
1174
1175 if (sym[i].n_value <= search) {
1176 curdiff = search - (vm_address_t)sym[i].n_value;
1177 if (curdiff < diff) {
1178 diff = curdiff;
1179 bestaddr = sym[i].n_value;
1180 bestsym = strings + sym[i].n_un.n_strx;
1181 }
1182 }
1183 }
1184
1185 if (bestsym != NULL) {
1186 if (diff != 0) {
1187 paniclog_append_noflush("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff);
1188 } else {
1189 paniclog_append_noflush("%s : %s", module_name, bestsym);
1190 }
1191 return 1;
1192 }
1193 return 0;
0c530ab8
A
1194}
1195
1196extern kmod_info_t * kmod; /* the list of modules */
1197
1198static void
1199panic_print_kmod_symbol_name(vm_address_t search)
1200{
0a7de745
A
1201 u_int i;
1202
1203 if (gLoadedKextSummaries == NULL) {
1204 return;
1205 }
1206 for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
1207 OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i;
1208
1209 if ((search >= summary->address) &&
1210 (search < (summary->address + summary->size))) {
1211 kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address;
1212 if (panic_print_macho_symbol_name(header, search, summary->name) == 0) {
1213 paniclog_append_noflush("%s + %llu", summary->name, (unsigned long)search - summary->address);
1214 }
1215 break;
1216 }
1217 }
0c530ab8
A
1218}
1219
04b8595b 1220void
0c530ab8
A
1221panic_print_symbol_name(vm_address_t search)
1222{
0a7de745
A
1223 /* try searching in the kernel */
1224 if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) {
1225 /* that failed, now try to search for the right kext */
1226 panic_print_kmod_symbol_name(search);
1227 }
0c530ab8
A
1228}
1229
1230/* Generate a backtrace, given a frame pointer - this routine
1231 * should walk the stack safely. The trace is appended to the panic log
1232 * and conditionally, to the console. If the trace contains kernel module
1233 * addresses, display the module name, load address and dependencies.
1234 */
1235
1236#define DUMPFRAMES 32
1237#define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
1238void
935ed37a 1239panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
0c530ab8 1240{
0a7de745 1241 cframe_t *frame = (cframe_t *)_frame;
0c530ab8 1242 vm_offset_t raddrs[DUMPFRAMES];
935ed37a 1243 vm_offset_t PC = 0;
0c530ab8
A
1244 int frame_index;
1245 volatile uint32_t *ppbtcnt = &pbtcnt;
1246 uint64_t bt_tsc_timeout;
1247 boolean_t keepsyms = FALSE;
6d2010ae 1248 int cn = cpu_number();
3e170ce0 1249 boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
0c530ab8 1250
ea3f0419
A
1251#if DEVELOPMENT || DEBUG
1252 /* Turn off I/O tracing now that we're panicking */
1253 mmiotrace_enabled = 0;
1254#endif
1255
0a7de745 1256 if (pbtcpu != cn) {
cb323159 1257 os_atomic_inc(&pbtcnt, relaxed);
0c530ab8
A
1258 /* Spin on print backtrace lock, which serializes output
1259 * Continue anyway if a timeout occurs.
1260 */
0a7de745 1261 hw_lock_to(&pbtlock, ~0U, LCK_GRP_NULL);
6d2010ae 1262 pbtcpu = cn;
0c530ab8
A
1263 }
1264
3e170ce0
A
1265 if (__improbable(doprnt_hide_pointers == TRUE)) {
1266 /* If we're called directly, the Debugger() function will not be called,
1267 * so we need to reset the value in here. */
1268 doprnt_hide_pointers = FALSE;
1269 }
1270
fe8ab488
A
1271 panic_check_hook();
1272
0a7de745 1273 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
0c530ab8 1274
935ed37a 1275 if (msg != NULL) {
5ba3f43e 1276 paniclog_append_noflush("%s", msg);
935ed37a
A
1277 }
1278
1279 if ((regdump == TRUE) && (regs != NULL)) {
0a7de745 1280 x86_saved_state64_t *ss64p = saved_state64(regs);
5ba3f43e 1281 paniclog_append_noflush(
0a7de745
A
1282 "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
1283 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1284 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
1285 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1286 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
1287 ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
1288 ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
1289 ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
1290 ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
1291 ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
1292 ss64p->isf.ss);
b0d623f7 1293 PC = ss64p->isf.rip;
935ed37a
A
1294 }
1295
5ba3f43e 1296 paniclog_append_noflush("Backtrace (CPU %d), "
b0d623f7 1297#if PRINT_ARGS_FROM_STACK_FRAME
0a7de745 1298 "Frame : Return Address (4 potential args on stack)\n", cn);
b0d623f7 1299#else
0a7de745 1300 "Frame : Return Address\n", cn);
b0d623f7 1301#endif
0c530ab8
A
1302
1303 for (frame_index = 0; frame_index < nframes; frame_index++) {
1304 vm_offset_t curframep = (vm_offset_t) frame;
1305
0a7de745 1306 if (!curframep) {
0c530ab8 1307 break;
0a7de745 1308 }
0c530ab8
A
1309
1310 if (curframep & 0x3) {
5ba3f43e 1311 paniclog_append_noflush("Unaligned frame\n");
0c530ab8
A
1312 goto invalid;
1313 }
1314
1315 if (!kvtophys(curframep) ||
6d2010ae 1316 !kvtophys(curframep + sizeof(cframe_t) - 1)) {
5ba3f43e 1317 paniclog_append_noflush("No mapping exists for frame pointer\n");
0c530ab8
A
1318 goto invalid;
1319 }
1320
5ba3f43e 1321 paniclog_append_noflush("%p : 0x%lx ", frame, frame->caller);
0a7de745 1322 if (frame_index < DUMPFRAMES) {
0c530ab8 1323 raddrs[frame_index] = frame->caller;
0a7de745 1324 }
0c530ab8 1325
b0d623f7 1326#if PRINT_ARGS_FROM_STACK_FRAME
0a7de745 1327 if (kvtophys((vm_offset_t)&(frame->args[3]))) {
5ba3f43e 1328 paniclog_append_noflush("(0x%x 0x%x 0x%x 0x%x) ",
0c530ab8
A
1329 frame->args[0], frame->args[1],
1330 frame->args[2], frame->args[3]);
0a7de745 1331 }
b0d623f7 1332#endif
0c530ab8
A
1333
1334 /* Display address-symbol translation only if the "keepsyms"
1335 * boot-arg is suppplied, since we unload LINKEDIT otherwise.
1336 * This routine is potentially unsafe; also, function
1337 * boundary identification is unreliable after a strip -x.
1338 */
0a7de745 1339 if (keepsyms) {
0c530ab8 1340 panic_print_symbol_name((vm_address_t)frame->caller);
0a7de745
A
1341 }
1342
5ba3f43e 1343 paniclog_append_noflush("\n");
b0d623f7 1344
0c530ab8
A
1345 frame = frame->prev;
1346 }
1347
0a7de745 1348 if (frame_index >= nframes) {
5ba3f43e 1349 paniclog_append_noflush("\tBacktrace continues...\n");
0a7de745 1350 }
0c530ab8
A
1351
1352 goto out;
1353
1354invalid:
0a7de745 1355 paniclog_append_noflush("Backtrace terminated-invalid frame pointer %p\n", frame);
0c530ab8
A
1356out:
1357
1358 /* Identify kernel modules in the backtrace and display their
1359 * load addresses and dependencies. This routine should walk
1360 * the kmod list safely.
1361 */
0a7de745 1362 if (frame_index) {
b0d623f7 1363 kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);
0a7de745 1364 }
0c530ab8 1365
0a7de745 1366 if (PC != 0) {
b0d623f7 1367 kmod_panic_dump(&PC, 1);
0a7de745 1368 }
935ed37a 1369
d190cdc3 1370 panic_display_system_configuration(FALSE);
c910b4d9 1371
3e170ce0
A
1372 doprnt_hide_pointers = old_doprnt_hide_pointers;
1373
0c530ab8
A
1374 /* Release print backtrace lock, to permit other callers in the
1375 * event of panics on multiple processors.
1376 */
1377 hw_lock_unlock(&pbtlock);
cb323159 1378 os_atomic_dec(&pbtcnt, relaxed);
0c530ab8
A
1379 /* Wait for other processors to complete output
1380 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1381 */
1382 bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
0a7de745
A
1383 while (*ppbtcnt && (rdtsc64() < bt_tsc_timeout)) {
1384 ;
1385 }
0c530ab8 1386}
04b8595b
A
1387
1388static boolean_t
1389debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
1390{
0a7de745
A
1391 size_t rem = size;
1392 char *kvaddr = dest;
1393
1394 while (rem) {
1395 ppnum_t upn = pmap_find_phys(p, uaddr);
1396 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
1397 uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
1398 uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
1399 uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
1400 size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
1401 cur_size = MIN(cur_size, rem);
1402
1403 if (upn && pmap_valid_page(upn) && phys_dest) {
1404 bcopy_phys(phys_src, phys_dest, cur_size);
1405 } else {
1406 break;
1407 }
1408 uaddr += cur_size;
1409 kvaddr += cur_size;
1410 rem -= cur_size;
1411 }
1412 return rem == 0;
04b8595b
A
1413}
1414
1415void
1416print_threads_registers(thread_t thread)
1417{
1418 x86_saved_state_t *savestate;
0a7de745 1419
04b8595b 1420 savestate = get_user_regs(thread);
5ba3f43e 1421 paniclog_append_noflush(
04b8595b 1422 "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
0a7de745
A
1423 "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
1424 "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
04b8595b
A
1425 "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
1426 "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
1427 savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx,
1428 savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi,
0a7de745 1429 savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11,
04b8595b
A
1430 savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15,
1431 savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs,
1432 savestate->ss_64.isf.ss);
1433}
1434
1435void
1436print_tasks_user_threads(task_t task)
1437{
0a7de745 1438 thread_t thread = current_thread();
04b8595b 1439 x86_saved_state_t *savestate;
0a7de745
A
1440 pmap_t pmap = 0;
1441 uint64_t rbp;
1442 const char *cur_marker = 0;
04b8595b 1443 int j;
04b8595b 1444
0a7de745
A
1445 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1446 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
5ba3f43e 1447 paniclog_append_noflush("Thread %d: %p\n", j, thread);
04b8595b
A
1448 pmap = get_task_pmap(task);
1449 savestate = get_user_regs(thread);
1450 rbp = savestate->ss_64.rbp;
5ba3f43e
A
1451 paniclog_append_noflush("\t0x%016llx\n", savestate->ss_64.isf.rip);
1452 print_one_backtrace(pmap, (vm_offset_t)rbp, cur_marker, TRUE);
1453 paniclog_append_noflush("\n");
39037602 1454 }
04b8595b
A
1455}
1456
3e170ce0
A
1457void
1458print_thread_num_that_crashed(task_t task)
1459{
0a7de745
A
1460 thread_t c_thread = current_thread();
1461 thread_t thread;
3e170ce0 1462 int j;
3e170ce0 1463
0a7de745
A
1464 for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
1465 ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
3e170ce0 1466 if (c_thread == thread) {
5ba3f43e 1467 paniclog_append_noflush("\nThread %d crashed\n", j);
3e170ce0
A
1468 break;
1469 }
1470 }
1471}
1472
04b8595b
A
1473#define PANICLOG_UUID_BUF_SIZE 256
1474
0a7de745
A
1475void
1476print_uuid_info(task_t task)
04b8595b 1477{
0a7de745
A
1478 uint32_t uuid_info_count = 0;
1479 mach_vm_address_t uuid_info_addr = 0;
1480 boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
1481 boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
1482 int task_pid = pid_from_task(task);
1483 char uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0};
1484 char *uuidbufptr = uuidbuf;
1485 uint32_t k;
04b8595b
A
1486
1487 if (have_pmap && task->active && task_pid > 0) {
1488 /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
1489 struct user64_dyld_all_image_infos task_image_infos;
1490 if (debug_copyin(task->map->pmap, task->all_image_info_addr,
0a7de745 1491 &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
04b8595b
A
1492 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1493 uuid_info_addr = task_image_infos.uuidArray;
1494 }
1495
1496 /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
1497 * in the middle of updating this data structure), we zero the
1498 * uuid_info_count so that we won't even try to save load info for this task
1499 */
1500 if (!uuid_info_addr) {
1501 uuid_info_count = 0;
1502 }
1503 }
1504
1505 if (task_pid > 0 && uuid_info_count > 0) {
1506 uint32_t uuid_info_size = sizeof(struct user64_dyld_uuid_info);
1507 uint32_t uuid_array_size = uuid_info_count * uuid_info_size;
1508 uint32_t uuid_copy_size = 0;
1509 uint32_t uuid_image_count = 0;
1510 char *current_uuid_buffer = NULL;
1511 /* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */
0a7de745 1512
5ba3f43e 1513 paniclog_append_noflush("\nuuid info:\n");
04b8595b
A
1514 while (uuid_array_size) {
1515 if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) {
1516 uuid_copy_size = uuid_array_size;
0a7de745 1517 uuid_image_count = uuid_array_size / uuid_info_size;
04b8595b 1518 } else {
0a7de745 1519 uuid_image_count = PANICLOG_UUID_BUF_SIZE / uuid_info_size;
04b8595b
A
1520 uuid_copy_size = uuid_image_count * uuid_info_size;
1521 }
1522 if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr,
0a7de745 1523 uuid_copy_size)) {
5ba3f43e 1524 paniclog_append_noflush("Error!! Failed to copy UUID info for task %p pid %d\n", task, task_pid);
04b8595b
A
1525 uuid_image_count = 0;
1526 break;
1527 }
1528
1529 if (uuid_image_count > 0) {
1530 current_uuid_buffer = uuidbufptr;
1531 for (k = 0; k < uuid_image_count; k++) {
5ba3f43e 1532 paniclog_append_noflush(" %#llx", *(uint64_t *)current_uuid_buffer);
04b8595b
A
1533 current_uuid_buffer += sizeof(uint64_t);
1534 uint8_t *uuid = (uint8_t *)current_uuid_buffer;
5ba3f43e 1535 paniclog_append_noflush("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
0a7de745
A
1536 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8],
1537 uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
04b8595b
A
1538 current_uuid_buffer += 16;
1539 }
1540 bzero(&uuidbuf, sizeof(uuidbuf));
1541 }
1542 uuid_info_addr += uuid_copy_size;
1543 uuid_array_size -= uuid_copy_size;
1544 }
1545 }
1546}
1547
0a7de745
A
1548void
1549print_launchd_info(void)
04b8595b 1550{
0a7de745
A
1551 task_t task = current_task();
1552 thread_t thread = current_thread();
1553 volatile uint32_t *ppbtcnt = &pbtcnt;
1554 uint64_t bt_tsc_timeout;
1555 int cn = cpu_number();
04b8595b 1556
0a7de745 1557 if (pbtcpu != cn) {
cb323159 1558 os_atomic_inc(&pbtcnt, relaxed);
04b8595b
A
1559 /* Spin on print backtrace lock, which serializes output
1560 * Continue anyway if a timeout occurs.
1561 */
0a7de745 1562 hw_lock_to(&pbtlock, ~0U, LCK_GRP_NULL);
04b8595b
A
1563 pbtcpu = cn;
1564 }
0a7de745 1565
04b8595b 1566 print_uuid_info(task);
3e170ce0 1567 print_thread_num_that_crashed(task);
04b8595b
A
1568 print_threads_registers(thread);
1569 print_tasks_user_threads(task);
d190cdc3
A
1570
1571 panic_display_system_configuration(TRUE);
0a7de745 1572
04b8595b
A
1573 /* Release print backtrace lock, to permit other callers in the
1574 * event of panics on multiple processors.
1575 */
1576 hw_lock_unlock(&pbtlock);
cb323159 1577 os_atomic_dec(&pbtcnt, relaxed);
04b8595b
A
1578 /* Wait for other processors to complete output
1579 * Timeout and continue after PBT_TIMEOUT_CYCLES.
1580 */
1581 bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
0a7de745
A
1582 while (*ppbtcnt && (rdtsc64() < bt_tsc_timeout)) {
1583 ;
1584 }
04b8595b 1585}