/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif /* ALTERNATE_DEBUGGER */
#include <machine/atomic.h>
#include <machine/trap.h>
#include <pexpert/pexpert.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h>  /* for btop */

#include <console/video_console.h>
#include <console/serial_protos.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>
void kdp_trap(unsigned int, struct arm_saved_state *);

extern kern_return_t do_stackshot(void *);
extern void kdp_snapshot_preflight(int pid, void * tracebuf,
    uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p,
    uint64_t since_timestamp, uint32_t pagetable_mask);
extern int kdp_stack_snapshot_bytes_traced(void);
extern int kdp_stack_snapshot_bytes_uncompressed(void);

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
#endif

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic log.
 */
#define PANICLOG_VERSION 13
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
extern int proc_pid(struct proc *p);
extern void proc_name_kdp(task_t, char *, int);

/*
 * Make sure there's enough space to include the relevant bits in the format required
 * within the space allocated for the panic version string in the panic header.
 * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'.
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"
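/*
 * Illustrative example (values are hypothetical, not from this file): with
 * osproductversion "17.0" and osversion "21A123", the snprintf() calls below
 * that use PANIC_HEADER_VERSION_FMT_STR would produce "17.0 (21A123)".
 * Each field is truncated to at most 14 characters by the %.14s conversions.
 */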
extern const char version[];
extern char osversion[];
extern char osproductversion[];
extern char osreleasetype[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;
/*
 * Choosing the size for gTargetTypeBuffer as 16 and the size for gModelTypeBuffer
 * as 32, since the target name and model name typically don't exceed these sizes.
 */
extern char gTargetTypeBuffer[16];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock);
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;

extern void stackshot_memcpy(void *dst, const void *src, size_t len);

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)
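/*
 * These constants assume the standard ARM frame record layout: a frame pointer
 * points at the saved frame pointer, and the saved link register sits
 * immediately above it (4 bytes up for 32-bit frames, per FP_LR_OFFSET, and
 * 8 bytes up for arm64 frames, per FP_LR_OFFSET64).  FP_MAX_NUM_TO_EVALUATE
 * bounds how many frames print_one_backtrace() will walk.
 */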
/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)
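/*
 * DEBUG_ACK_TIMEOUT is expressed in nanoseconds (10,000,000 ns == 10 ms) and is
 * converted to mach absolute time with nanoseconds_to_absolutetime() before the
 * spin-wait loops in DebuggerXCallEnter() and DebuggerXCallReturn().
 */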
/* Forward functions definitions */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);

/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
unsigned int DebugContextCount = 0;

#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif

// Convenient macros to easily validate one or more pointers if
// they have defined types
#define VALIDATE_PTR(ptr) \
	validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
	VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

#define GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)
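/*
 * Illustrative expansion (not from the original source): a call such as
 *     VALIDATE_PTR_LIST(a, b, c)
 * selects VALIDATE_PTR_3 via GET_MACRO and expands to
 *     VALIDATE_PTR(a) && VALIDATE_PTR(b) && VALIDATE_PTR(c)
 * so each pointer is checked with validate_ptr() and the results are ANDed.
 */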
/*
 * Evaluate if a pointer is valid
 * Print a message if pointer is invalid
 */
static boolean_t
validate_ptr(
	vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
	if (ptr) {
		if (ml_validate_nofault(ptr, size)) {
			return TRUE;
		} else {
			paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
			    ptr_name, (void *)ptr, (int)size);
			return FALSE;
		}
	} else {
		paniclog_append_noflush("NULL %s pointer\n", ptr_name);
		return FALSE;
	}
}
/*
 * Backtrace a single frame.
 */
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
{
	int             i = 0;
	addr64_t        lr = 0;
	addr64_t        fp = topfp;
	addr64_t        fp_for_ppn = 0;
	ppnum_t         ppn = (ppnum_t)NULL;
	boolean_t       dump_kernel_stack;
	vm_offset_t     raddrs[FP_MAX_NUM_TO_EVALUATE];

	if (fp >= VM_MIN_KERNEL_ADDRESS) {
		dump_kernel_stack = TRUE;
	} else {
		dump_kernel_stack = FALSE;
	}

	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/*
		 * Check to see if current address will result in a different
		 * ppn than previously computed (to avoid recomputation) via
		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT)
		 */
		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
				/* return addresses on stack will be signed by arm64e ABI */
				lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}

		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		/* Record the return address for the kext summary printed below. */
		raddrs[i] = lr;

		if (is_64_bit) {
			paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
		} else {
			paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
		}
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));

	if (print_kexts_in_backtrace && i != 0) {
		kmod_panic_dump(&raddrs[0], i);
	}
}
#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256
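/*
 * TOP_RUNNABLE_LIMIT matches the size of the top_runnable[] array used in
 * do_print_all_backtraces() to record the highest-priority runnable threads
 * when DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG is set.
 */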
extern void panic_print_vnodes(void);

static void
panic_display_hung_cpus_help(void)
{
#if defined(__arm64__)
	const uint32_t pcsr_offset = 0x90;

	/*
	 * Print some info that might help in cases where nothing
	 * else does.
	 */
	const ml_topology_info_t *info = ml_get_topology_info();
	unsigned int i, retry;

	for (i = 0; i < info->num_cpus; i++) {
		if (info->cpus[i].cpu_UTTDBG_regs) {
			volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset);
			volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
			uint64_t pc = 0;

			// a number of retries are needed till this works
			for (retry = 1024; retry && !pc; retry--) {
				//a 32-bit read is required to make a PC sample be produced, else we'll only get a zero
				(void)*pcsrTrigger;
				pc = *pcsr;
			}

			//postprocessing (same as astris does)
			if (pc) {
				pc |= 0xffff000000000000ull;
			}
			paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc);
		}
	}
#endif //defined(__arm64__)
}
static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
	int logversion = PANICLOG_VERSION;
	thread_t cur_thread = current_thread();
	uintptr_t cur_fp;
	task_t task;
	int print_vnodes = 0;
	const char *nohilite_thread_marker = "\t";

	/* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */
	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
	int bytes_uncompressed = 0;
	uint64_t bytes_used = 0ULL;
	int err = 0;
	char *stackshot_begin_loc = NULL;
	kc_format_t kc_format;
	bool filesetKC = false;

#if defined(__arm__)
	__asm__         volatile ("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
	__asm__         volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
	if (panic_bt_depth != 0) {
		return;
	}
	panic_bt_depth++;

	__unused bool result = PE_get_primary_kc_format(&kc_format);
	assert(result == true);
	filesetKC = kc_format == KCFormatFileset;

	/* Truncate panic string to 1200 bytes */
	paniclog_append_noflush("Debugger message: %.1200s\n", message);
409 paniclog_append_noflush("Device: %s\n",
410 ('\0' != gTargetTypeBuffer
[0]) ? gTargetTypeBuffer
: "Not set yet");
411 paniclog_append_noflush("Hardware Model: %s\n",
412 ('\0' != gModelTypeBuffer
[0]) ? gModelTypeBuffer
:"Not set yet");
413 paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID
[7],
414 gPlatformECID
[6], gPlatformECID
[5], gPlatformECID
[4], gPlatformECID
[3],
415 gPlatformECID
[2], gPlatformECID
[1], gPlatformECID
[0]);
416 if (last_hwaccess_thread
) {
417 paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread
);
419 paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
421 paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID
);
422 paniclog_append_noflush("OS release type: %.256s\n",
423 ('\0' != osreleasetype
[0]) ? osreleasetype
: "Not set yet");
424 paniclog_append_noflush("OS version: %.256s\n",
425 ('\0' != osversion
[0]) ? osversion
: "Not set yet");
426 #if defined(XNU_TARGET_OS_BRIDGE)
427 paniclog_append_noflush("macOS version: %.256s\n",
428 ('\0' != macosversion
[0]) ? macosversion
: "Not set");
430 paniclog_append_noflush("Kernel version: %.512s\n", version
);
432 if (kernelcache_uuid_valid
) {
434 paniclog_append_noflush("Fileset Kernelcache UUID: ");
436 paniclog_append_noflush("KernelCache UUID: ");
438 for (size_t index
= 0; index
< sizeof(uuid_t
); index
++) {
439 paniclog_append_noflush("%02X", kernelcache_uuid
[index
]);
441 paniclog_append_noflush("\n");
443 panic_display_kernel_uuid();
445 paniclog_append_noflush("iBoot version: %.128s\n", firmware_version
);
446 paniclog_append_noflush("secure boot?: %s\n", debug_enabled
? "NO": "YES");
447 #if defined(XNU_TARGET_OS_BRIDGE)
448 paniclog_append_noflush("x86 EFI Boot State: ");
449 if (PE_smc_stashed_x86_efi_boot_state
!= 0xFF) {
450 paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state
);
452 paniclog_append_noflush("not available\n");
454 paniclog_append_noflush("x86 System State: ");
455 if (PE_smc_stashed_x86_system_state
!= 0xFF) {
456 paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state
);
458 paniclog_append_noflush("not available\n");
460 paniclog_append_noflush("x86 Power State: ");
461 if (PE_smc_stashed_x86_power_state
!= 0xFF) {
462 paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state
);
464 paniclog_append_noflush("not available\n");
466 paniclog_append_noflush("x86 Shutdown Cause: ");
467 if (PE_smc_stashed_x86_shutdown_cause
!= 0xFF) {
468 paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause
);
470 paniclog_append_noflush("not available\n");
472 paniclog_append_noflush("x86 Previous Power Transitions: ");
473 if (PE_smc_stashed_x86_prev_power_transitions
!= UINT64_MAX
) {
474 paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions
);
476 paniclog_append_noflush("not available\n");
478 paniclog_append_noflush("PCIeUp link state: ");
479 if (PE_pcie_stashed_link_state
!= UINT32_MAX
) {
480 paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state
);
482 paniclog_append_noflush("not available\n");
	if (panic_data_buffers != NULL) {
		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
		for (int i = 0; i < panic_data_buffers->len; i++) {
			paniclog_append_noflush("%02X", panic_buffer_data[i]);
		}
		paniclog_append_noflush("\n");
	}

	paniclog_append_noflush("Paniclog version: %d\n", logversion);

	panic_display_kernel_aslr();
	panic_display_times();
	panic_display_zprint();
	panic_display_hung_cpus_help();
#if CONFIG_ZLEAKS
	panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
	panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

#if DEVELOPMENT || DEBUG
	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
	}
#endif /* DEVELOPMENT || DEBUG */
	// Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
	if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
		thread_t        top_runnable[5] = {0};
		thread_t        thread;
		int             total_cpu_usage = 0;

		print_vnodes = 1;

		for (thread = (thread_t)queue_first(&threads);
		    VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			total_cpu_usage += thread->cpu_usage;

			// Look for the 5 runnable threads with highest priority
			if (thread->state & TH_RUN) {
				int k;
				thread_t comparison_thread = thread;

				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
					if (top_runnable[k] == 0) {
						top_runnable[k] = comparison_thread;
						break;
					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
						thread_t temp = top_runnable[k];
						top_runnable[k] = comparison_thread;
						comparison_thread = temp;
					} // if comparison thread has higher priority than previously saved thread
				} // loop through highest priority runnable threads
			} // Check if thread is runnable
		} // Loop through all threads

		// Print the relevant info for each thread identified
		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
		paniclog_append_noflush("Thread task pri cpu_usage\n");

		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
			if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
			    validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {
				char name[MAXCOMLEN + 1];
				proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
	}
	// print current task info
	if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {
		task = cur_thread->task;

		if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
			paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
			    task, task->map->pmap->stats.resident_count, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
			    task, task->thread_count);
		}

		if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
			char name[MAXCOMLEN + 1];
			int pid = proc_pid(task->bsd_info);
			proc_name_kdp(task, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", pid, name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
	}
	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
		    cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if defined(__arm64__)
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
#else
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
#endif
	} else {
		paniclog_append_noflush("Could not print panicked thread backtrace:"
		    "frame pointer outside kernel vm.\n");
	}

	paniclog_append_noflush("\n");
	kext_dump_panic_lists(&paniclog_append_noflush);
	paniclog_append_noflush("\n");
	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
		    osproductversion, osversion);
	}
#if defined(XNU_TARGET_OS_BRIDGE)
	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
		    macosproductversion, macosversion);
	}
#endif

	if (debug_ack_timeout_count) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
	} else if (stackshot_active()) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
	} else {
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
		stackshot_begin_loc = debug_buf_ptr;

		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
		    KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
		    KCFLAG_USE_MEMCOPY);
		if (err == KERN_SUCCESS) {
			uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
			    STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
			    STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);

			err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshot_memcpy, KCDCT_ZLIB);
			if (err != KERN_SUCCESS) {
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
				stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
			}
			if (filesetKC) {
				stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
			}

			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
			    stackshot_flags, &kc_panic_data, 0, 0);
			err = do_stackshot(NULL);
			bytes_traced = kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->eph_stackshot_len = bytes_traced;

				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
					bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
				} else {
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
				}
			} else {
				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		} else {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
		}
	}

	assert(panic_info->eph_other_log_offset != 0);

	if (print_vnodes != 0) {
		panic_print_vnodes();
	}

	panic_bt_depth--;
}
/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
	unsigned int initial_not_in_kdp = not_in_kdp;

	cpu_data_t * cpu_data_ptr = getCpuDatap();

	assert(cpu_data_ptr->PAB_active == FALSE);
	cpu_data_ptr->PAB_active = TRUE;

	/*
	 * Because print all backtraces uses the pmap routines, it needs to
	 * avoid taking pmap locks.  Right now, this is conditionalized on
	 * not_in_kdp.
	 */
	not_in_kdp = 0;
	do_print_all_backtraces(message, panic_options);

	not_in_kdp = initial_not_in_kdp;

	cpu_data_ptr->PAB_active = FALSE;
}
void
panic_display_times()
{
	if (kdp_clock_is_locked()) {
		paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
		return;
	}

	if ((is_clock_configured) && (simple_lock_try(&clock_lock, LCK_GRP_NULL))) {
		clock_sec_t     secs, boot_secs;
		clock_usec_t    usecs, boot_usecs;

		simple_unlock(&clock_lock);

		clock_get_calendar_microtime(&secs, &usecs);
		clock_get_boottime_microtime(&boot_secs, &boot_usecs);

		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
		paniclog_append_noflush("Epoch Time: sec usec\n");
		paniclog_append_noflush(" Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
		paniclog_append_noflush(" Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
		paniclog_append_noflush(" Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
		paniclog_append_noflush(" Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
	}
}
void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
	// empty stub. Really only used on x86_64.
}
void
SavePanicInfo(
	const char *message, __unused void *panic_data, uint64_t panic_options)
{
	/*
	 * This should be initialized by the time we get here, but
	 * if it is not, asserting about it will be of no use (it will
	 * come right back to here), so just loop right here and now.
	 * This prevents early-boot panics from becoming recursive and
	 * thus makes them easier to debug. If you attached to a device
	 * and see your PC here, look down a few frames to see your
	 * early-boot panic there.
	 */
	while (!panic_info || panic_info->eph_panic_log_offset == 0) {
		;
	}

	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

#if defined(XNU_TARGET_OS_BRIDGE)
	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

	/*
	 * On newer targets, panic data is stored directly into the iBoot panic region.
	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
	 */
	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
		PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
	}

	if (PanicInfoSaved || (debug_buf_size == 0)) {
		return;
	}

	PanicInfoSaved = TRUE;

	print_all_backtraces(message, panic_options);

	assert(panic_info->eph_panic_log_len != 0);
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;
	PEHaltRestart(kPEPanicSync);

	/*
	 * Notifies registered IOPlatformPanicAction callbacks
	 * (which includes one to disable the memcache) and flushes
	 * the buffer contents from the cache
	 */
	paniclog_flush();
}

void
paniclog_flush()
{
	unsigned int panicbuf_length = 0;

	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
	if (!panicbuf_length) {
		return;
	}

	/*
	 * Updates the log length of the last part of the panic log.
	 */
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	/*
	 * Updates the metadata at the beginning of the panic buffer.
	 */
	PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

	/*
	 * This is currently unused by platform KEXTs on embedded but is
	 * kept for compatibility with the published IOKit interfaces.
	 */
	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

	PE_sync_panic_buffers();
}
/*
 * @function _was_in_userspace
 *
 * @abstract Unused function used to indicate that a CPU was in userspace
 * before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called.
 */
static void __attribute__((__noreturn__))
_was_in_userspace(void)
{
	panic("%s: should not have been invoked.", __FUNCTION__);
}
/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held.  It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't synch with the other cores.  This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
	boolean_t proceed_on_sync_failure)
{
	uint64_t max_mabs_time, current_mabs_time;
	int cpu;
	int max_cpu;
	cpu_data_t      *target_cpu_datap;
	cpu_data_t      *cpu_data_ptr = getCpuDatap();

	/* Check for nested debugger entry. */
	cpu_data_ptr->debugger_active++;
	if (cpu_data_ptr->debugger_active != 1) {
		return KERN_SUCCESS;
	}

	/*
	 * If debugger_sync is not 0, someone responded excessively late to the last
	 * debug request (we zero the sync variable in the return function).  Zero it
	 * again here.  This should prevent us from getting out of sync (heh) and
	 * timing out on every entry to the debugger if we timeout once.
	 */
	debugger_sync = 0;
	mp_kdp_trap = 1;
	debug_cpus_spinning = 0;

	/*
	 * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
	 * to the signal.
	 */
	__builtin_arm_dmb(DMB_ISH);

	/*
	 * Try to signal all CPUs (except ourselves, of course).  Use debugger_sync to
	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
	 * is not synchronous).
	 */
	bool cpu_signal_failed = false;
	max_cpu = ml_get_max_cpu_number();

	boolean_t immediate_halt = FALSE;
	if (proceed_on_sync_failure && force_immediate_debug_halt) {
		immediate_halt = TRUE;
	}

	if (!immediate_halt) {
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}

			if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
				os_atomic_inc(&debugger_sync, relaxed);
				os_atomic_inc(&debug_cpus_spinning, relaxed);
			} else {
				cpu_signal_failed = true;
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}

		nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
		current_mabs_time = mach_absolute_time();
		max_mabs_time += current_mabs_time;
		assert(max_mabs_time > current_mabs_time);

		/*
		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd.  If we
		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
		 * uninterruptibly spinning on someone else.  The best we can hope for is that
		 * all other CPUs have either responded or are spinning in a context that is
		 * debugger safe.
		 */
		while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) {
			current_mabs_time = mach_absolute_time();
		}
	}

	if (cpu_signal_failed && !proceed_on_sync_failure) {
		DebuggerXCallReturn();
		return KERN_FAILURE;
	} else if (immediate_halt || (current_mabs_time >= max_mabs_time)) {
		/*
		 * For the moment, we're aiming for a timeout that the user shouldn't notice,
		 * but will be sufficient to let the other core respond.
		 */
		__builtin_arm_dmb(DMB_ISH);
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}
			if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) {
				continue;
			}
			if (proceed_on_sync_failure) {
				paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu);
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0);
				if (halt_status < 0) {
					paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					if (halt_status > 0) {
						paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
					}
					target_cpu_datap->halt_status = CPU_HALTED;
				}
			} else {
				kprintf("Debugger synch pending on cpu %d\n", cpu);
			}
		}
		if (proceed_on_sync_failure) {
			for (cpu = 0; cpu <= max_cpu; cpu++) {
				target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

				if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) ||
				    (target_cpu_datap->halt_status == CPU_NOT_HALTED)) {
					continue;
				}
				dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu,
				    NSEC_PER_SEC, &target_cpu_datap->halt_state);
				if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) {
					paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status));
				} else {
					paniclog_append_noflush("cpu %d successfully halted\n", cpu);
					target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE;
				}
			}
			if (immediate_halt) {
				paniclog_append_noflush("Immediate halt requested on all cores\n");
			} else {
				paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT);
			}
			debug_ack_timeout_count++;
			return KERN_SUCCESS;
		} else {
			DebuggerXCallReturn();
			return KERN_OPERATION_TIMED_OUT;
		}
	}

	return KERN_SUCCESS;
}
/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with debugger lock held.
 */
void
DebuggerXCallReturn(
	void)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();
	uint64_t max_mabs_time, current_mabs_time;

	cpu_data_ptr->debugger_active--;
	if (cpu_data_ptr->debugger_active != 0) {
		return;
	}

	mp_kdp_trap = 0;
	debugger_sync = 0;

	nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
	current_mabs_time = mach_absolute_time();
	max_mabs_time += current_mabs_time;
	assert(max_mabs_time > current_mabs_time);

	/*
	 * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
	 * It's possible for one or more CPUs to not decrement debug_cpus_spinning,
	 * since they may be stuck somewhere else with interrupts disabled.
	 * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
	 *
	 * Note that the same is done in DebuggerXCallEnter, when we wait for other
	 * CPUS to update debugger_sync. If we time out, let's hope for all CPUs to be
	 * spinning in a debugger-safe context.
	 */
	while ((debug_cpus_spinning != 0) && (current_mabs_time < max_mabs_time)) {
		current_mabs_time = mach_absolute_time();
	}

	/* Do we need a barrier here? */
	__builtin_arm_dmb(DMB_ISH);
}
void
DebuggerXCall(
	void            *ctx)
{
	boolean_t               save_context = FALSE;
	vm_offset_t             kstackptr = 0;
	arm_saved_state_t       *regs = (arm_saved_state_t *) ctx;

	if (regs != NULL) {
#if defined(__arm64__)
		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
		save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
	}

	kstackptr = current_thread()->machine.kstackptr;

#if defined(__arm64__)
	arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		current_thread()->machine.kpcb = regs;
	} else {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		state->fp = 0;
		state->pc = pc;
		state->lr = 0;
		state->sp = 0;
	}
#else
	arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		copy_signed_thread_state(state, regs);
	} else {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		set_saved_state_fp(state, 0);
		set_saved_state_pc(state, pc);
		set_saved_state_lr(state, 0);
		set_saved_state_sp(state, 0);
	}
#endif

	/*
	 * When running in serial mode, the core capturing the dump may hold interrupts disabled
	 * for a time longer than the timeout. That path includes logic to reset the timestamp
	 * so that we do not eventually trigger the interrupt timeout assert().
	 *
	 * Here we check whether other cores have already gone over the timeout at this point
	 * before spinning, so we at least cover the IPI reception path. After spinning, however,
	 * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
	 */
	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_END();
	}

	os_atomic_dec(&debugger_sync, relaxed);
	__builtin_arm_dmb(DMB_ISH);
	while (mp_kdp_trap) {
		;
	}

	/*
	 * Alert the triggering CPU that this CPU is done spinning. The CPU that
	 * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
	 * all of the CPUs to exit the above loop before continuing.
	 */
	os_atomic_dec(&debug_cpus_spinning, relaxed);

	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
	}

#if defined(__arm64__)
	current_thread()->machine.kpcb = NULL;
#endif /* defined(__arm64__) */

	/* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
	unsigned int    reason,
	void            *ctx)
{
#if     !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
	alternate_debugger_enter();
#endif

#if     MACH_KDP
	kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
	/* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
	return pmap_bootloader_page(ppn);
}