/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>
#include <mach_kdp.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <pexpert/boot.h>
#include <pexpert/pexpert.h>

#include <ptrauth.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <kern/clock.h>
#include <kern/debug.h>
#include <kern/processor.h>
#include <kdp/kdp_core.h>
#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif
#include <machine/atomic.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <kdp/kdp_callout.h>
#include <kdp/kdp_dyld.h>
#include <kdp/kdp_internal.h>
#include <uuid/uuid.h>
#include <sys/codesign.h>
#include <sys/time.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_shared_region.h>
#include <mach/time_value.h>
#include <machine/machparam.h> /* for btop */

#include <console/video_console.h>
#include <console/serial_protos.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <libkern/OSKextLibPrivate.h>
#include <vm/vm_kern.h>
#include <kern/kern_cdata.h>

#if MACH_KDP
void kdp_trap(unsigned int, struct arm_saved_state *);
#endif

extern kern_return_t do_stackshot(void *);
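/*
 * kdp_snapshot_preflight() arms the stackshot machinery before
 * do_stackshot() runs. As used by the panic path below, a pid of -1
 * requests all tasks and a since_timestamp of 0 requests a full
 * (non-delta) snapshot.
 */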
extern void kdp_snapshot_preflight(int pid, void * tracebuf,
    uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p,
    uint64_t since_timestamp, uint32_t pagetable_mask);
extern int kdp_stack_snapshot_bytes_traced(void);
extern int kdp_stack_snapshot_bytes_uncompressed(void);

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
#endif

/*
 * Increment the PANICLOG_VERSION if you change the format of the panic
 * log in any way.
 */
#define PANICLOG_VERSION 13
static struct kcdata_descriptor kc_panic_data;

extern char firmware_version[];
extern volatile uint32_t debug_enabled;
extern unsigned int not_in_kdp;

extern int copyinframe(vm_address_t fp, uint32_t * frame);
extern void kdp_callouts(kdp_event_t event);

/* #include <sys/proc.h> */
#define MAXCOMLEN 16
struct proc;
extern int proc_pid(struct proc *p);
extern void proc_name_kdp(task_t, char *, int);

/*
 * Make sure there's enough space to fit the relevant bits, in the required
 * format, within the space allocated for the panic version string in the
 * panic header. The format required by OSAnalytics/DumpPanic is
 * 'Product Version (OS Version)'.
 */
#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"
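/*
 * For example (hypothetical values), an osproductversion of "17.4" and an
 * osversion of "21E219" would render as "17.4 (21E219)"; each %.14s
 * specifier truncates its field to at most 14 characters.
 */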

extern const char version[];
extern char osversion[];
extern char osproductversion[];
extern char osreleasetype[];

#if defined(XNU_TARGET_OS_BRIDGE)
extern char macosproductversion[];
extern char macosversion[];
#endif

extern uint8_t gPlatformECID[8];
extern uint32_t gPlatformMemoryID;

extern uint64_t last_hwaccess_thread;

/* The sizes of 16 for gTargetTypeBuffer and 32 for gModelTypeBuffer were
 * chosen because the target and model names typically do not exceed them. */
extern char gTargetTypeBuffer[16];
extern char gModelTypeBuffer[32];

decl_simple_lock_data(extern, clock_lock);
extern struct timeval gIOLastSleepTime;
extern struct timeval gIOLastWakeTime;
extern boolean_t is_clock_configured;
extern boolean_t kernelcache_uuid_valid;
extern uuid_t kernelcache_uuid;

extern void stackshot_memcpy(void *dst, const void *src, size_t len);

/* Definitions for frame pointers */
#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
#define FP_LR_OFFSET ((uint32_t)4)
#define FP_LR_OFFSET64 ((uint32_t)8)
#define FP_MAX_NUM_TO_EVALUATE (50)

/* Timeout (in nanoseconds) for all processors responding to debug crosscall */
#define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000)
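/* 10,000,000 ns == 10 ms; see the ack-wait loops in DebuggerXCallEnter()
 * and DebuggerXCallReturn() below. */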

/* Forward function declarations */
void panic_display_times(void);
void panic_print_symbol_name(vm_address_t search);

/* Global variables */
static uint32_t panic_bt_depth;
boolean_t PanicInfoSaved = FALSE;
boolean_t force_immediate_debug_halt = FALSE;
unsigned int debug_ack_timeout_count = 0;
volatile unsigned int debugger_sync = 0;
volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */
volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */
unsigned int DebugContextCount = 0;

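/* The 0xFF / UINT*_MAX initializers below act as "not stashed" sentinels:
 * the bridgeOS paniclog code later in this file prints "not available" for
 * any of these x86 state values that still hold their initial sentinel. */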
#if defined(__arm64__)
uint8_t PE_smc_stashed_x86_system_state = 0xFF;
uint8_t PE_smc_stashed_x86_power_state = 0xFF;
uint8_t PE_smc_stashed_x86_efi_boot_state = 0xFF;
uint8_t PE_smc_stashed_x86_shutdown_cause = 0xFF;
uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX;
uint32_t PE_pcie_stashed_link_state = UINT32_MAX;
#endif

// Convenience macros to validate one or more pointers, provided their
// types are fully defined
#define VALIDATE_PTR(ptr) \
	validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr)

#define VALIDATE_PTR_2(ptr0, ptr1) \
	VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1)

#define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2)

#define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \
	VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3)

#define GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME

#define VALIDATE_PTR_LIST(...) GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__)
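/*
 * VALIDATE_PTR_LIST() counts its arguments via GET_MACRO: each extra
 * argument shifts the trailing macro names left by one, so NAME lands on
 * the variant matching the arity. For example,
 *     VALIDATE_PTR_LIST(a, b)
 * expands to GET_MACRO(a, b, VALIDATE_PTR_4, VALIDATE_PTR_3,
 * VALIDATE_PTR_2, VALIDATE_PTR)(a, b), i.e. VALIDATE_PTR_2(a, b), which
 * in turn expands to VALIDATE_PTR(a) && VALIDATE_PTR(b).
 */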

/*
 * Evaluate whether a pointer is valid; print a message if it is not.
 */
static boolean_t
validate_ptr(
	vm_offset_t ptr, vm_size_t size, const char * ptr_name)
{
	if (ptr) {
		if (ml_validate_nofault(ptr, size)) {
			return TRUE;
		} else {
			paniclog_append_noflush("Invalid %s pointer: %p size: %d\n",
			    ptr_name, (void *)ptr, (int)size);
			return FALSE;
		}
	} else {
		paniclog_append_noflush("NULL %s pointer\n", ptr_name);
		return FALSE;
	}
}

/*
 * Print the backtrace for a single thread by walking its chain of saved
 * frame pointers, starting from topfp.
 */
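/*
 * Each AArch64 stack frame starts with a two-word frame record: the saved
 * fp of the caller at [fp], and the saved lr (return address) at
 * [fp + FP_LR_OFFSET64]. 32-bit ARM frames keep the saved lr at
 * [fp + FP_LR_OFFSET]. Both words are read through their physical
 * addresses (pmap_find_phys + ml_phys_read_*) so a corrupted or unmapped
 * stack cannot fault the panic path.
 */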
static void
print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
    boolean_t is_64_bit, boolean_t print_kexts_in_backtrace)
{
	int i = 0;
	addr64_t lr;
	addr64_t fp;
	addr64_t fp_for_ppn;
	ppnum_t ppn;
	boolean_t dump_kernel_stack;
	vm_offset_t raddrs[FP_MAX_NUM_TO_EVALUATE];

	fp = topfp;
	fp_for_ppn = 0;
	ppn = (ppnum_t)NULL;

	if (fp >= VM_MIN_KERNEL_ADDRESS) {
		dump_kernel_stack = TRUE;
	} else {
		dump_kernel_stack = FALSE;
	}

	do {
		if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) {
			break;
		}
		if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) {
			break;
		}
		if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) {
			break;
		}

		/*
		 * Check to see if the current address will result in a different
		 * ppn than previously computed (to avoid recomputation) via
		 * ((addr ^ fp_for_ppn) >> PAGE_SHIFT)
		 */
		if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
			fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
#if defined(HAS_APPLE_PAC)
				/* return addresses on stack will be signed by arm64e ABI */
				lr = (addr64_t) ptrauth_strip((void *)lr, ptrauth_key_return_address);
#endif
			} else {
				lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
			} else {
				paniclog_append_noflush("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
			}
			break;
		}
		if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
			ppn = pmap_find_phys(pmap, fp);
			fp_for_ppn = fp;
		}
		if (ppn != (ppnum_t)NULL) {
			if (is_64_bit) {
				fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			} else {
				fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
			}
		} else {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
			} else {
				paniclog_append_noflush("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
			}
			break;
		}

		if (lr) {
			if (is_64_bit) {
				paniclog_append_noflush("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
			} else {
				paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
			}
			raddrs[i] = lr;
		}
	} while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));

	if (print_kexts_in_backtrace && i != 0) {
		kmod_panic_dump(&raddrs[0], i);
	}
}

#define SANE_TASK_LIMIT 256
#define TOP_RUNNABLE_LIMIT 5
#define PANICLOG_UUID_BUF_SIZE 256

extern void panic_print_vnodes(void);

static void
panic_display_hung_cpus_help(void)
{
#if defined(__arm64__)
	const uint32_t pcsr_offset = 0x90;

	/*
	 * Print some info that might help in cases where nothing
	 * else does.
	 */
	const ml_topology_info_t *info = ml_get_topology_info();
	if (info) {
		unsigned i, retry;

		for (i = 0; i < info->num_cpus; i++) {
			if (info->cpus[i].cpu_UTTDBG_regs) {
				volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset);
				volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr;
				uint64_t pc = 0;

				// A number of retries are usually needed before this works.
				for (retry = 1024; retry && !pc; retry--) {
					// A 32-bit read is required to trigger a PC sample; otherwise we only read zero.
					(void)*pcsrTrigger;
					pc = *pcsr;
				}

				// Post-processing: sign-extend the PC into the kernel VA range (same as astris does).
				if (pc >> 48) {
					pc |= 0xffff000000000000ull;
				}
				paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc);
			}
		}
	}
#endif //defined(__arm64__)
}

static void
do_print_all_backtraces(const char *message, uint64_t panic_options)
{
	int logversion = PANICLOG_VERSION;
	thread_t cur_thread = current_thread();
	uintptr_t cur_fp;
	task_t task;
	int print_vnodes = 0;
	const char *nohilite_thread_marker = "\t";

	/* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */
	int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200;
	int bytes_uncompressed = 0;
	uint64_t bytes_used = 0ULL;
	int err = 0;
	char *stackshot_begin_loc = NULL;
	kc_format_t kc_format;
	bool filesetKC = false;

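	/*
	 * Capture this function's own frame pointer (r7 on 32-bit ARM, fp/x29
	 * on arm64) so the panicked thread's backtrace below starts right here.
	 */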
#if defined(__arm__)
	__asm__ volatile ("mov %0, r7":"=r"(cur_fp));
#elif defined(__arm64__)
	__asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp));
#else
#error Unknown architecture.
#endif
	if (panic_bt_depth != 0) {
		return;
	}
	panic_bt_depth++;

	__unused bool result = PE_get_primary_kc_format(&kc_format);
	assert(result == true);
	filesetKC = kc_format == KCFormatFileset;

	/* Truncate panic string to 1200 bytes */
	paniclog_append_noflush("Debugger message: %.1200s\n", message);
	if (debug_enabled) {
		paniclog_append_noflush("Device: %s\n",
		    ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet");
		paniclog_append_noflush("Hardware Model: %s\n",
		    ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer : "Not set yet");
		paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7],
		    gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3],
		    gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]);
		if (last_hwaccess_thread) {
			paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread);
		}
		paniclog_append_noflush("Boot args: %s\n", PE_boot_args());
	}
	paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID);
	paniclog_append_noflush("OS release type: %.256s\n",
	    ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet");
	paniclog_append_noflush("OS version: %.256s\n",
	    ('\0' != osversion[0]) ? osversion : "Not set yet");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("macOS version: %.256s\n",
	    ('\0' != macosversion[0]) ? macosversion : "Not set");
#endif
	paniclog_append_noflush("Kernel version: %.512s\n", version);

	if (kernelcache_uuid_valid) {
		if (filesetKC) {
			paniclog_append_noflush("Fileset Kernelcache UUID: ");
		} else {
			paniclog_append_noflush("KernelCache UUID: ");
		}
		for (size_t index = 0; index < sizeof(uuid_t); index++) {
			paniclog_append_noflush("%02X", kernelcache_uuid[index]);
		}
		paniclog_append_noflush("\n");
	}
	panic_display_kernel_uuid();

	paniclog_append_noflush("iBoot version: %.128s\n", firmware_version);
	paniclog_append_noflush("secure boot?: %s\n", debug_enabled ? "NO" : "YES");
#if defined(XNU_TARGET_OS_BRIDGE)
	paniclog_append_noflush("x86 EFI Boot State: ");
	if (PE_smc_stashed_x86_efi_boot_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_efi_boot_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 System State: ");
	if (PE_smc_stashed_x86_system_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_system_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Power State: ");
	if (PE_smc_stashed_x86_power_state != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_power_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Shutdown Cause: ");
	if (PE_smc_stashed_x86_shutdown_cause != 0xFF) {
		paniclog_append_noflush("0x%x\n", PE_smc_stashed_x86_shutdown_cause);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("x86 Previous Power Transitions: ");
	if (PE_smc_stashed_x86_prev_power_transitions != UINT64_MAX) {
		paniclog_append_noflush("0x%llx\n", PE_smc_stashed_x86_prev_power_transitions);
	} else {
		paniclog_append_noflush("not available\n");
	}
	paniclog_append_noflush("PCIeUp link state: ");
	if (PE_pcie_stashed_link_state != UINT32_MAX) {
		paniclog_append_noflush("0x%x\n", PE_pcie_stashed_link_state);
	} else {
		paniclog_append_noflush("not available\n");
	}
#endif
	if (panic_data_buffers != NULL) {
		paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name);
		uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf;
		for (int i = 0; i < panic_data_buffers->len; i++) {
			paniclog_append_noflush("%02X", panic_buffer_data[i]);
		}
		paniclog_append_noflush("\n");
	}
	paniclog_append_noflush("Paniclog version: %d\n", logversion);

	panic_display_kernel_aslr();
	panic_display_times();
	panic_display_zprint();
	panic_display_hung_cpus_help();
#if CONFIG_ZLEAKS
	panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
#if CONFIG_ECC_LOGGING
	panic_display_ecc_errors();
#endif /* CONFIG_ECC_LOGGING */

#if DEVELOPMENT || DEBUG
	if (cs_debug_unsigned_exec_failures != 0 || cs_debug_unsigned_mmap_failures != 0) {
		paniclog_append_noflush("Unsigned code exec failures: %u\n", cs_debug_unsigned_exec_failures);
		paniclog_append_noflush("Unsigned code mmap failures: %u\n", cs_debug_unsigned_mmap_failures);
	}
#endif

	// Highlight threads that used high amounts of CPU in the panic log if requested (historically requested for watchdog panics)
	if (panic_options & DEBUGGER_OPTION_PRINT_CPU_USAGE_PANICLOG) {
		thread_t top_runnable[5] = {0};
		thread_t thread;
		int total_cpu_usage = 0;

		print_vnodes = 1;

		for (thread = (thread_t)queue_first(&threads);
		    VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
			total_cpu_usage += thread->cpu_usage;

			// Look for the 5 runnable threads with highest priority
			if (thread->state & TH_RUN) {
				int k;
				thread_t comparison_thread = thread;

				for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) {
					if (top_runnable[k] == 0) {
						top_runnable[k] = comparison_thread;
						break;
					} else if (comparison_thread->sched_pri > top_runnable[k]->sched_pri) {
						thread_t temp = top_runnable[k];
						top_runnable[k] = comparison_thread;
						comparison_thread = temp;
					} // if comparison thread has higher priority than previously saved thread
				} // loop through highest priority runnable threads
			} // Check if thread is runnable
		} // Loop through all threads

		// Print the relevant info for each thread identified
		paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage);
		paniclog_append_noflush("Thread task pri cpu_usage\n");

		for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) {
			if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) &&
			    validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) {
				char name[MAXCOMLEN + 1];
				proc_name_kdp(top_runnable[i]->task, name, sizeof(name));
				paniclog_append_noflush("%p %s %d %d\n",
				    top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage);
			}
		} // Loop through highest priority runnable threads
		paniclog_append_noflush("\n");
	}

	// print current task info
	if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) {
		task = cur_thread->task;

		if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) {
			paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ",
			    task, task->map->pmap->stats.resident_count, task->thread_count);
		} else {
			paniclog_append_noflush("Panicked task %p: %d threads: ",
			    task, task->thread_count);
		}

		if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) {
			char name[MAXCOMLEN + 1];
			int pid = proc_pid(task->bsd_info);
			proc_name_kdp(task, name, sizeof(name));
			paniclog_append_noflush("pid %d: %s", pid, name);
		} else {
			paniclog_append_noflush("unknown task");
		}

		paniclog_append_noflush("\n");
	}

	if (cur_fp < VM_MAX_KERNEL_ADDRESS) {
		paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n",
		    cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread));
#if __LP64__
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC);
#else
		print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC);
#endif
	} else {
		paniclog_append_noflush("Could not print panicked thread backtrace: "
		    "frame pointer outside kernel vm.\n");
	}

	paniclog_append_noflush("\n");
	if (filesetKC) {
		kext_dump_panic_lists(&paniclog_append_noflush);
		paniclog_append_noflush("\n");
	}
	panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset;
	/* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */
	if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR,
		    osproductversion, osversion);
	}
#if defined(XNU_TARGET_OS_BRIDGE)
	if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) {
		snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR,
		    macosproductversion, macosversion);
	}
#endif

	if (debug_ack_timeout_count) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! debugger synchronization failed, no stackshot !!\n");
	} else if (stackshot_active()) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_NESTED;
		panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
		paniclog_append_noflush("!! panicked during stackshot, skipping panic stackshot !!\n");
	} else {
		/* Align the stackshot buffer to an 8-byte address (especially important for armv7k devices) */
		debug_buf_ptr += (8 - ((uintptr_t)debug_buf_ptr % 8));
		stackshot_begin_loc = debug_buf_ptr;

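		/*
		 * Reserve end_marker_bytes (200 bytes, see above) out of the
		 * remaining buffer so the END marker and the stackshot summary
		 * lines can always be appended after the stackshot data.
		 */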
		bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base);
		err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr,
		    KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes,
		    KCFLAG_USE_MEMCOPY);
		if (err == KERN_SUCCESS) {
			uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT |
			    STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS |
			    STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ |
			    STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);

			err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshot_memcpy, KCDCT_ZLIB);
			if (err != KERN_SUCCESS) {
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED;
				stackshot_flags &= ~STACKSHOT_DO_COMPRESS;
			}
			if (filesetKC) {
				stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
			}

			kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes,
			    stackshot_flags, &kc_panic_data, 0, 0);
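			/*
			 * If zlib compression could not be set up, the COMPRESS
			 * flag has been cleared above and the stackshot below is
			 * written uncompressed into the same buffer.
			 */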
			err = do_stackshot(NULL);
			bytes_traced = kdp_stack_snapshot_bytes_traced();
			if (bytes_traced > 0 && !err) {
				debug_buf_ptr += bytes_traced;
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED;
				panic_info->eph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc);
				panic_info->eph_stackshot_len = bytes_traced;

				panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
				if (stackshot_flags & STACKSHOT_DO_COMPRESS) {
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED;
					bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed();
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed);
				} else {
					paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced);
				}
			} else {
				bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data);
				if (bytes_used > 0) {
					/* Zero out the stackshot data */
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_INCOMPLETE;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n** Stackshot Incomplete ** Bytes Filled %llu **\n", bytes_used);
				} else {
					bzero(stackshot_begin_loc, bytes_used);
					panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;

					panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
					paniclog_append_noflush("\n!! Stackshot Failed !! Bytes Traced %d, err %d\n", bytes_traced, err);
				}
			}
		} else {
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR;
			panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr);
			paniclog_append_noflush("\n!! Stackshot Failed !!\nkcdata_memory_static_init returned %d", err);
		}
	}

	assert(panic_info->eph_other_log_offset != 0);

	if (print_vnodes != 0) {
		panic_print_vnodes();
	}

	panic_bt_depth--;
}

/*
 * Entry to print_all_backtraces is serialized by the debugger lock
 */
static void
print_all_backtraces(const char *message, uint64_t panic_options)
{
	unsigned int initial_not_in_kdp = not_in_kdp;

	cpu_data_t * cpu_data_ptr = getCpuDatap();

	assert(cpu_data_ptr->PAB_active == FALSE);
	cpu_data_ptr->PAB_active = TRUE;

	/*
	 * Because print all backtraces uses the pmap routines, it needs to
	 * avoid taking pmap locks. Right now, this is conditionalized on
	 * not_in_kdp.
	 */
	not_in_kdp = 0;
	do_print_all_backtraces(message, panic_options);

	not_in_kdp = initial_not_in_kdp;

	cpu_data_ptr->PAB_active = FALSE;
}

void
panic_display_times()
{
	if (kdp_clock_is_locked()) {
		paniclog_append_noflush("Warning: clock is locked. Can't get time\n");
		return;
	}

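	/*
	 * Taking and immediately dropping clock_lock here is a probe: if the
	 * try-lock fails, another core was holding the lock when we panicked,
	 * and calling into the clock code below could deadlock, so the times
	 * are simply skipped.
	 */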
	if ((is_clock_configured) && (simple_lock_try(&clock_lock, LCK_GRP_NULL))) {
		clock_sec_t secs, boot_secs;
		clock_usec_t usecs, boot_usecs;

		simple_unlock(&clock_lock);

		clock_get_calendar_microtime(&secs, &usecs);
		clock_get_boottime_microtime(&boot_secs, &boot_usecs);

		paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time());
		paniclog_append_noflush("Epoch Time:        sec       usec\n");
		paniclog_append_noflush("  Boot    : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs);
		paniclog_append_noflush("  Sleep   : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec);
		paniclog_append_noflush("  Wake    : 0x%08x 0x%08x\n", (unsigned int)gIOLastWakeTime.tv_sec, (unsigned int)gIOLastWakeTime.tv_usec);
		paniclog_append_noflush("  Calendar: 0x%08x 0x%08x\n\n", (unsigned int)secs, (unsigned int)usecs);
	}
}

void
panic_print_symbol_name(vm_address_t search)
{
#pragma unused(search)
	// empty stub. Really only used on x86_64.
	return;
}

void
SavePanicInfo(
	const char *message, __unused void *panic_data, uint64_t panic_options)
{
	/*
	 * This should be initialized by the time we get here, but
	 * if it is not, asserting about it will be of no use (it will
	 * come right back to here), so just loop right here and now.
	 * This prevents early-boot panics from becoming recursive and
	 * thus makes them easier to debug. If you attached to a device
	 * and see your PC here, look down a few frames to see your
	 * early-boot panic there.
	 */
	while (!panic_info || panic_info->eph_panic_log_offset == 0) {
		;
	}

	if (panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC;
	}

	if (panic_options & DEBUGGER_OPTION_COPROC_INITIATED_PANIC) {
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC;
	}

#if defined(XNU_TARGET_OS_BRIDGE)
	panic_info->eph_x86_power_state = PE_smc_stashed_x86_power_state;
	panic_info->eph_x86_efi_boot_state = PE_smc_stashed_x86_efi_boot_state;
	panic_info->eph_x86_system_state = PE_smc_stashed_x86_system_state;
#endif

	/*
	 * On newer targets, panic data is stored directly into the iBoot panic region.
	 * If we re-enter SavePanicInfo (e.g. on a double panic) on such a target, update the
	 * panic CRC so that iBoot can hopefully find *something* useful in the panic region.
	 */
	if (PanicInfoSaved && (debug_buf_base >= (char*)gPanicBase) && (debug_buf_base < (char*)gPanicBase + gPanicSize)) {
		unsigned int pi_size = (unsigned int)(debug_buf_ptr - gPanicBase);
		PE_save_buffer_to_vram((unsigned char*)gPanicBase, &pi_size);
		PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here
	}

	if (PanicInfoSaved || (debug_buf_size == 0)) {
		return;
	}

	PanicInfoSaved = TRUE;

	print_all_backtraces(message, panic_options);

	assert(panic_info->eph_panic_log_len != 0);
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	PEHaltRestart(kPEPanicSync);

	/*
	 * Notifies registered IOPlatformPanicAction callbacks
	 * (which includes one to disable the memcache) and flushes
	 * the buffer contents from the cache
	 */
	paniclog_flush();
}

void
paniclog_flush()
{
	unsigned int panicbuf_length = 0;

	panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase);
	if (!panicbuf_length) {
		return;
	}

	/*
	 * Updates the log length of the last part of the panic log.
	 */
	panic_info->eph_other_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_other_log_offset;

	/*
	 * Updates the metadata at the beginning of the panic buffer,
	 * updates the CRC.
	 */
	PE_save_buffer_to_vram((unsigned char *)gPanicBase, &panicbuf_length);

	/*
	 * This is currently unused by platform KEXTs on embedded but is
	 * kept for compatibility with the published IOKit interfaces.
	 */
	PESavePanicInfo((unsigned char *)gPanicBase, panicbuf_length);

	PE_sync_panic_buffers();
}

/*
 * @function _was_in_userspace
 *
 * @abstract Unused function used to indicate that a CPU was in userspace
 * before it was IPI'd to enter the Debugger context.
 *
 * @discussion This function should never actually be called.
 */
static void __attribute__((__noreturn__))
_was_in_userspace(void)
{
	panic("%s: should not have been invoked.", __FUNCTION__);
}

/*
 * @function DebuggerXCallEnter
 *
 * @abstract IPI other cores so this core can run in a single-threaded context.
 *
 * @discussion This function should be called with the debugger lock held. It
 * signals the other cores to go into a busy loop so this core can run in a
 * single-threaded context and inspect kernel memory.
 *
 * @param proceed_on_sync_failure If true, then go ahead and try to debug even
 * if we can't sync with the other cores. This is inherently unsafe and should
 * only be used if the kernel is going down in flames anyway.
 *
 * @result returns KERN_OPERATION_TIMED_OUT if synchronization times out and
 * proceed_on_sync_failure is false.
 */
kern_return_t
DebuggerXCallEnter(
	boolean_t proceed_on_sync_failure)
{
	uint64_t max_mabs_time, current_mabs_time;
	int cpu;
	int max_cpu;
	cpu_data_t *target_cpu_datap;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	/* Check for nested debugger entry. */
	cpu_data_ptr->debugger_active++;
	if (cpu_data_ptr->debugger_active != 1) {
		return KERN_SUCCESS;
	}

	/*
	 * If debugger_sync is not 0, someone responded excessively late to the last
	 * debug request (we zero the sync variable in the return function). Zero it
	 * again here. This should prevent us from getting out of sync (heh) and
	 * timing out on every entry to the debugger if we timeout once.
	 */

	debugger_sync = 0;
	mp_kdp_trap = 1;
	debug_cpus_spinning = 0;

	/*
	 * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding
	 * to the signal.
	 */
	__builtin_arm_dmb(DMB_ISH);

	/*
	 * Try to signal all CPUs (except ourselves, of course). Use debugger_sync to
	 * synchronize with every CPU that we appeared to signal successfully (cpu_signal
	 * is not synchronous).
	 */
	bool cpu_signal_failed = false;
	max_cpu = ml_get_max_cpu_number();

	boolean_t immediate_halt = FALSE;
	if (proceed_on_sync_failure && force_immediate_debug_halt) {
		immediate_halt = TRUE;
	}

	if (!immediate_halt) {
		for (cpu = 0; cpu <= max_cpu; cpu++) {
			target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

			if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) {
				continue;
			}

			if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) {
				os_atomic_inc(&debugger_sync, relaxed);
				os_atomic_inc(&debug_cpus_spinning, relaxed);
			} else {
				cpu_signal_failed = true;
				kprintf("cpu_signal failed in DebuggerXCallEnter\n");
			}
		}

		nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
		current_mabs_time = mach_absolute_time();
		max_mabs_time += current_mabs_time;
		assert(max_mabs_time > current_mabs_time);

		/*
		 * Wait for DEBUG_ACK_TIMEOUT ns for a response from everyone we IPI'd. If we
		 * timeout, that is simply too bad; we don't have a true NMI, and one CPU may be
		 * uninterruptibly spinning on someone else. The best we can hope for is that
		 * all other CPUs have either responded or are spinning in a context that is
		 * debugger safe.
		 */
		while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) {
			current_mabs_time = mach_absolute_time();
		}
	}
960 | } |
961 | ||
962 | if (cpu_signal_failed && !proceed_on_sync_failure) { | |
963 | DebuggerXCallReturn(); | |
964 | return KERN_FAILURE; | |
965 | } else if (immediate_halt || (current_mabs_time >= max_mabs_time)) { | |
966 | /* | |
967 | * For the moment, we're aiming for a timeout that the user shouldn't notice, | |
968 | * but will be sufficient to let the other core respond. | |
969 | */ | |
970 | __builtin_arm_dmb(DMB_ISH); | |
0a7de745 | 971 | for (cpu = 0; cpu <= max_cpu; cpu++) { |
5ba3f43e A |
972 | target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; |
973 | ||
0a7de745 | 974 | if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) { |
5ba3f43e | 975 | continue; |
0a7de745 A |
976 | } |
977 | if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) { | |
5ba3f43e | 978 | continue; |
0a7de745 | 979 | } |
5ba3f43e A |
980 | if (proceed_on_sync_failure) { |
981 | paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu); | |
982 | dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0); | |
0a7de745 | 983 | if (halt_status < 0) { |
d9a64523 | 984 | paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); |
0a7de745 A |
985 | } else { |
986 | if (halt_status > 0) { | |
d9a64523 | 987 | paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); |
0a7de745 | 988 | } |
5ba3f43e A |
989 | target_cpu_datap->halt_status = CPU_HALTED; |
990 | } | |
0a7de745 | 991 | } else { |
5ba3f43e | 992 | kprintf("Debugger synch pending on cpu %d\n", cpu); |
0a7de745 | 993 | } |
5ba3f43e A |
994 | } |
995 | if (proceed_on_sync_failure) { | |
996 | for (cpu = 0; cpu <= max_cpu; cpu++) { | |
997 | target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; | |
998 | ||
999 | if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) || | |
0a7de745 | 1000 | (target_cpu_datap->halt_status == CPU_NOT_HALTED)) { |
5ba3f43e | 1001 | continue; |
0a7de745 | 1002 | } |
5ba3f43e A |
1003 | dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu, |
1004 | NSEC_PER_SEC, &target_cpu_datap->halt_state); | |
0a7de745 | 1005 | if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) { |
d9a64523 | 1006 | paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); |
0a7de745 | 1007 | } else { |
f427ee49 | 1008 | paniclog_append_noflush("cpu %d successfully halted\n", cpu); |
5ba3f43e | 1009 | target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE; |
0a7de745 | 1010 | } |
5ba3f43e | 1011 | } |
0a7de745 | 1012 | if (immediate_halt) { |
5ba3f43e | 1013 | paniclog_append_noflush("Immediate halt requested on all cores\n"); |
0a7de745 | 1014 | } else { |
5ba3f43e | 1015 | paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT); |
0a7de745 | 1016 | } |
5ba3f43e A |
1017 | debug_ack_timeout_count++; |
1018 | return KERN_SUCCESS; | |
1019 | } else { | |
1020 | DebuggerXCallReturn(); | |
1021 | return KERN_OPERATION_TIMED_OUT; | |
1022 | } | |
1023 | } else { | |
1024 | return KERN_SUCCESS; | |
1025 | } | |
1026 | } | |

/*
 * @function DebuggerXCallReturn
 *
 * @abstract Resume normal multicore operation after DebuggerXCallEnter()
 *
 * @discussion This function should be called with the debugger lock held.
 */
void
DebuggerXCallReturn(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t max_mabs_time, current_mabs_time;

	cpu_data_ptr->debugger_active--;
	if (cpu_data_ptr->debugger_active != 0) {
		return;
	}

	mp_kdp_trap = 0;
	debugger_sync = 0;

	nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time);
	current_mabs_time = mach_absolute_time();
	max_mabs_time += current_mabs_time;
	assert(max_mabs_time > current_mabs_time);

	/*
	 * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall).
	 * It's possible for one or more CPUs to not decrement debug_cpus_spinning,
	 * since they may be stuck somewhere else with interrupts disabled.
	 * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it.
	 *
	 * Note that the same is done in DebuggerXCallEnter, when we wait for other
	 * CPUs to update debugger_sync. If we time out, let's hope for all CPUs to be
	 * spinning in a debugger-safe context.
	 */
	while ((debug_cpus_spinning != 0) && (current_mabs_time < max_mabs_time)) {
		current_mabs_time = mach_absolute_time();
	}

	/* Do we need a barrier here? */
	__builtin_arm_dmb(DMB_ISH);
}

void
DebuggerXCall(
	void *ctx)
{
	boolean_t save_context = FALSE;
	vm_offset_t kstackptr = 0;
	arm_saved_state_t *regs = (arm_saved_state_t *) ctx;

	if (regs != NULL) {
#if defined(__arm64__)
		save_context = PSR64_IS_KERNEL(get_saved_state_cpsr(regs));
#else
		save_context = PSR_IS_KERNEL(regs->cpsr);
#endif
	}

	kstackptr = current_thread()->machine.kstackptr;

#if defined(__arm64__)
	arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		current_thread()->machine.kpcb = regs;
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		state->fp = 0;
		state->pc = pc;
		state->lr = 0;
		state->sp = 0;
	}
#else
	arm_saved_state_t *state = (arm_saved_state_t *)kstackptr;

	if (save_context) {
		/* Save the interrupted context before acknowledging the signal */
		copy_signed_thread_state(state, regs);
	} else if (regs) {
		/* zero old state so machine_trace_thread knows not to backtrace it */
		register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer);
		set_saved_state_fp(state, 0);
		set_saved_state_pc(state, pc);
		set_saved_state_lr(state, 0);
		set_saved_state_sp(state, 0);
	}
#endif

	/*
	 * When running in serial mode, the core capturing the dump may hold interrupts disabled
	 * for a time longer than the timeout. That path includes logic to reset the timestamp
	 * so that we do not eventually trigger the interrupt timeout assert().
	 *
	 * Here we check whether other cores have already gone over the timeout at this point
	 * before spinning, so we at least cover the IPI reception path. After spinning, however,
	 * we reset the timestamp so as to avoid hitting the interrupt timeout assert().
	 */
	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_END();
	}

	os_atomic_dec(&debugger_sync, relaxed);
	__builtin_arm_dmb(DMB_ISH);
	while (mp_kdp_trap) {
		;
	}

	/**
	 * Alert the triggering CPU that this CPU is done spinning. The CPU that
	 * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for
	 * all of the CPUs to exit the above loop before continuing.
	 */
	os_atomic_dec(&debug_cpus_spinning, relaxed);

	if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) {
		INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type);
	}

#if defined(__arm64__)
	current_thread()->machine.kpcb = NULL;
#endif /* defined(__arm64__) */

	/* Any cleanup for our pushed context should go here */
}

void
DebuggerCall(
	unsigned int reason,
	void *ctx)
{
#if !MACH_KDP
#pragma unused(reason,ctx)
#endif /* !MACH_KDP */

#if ALTERNATE_DEBUGGER
	alternate_debugger_enter();
#endif

#if MACH_KDP
	kdp_trap(reason, (struct arm_saved_state *)ctx);
#else
	/* TODO: decide what to do if no debugger config */
#endif
}

boolean_t
bootloader_valid_page(ppnum_t ppn)
{
	return pmap_bootloader_page(ppn);
}