]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
f427ee49 | 2 | * Copyright (c) 2000-2020 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
0a7de745 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
0a7de745 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
0a7de745 | 17 | * |
2d21ac55 A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
0a7de745 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
0a7de745 | 31 | /* |
1c79356b A |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
0a7de745 | 35 | * |
1c79356b A |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
0a7de745 | 41 | * |
1c79356b A |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
0a7de745 | 45 | * |
1c79356b | 46 | * Carnegie Mellon requests users of this software to return to |
0a7de745 | 47 | * |
1c79356b A |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
0a7de745 | 52 | * |
1c79356b A |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | ||
57 | #include <mach_assert.h> | |
1c79356b | 58 | #include <mach_kdp.h> |
5ba3f43e A |
59 | #include <kdp/kdp.h> |
60 | #include <kdp/kdp_core.h> | |
61 | #include <kdp/kdp_internal.h> | |
62 | #include <kdp/kdp_callout.h> | |
1c79356b | 63 | #include <kern/cpu_number.h> |
b0d623f7 | 64 | #include <kern/kalloc.h> |
f427ee49 | 65 | #include <kern/percpu.h> |
1c79356b A |
66 | #include <kern/spl.h> |
67 | #include <kern/thread.h> | |
68 | #include <kern/assert.h> | |
69 | #include <kern/sched_prim.h> | |
70 | #include <kern/misc_protos.h> | |
c910b4d9 | 71 | #include <kern/clock.h> |
39236c6e | 72 | #include <kern/telemetry.h> |
fe8ab488 | 73 | #include <kern/ecc.h> |
3e170ce0 | 74 | #include <kern/kern_cdata.h> |
f427ee49 | 75 | #include <kern/zalloc_internal.h> |
9bccf70c | 76 | #include <vm/vm_kern.h> |
cb323159 | 77 | #include <vm/vm_map.h> |
91447636 | 78 | #include <vm/pmap.h> |
1c79356b | 79 | #include <stdarg.h> |
5ba3f43e A |
80 | #include <stdatomic.h> |
81 | #include <sys/pgo.h> | |
82 | #include <console/serial_protos.h> | |
83 | ||
fe8ab488 | 84 | #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING) |
2d21ac55 A |
85 | #include <kdp/kdp_udp.h> |
86 | #endif | |
5ba3f43e | 87 | #include <kern/processor.h> |
1c79356b | 88 | |
b0d623f7 | 89 | #if defined(__i386__) || defined(__x86_64__) |
cb323159 A |
90 | #include <IOKit/IOBSD.h> |
91 | ||
2d21ac55 A |
92 | #include <i386/cpu_threads.h> |
93 | #include <i386/pmCPU.h> | |
94 | #endif | |
95 | ||
96 | #include <IOKit/IOPlatformExpert.h> | |
6d2010ae | 97 | #include <machine/pal_routines.h> |
2d21ac55 | 98 | |
b0d623f7 A |
99 | #include <sys/kdebug.h> |
100 | #include <libkern/OSKextLibPrivate.h> | |
6d2010ae A |
101 | #include <libkern/OSAtomic.h> |
102 | #include <libkern/kernel_mach_header.h> | |
cc8bc92a | 103 | #include <libkern/section_keywords.h> |
6d2010ae | 104 | #include <uuid/uuid.h> |
3e170ce0 | 105 | #include <mach_debug/zone_info.h> |
f427ee49 | 106 | #include <mach/resource_monitors.h> |
b0d623f7 | 107 | |
39037602 A |
108 | #include <os/log_private.h> |
109 | ||
f427ee49 | 110 | #if defined(__arm__) || defined(__arm64__) |
fe8ab488 | 111 | #include <pexpert/pexpert.h> /* For gPanicBase */ |
5ba3f43e A |
112 | #include <arm/caches_internal.h> |
113 | #include <arm/misc_protos.h> | |
114 | extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info; | |
fe8ab488 A |
115 | #endif |
116 | ||
d9a64523 A |
117 | #if CONFIG_XNUPOST |
118 | #include <tests/xnupost.h> | |
119 | extern int vsnprintf(char *, size_t, const char *, va_list); | |
120 | #endif | |
39037602 | 121 | |
f427ee49 A |
122 | #if CONFIG_CSR |
123 | #include <sys/csr.h> | |
124 | #endif | |
125 | ||
126 | extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize ); | |
127 | ||
0a7de745 A |
128 | unsigned int halt_in_debugger = 0; |
129 | unsigned int current_debugger = 0; | |
130 | unsigned int active_debugger = 0; | |
131 | unsigned int panicDebugging = FALSE; | |
0a7de745 | 132 | unsigned int kernel_debugger_entry_count = 0; |
5ba3f43e | 133 | |
f427ee49 | 134 | #if defined(__arm__) || defined(__arm64__) |
0a7de745 A |
135 | struct additional_panic_data_buffer *panic_data_buffers = NULL; |
136 | #endif | |
5ba3f43e A |
137 | |
138 | #if defined(__arm__) | |
139 | #define TRAP_DEBUGGER __asm__ volatile("trap") | |
140 | #elif defined(__arm64__) | |
141 | /* | |
142 | * Magic number; this should be identical to the __arm__ encoding for trap. | |
143 | */ | |
144 | #define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff") | |
145 | #elif defined (__x86_64__) | |
146 | #define TRAP_DEBUGGER __asm__("int3") | |
147 | #else | |
148 | #error No TRAP_DEBUGGER for this architecture | |
149 | #endif | |
150 | ||
151 | #if defined(__i386__) || defined(__x86_64__) | |
0a7de745 | 152 | #define panic_stop() pmCPUHalt(PM_HALT_PANIC) |
5ba3f43e | 153 | #else |
0a7de745 | 154 | #define panic_stop() panic_spin_forever() |
5ba3f43e A |
155 | #endif |
156 | ||
f427ee49 A |
157 | struct debugger_state { |
158 | uint64_t db_panic_options; | |
159 | debugger_op db_current_op; | |
160 | boolean_t db_proceed_on_sync_failure; | |
161 | const char *db_message; | |
162 | const char *db_panic_str; | |
163 | va_list *db_panic_args; | |
164 | void *db_panic_data_ptr; | |
165 | unsigned long db_panic_caller; | |
166 | /* incremented whenever we panic or call Debugger (current CPU panic level) */ | |
167 | uint32_t db_entry_count; | |
168 | kern_return_t db_op_return; | |
169 | }; | |
170 | static struct debugger_state PERCPU_DATA(debugger_state); | |
171 | ||
172 | /* __pure2 is correct if this function is called with preemption disabled */ | |
173 | static inline __pure2 struct debugger_state * | |
174 | current_debugger_state(void) | |
175 | { | |
176 | return PERCPU_GET(debugger_state); | |
177 | } | |
178 | ||
179 | #define CPUDEBUGGEROP current_debugger_state()->db_current_op | |
180 | #define CPUDEBUGGERMSG current_debugger_state()->db_message | |
181 | #define CPUPANICSTR current_debugger_state()->db_panic_str | |
182 | #define CPUPANICARGS current_debugger_state()->db_panic_args | |
183 | #define CPUPANICOPTS current_debugger_state()->db_panic_options | |
184 | #define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr | |
185 | #define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure | |
186 | #define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count | |
187 | #define CPUDEBUGGERRET current_debugger_state()->db_op_return | |
188 | #define CPUPANICCALLER current_debugger_state()->db_panic_caller | |
5ba3f43e A |
189 | |
190 | #if DEVELOPMENT || DEBUG | |
f427ee49 A |
191 | #define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \ |
192 | MACRO_BEGIN \ | |
193 | if (requested) { \ | |
194 | volatile int *badpointer = (int *)4; \ | |
195 | *badpointer = 0; \ | |
196 | } \ | |
5ba3f43e A |
197 | MACRO_END |
198 | #endif /* DEVELOPMENT || DEBUG */ | |
199 | ||
200 | debugger_op debugger_current_op = DBOP_NONE; | |
201 | const char *debugger_panic_str = NULL; | |
202 | va_list *debugger_panic_args = NULL; | |
d9a64523 | 203 | void *debugger_panic_data = NULL; |
5ba3f43e A |
204 | uint64_t debugger_panic_options = 0; |
205 | const char *debugger_message = NULL; | |
206 | unsigned long debugger_panic_caller = 0; | |
207 | ||
cb323159 A |
208 | void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, |
209 | unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data, | |
210 | unsigned long panic_caller) __dead2; | |
211 | static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags); | |
212 | void panic_spin_forever(void) __dead2; | |
5ba3f43e | 213 | extern kern_return_t do_stackshot(void); |
cb323159 | 214 | extern void PE_panic_hook(const char*); |
1c79356b | 215 | |
5ba3f43e | 216 | #define NESTEDDEBUGGERENTRYMAX 5 |
cb323159 | 217 | static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX; |
1c79356b | 218 | |
f427ee49 | 219 | #if defined(__arm__) || defined(__arm64__) |
5ba3f43e | 220 | #define DEBUG_BUF_SIZE (4096) |
c910b4d9 | 221 | |
f427ee49 | 222 | /* debug_buf is directly linked with iBoot panic region for arm targets */ |
5ba3f43e | 223 | char *debug_buf_base = NULL; |
fe8ab488 A |
224 | char *debug_buf_ptr = NULL; |
225 | unsigned int debug_buf_size = 0; | |
f427ee49 A |
226 | |
227 | SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE; | |
228 | #else /* defined(__arm__) || defined(__arm64__) */ | |
229 | #define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data)) | |
230 | /* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */ | |
231 | static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements"); | |
232 | ||
c910b4d9 | 233 | char debug_buf[DEBUG_BUF_SIZE]; |
5ba3f43e A |
234 | struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf; |
235 | char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data)); | |
236 | char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data)); | |
237 | ||
238 | /* | |
239 | * We don't include the size of the panic header in the length of the data we actually write. | |
240 | * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of | |
241 | * the end of the log because we only support writing (3*PAGESIZE) bytes. | |
242 | */ | |
cc8bc92a A |
243 | unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data)); |
244 | ||
245 | boolean_t extended_debug_log_enabled = FALSE; | |
f427ee49 A |
246 | #endif /* defined(__arm__) || defined(__arm64__) */ |
247 | ||
248 | #if defined(XNU_TARGET_OS_OSX) | |
249 | #define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace" | |
250 | #else | |
251 | #define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace" | |
fe8ab488 | 252 | #endif |
2d21ac55 | 253 | |
5ba3f43e A |
254 | /* Debugger state */ |
255 | atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU); | |
256 | boolean_t debugger_allcpus_halted = FALSE; | |
257 | boolean_t debugger_safe_to_return = TRUE; | |
258 | unsigned int debugger_context = 0; | |
39037602 | 259 | |
2d21ac55 | 260 | static char model_name[64]; |
39236c6e | 261 | unsigned char *kernel_uuid; |
2d21ac55 | 262 | |
d9a64523 A |
263 | boolean_t kernelcache_uuid_valid = FALSE; |
264 | uuid_t kernelcache_uuid; | |
265 | uuid_string_t kernelcache_uuid_string; | |
266 | ||
f427ee49 A |
267 | boolean_t pageablekc_uuid_valid = FALSE; |
268 | uuid_t pageablekc_uuid; | |
269 | uuid_string_t pageablekc_uuid_string; | |
270 | ||
271 | boolean_t auxkc_uuid_valid = FALSE; | |
272 | uuid_t auxkc_uuid; | |
273 | uuid_string_t auxkc_uuid_string; | |
274 | ||
5ba3f43e A |
275 | /* |
276 | * By default we treat Debugger() the same as calls to panic(), unless | |
277 | * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set. | |
278 | * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported. | |
279 | * | |
280 | * Return from Debugger() is currently only implemented on x86 | |
281 | */ | |
282 | static boolean_t debugger_is_panic = TRUE; | |
316670eb | 283 | |
f427ee49 | 284 | TUNABLE(unsigned int, debug_boot_arg, "debug", 0); |
2d21ac55 | 285 | |
5ba3f43e | 286 | char kernel_uuid_string[37]; /* uuid_string_t */ |
d9a64523 | 287 | char kernelcache_uuid_string[37]; /* uuid_string_t */ |
5ba3f43e A |
288 | char panic_disk_error_description[512]; |
289 | size_t panic_disk_error_description_size = sizeof(panic_disk_error_description); | |
9bccf70c | 290 | |
5ba3f43e | 291 | extern unsigned int write_trace_on_panic; |
39037602 A |
292 | int kext_assertions_enable = |
293 | #if DEBUG || DEVELOPMENT | |
0a7de745 | 294 | TRUE; |
39037602 | 295 | #else |
0a7de745 | 296 | FALSE; |
39037602 A |
297 | #endif |
298 | ||
cb323159 A |
299 | /* |
300 | * Maintain the physically-contiguous carveout for the `phys_carveout_mb` | |
301 | * boot-arg. | |
302 | */ | |
303 | SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0; | |
304 | SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0; | |
305 | SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0; | |
306 | ||
f427ee49 A |
307 | boolean_t |
308 | kernel_debugging_allowed(void) | |
309 | { | |
310 | #if XNU_TARGET_OS_OSX | |
311 | #if CONFIG_CSR | |
312 | if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) { | |
313 | return FALSE; | |
314 | } | |
315 | #endif /* CONFIG_CSR */ | |
316 | return TRUE; | |
317 | #else /* XNU_TARGET_OS_OSX */ | |
318 | return PE_i_can_has_debugger(NULL); | |
319 | #endif /* XNU_TARGET_OS_OSX */ | |
320 | } | |
321 | ||
322 | __startup_func | |
323 | static void | |
1c79356b A |
324 | panic_init(void) |
325 | { | |
6d2010ae A |
326 | unsigned long uuidlen = 0; |
327 | void *uuid; | |
328 | ||
329 | uuid = getuuidfromheader(&_mh_execute_header, &uuidlen); | |
330 | if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) { | |
39236c6e A |
331 | kernel_uuid = uuid; |
332 | uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string); | |
6d2010ae A |
333 | } |
334 | ||
cc8bc92a | 335 | /* |
f427ee49 | 336 | * Take the value of the debug boot-arg into account |
cc8bc92a | 337 | */ |
f427ee49 A |
338 | #if MACH_KDP |
339 | if (kernel_debugging_allowed() && debug_boot_arg) { | |
cc8bc92a | 340 | if (debug_boot_arg & DB_HALT) { |
0a7de745 | 341 | halt_in_debugger = 1; |
cc8bc92a | 342 | } |
5ba3f43e | 343 | |
f427ee49 | 344 | #if defined(__arm__) || defined(__arm64__) |
cc8bc92a A |
345 | if (debug_boot_arg & DB_NMI) { |
346 | panicDebugging = TRUE; | |
347 | } | |
348 | #else | |
349 | panicDebugging = TRUE; | |
f427ee49 | 350 | #endif /* defined(__arm__) || defined(__arm64__) */ |
cc8bc92a | 351 | } |
cb323159 A |
352 | |
353 | if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) { | |
354 | max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX; | |
355 | } | |
356 | ||
f427ee49 A |
357 | #if defined(__arm__) || defined(__arm64__) |
358 | char kdpname[80]; | |
cc8bc92a | 359 | |
f427ee49 A |
360 | kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname)); |
361 | #endif /* defined(__arm__) || defined(__arm64__) */ | |
362 | ||
363 | #endif /* MACH_KDP */ | |
cc8bc92a | 364 | |
f427ee49 | 365 | #if defined (__x86_64__) |
cc8bc92a A |
366 | /* |
367 | * By default we treat Debugger() the same as calls to panic(), unless | |
368 | * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set. | |
369 | * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported. | |
370 | * This is because writing an on-device corefile is a destructive operation. | |
371 | * | |
372 | * Return from Debugger() is currently only implemented on x86 | |
373 | */ | |
374 | if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) { | |
5ba3f43e A |
375 | debugger_is_panic = FALSE; |
376 | } | |
377 | #endif | |
1c79356b | 378 | } |
f427ee49 | 379 | STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init); |
1c79356b | 380 | |
cc8bc92a A |
381 | #if defined (__x86_64__) |
382 | void | |
383 | extended_debug_log_init(void) | |
384 | { | |
385 | assert(coprocessor_paniclog_flush); | |
386 | /* | |
387 | * Allocate an extended panic log buffer that has space for the panic | |
388 | * stackshot at the end. Update the debug buf pointers appropriately | |
389 | * to point at this new buffer. | |
f427ee49 | 390 | * |
5c9f4661 A |
391 | * iBoot pre-initializes the panic region with the NULL character. We set this here |
392 | * so we can accurately calculate the CRC for the region without needing to flush the | |
393 | * full region over SMC. | |
394 | */ | |
f427ee49 | 395 | char *new_debug_buf = kalloc_flags(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO); |
cc8bc92a A |
396 | |
397 | panic_info = (struct macos_panic_header *)new_debug_buf; | |
398 | debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data)); | |
399 | debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data)); | |
400 | ||
401 | extended_debug_log_enabled = TRUE; | |
cb323159 A |
402 | |
403 | /* | |
404 | * Insert a compiler barrier so we don't free the other panic stackshot buffer | |
405 | * until after we've marked the new one as available | |
406 | */ | |
407 | __compiler_barrier(); | |
408 | kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len); | |
409 | panic_stackshot_buf = 0; | |
410 | panic_stackshot_buf_len = 0; | |
cc8bc92a A |
411 | } |
412 | #endif /* defined (__x86_64__) */ | |
413 | ||
2d21ac55 A |
414 | void |
415 | debug_log_init(void) | |
416 | { | |
f427ee49 | 417 | #if defined(__arm__) || defined(__arm64__) |
fe8ab488 A |
418 | if (!gPanicBase) { |
419 | printf("debug_log_init: Error!! gPanicBase is still not initialized\n"); | |
420 | return; | |
421 | } | |
5ba3f43e A |
422 | /* Shift debug buf start location and size by the length of the panic header */ |
423 | debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header); | |
424 | debug_buf_ptr = debug_buf_base; | |
425 | debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header); | |
fe8ab488 | 426 | #else |
cb323159 | 427 | kern_return_t kr = KERN_SUCCESS; |
cc8bc92a A |
428 | bzero(panic_info, DEBUG_BUF_SIZE); |
429 | ||
5ba3f43e A |
430 | assert(debug_buf_base != NULL); |
431 | assert(debug_buf_ptr != NULL); | |
432 | assert(debug_buf_size != 0); | |
cb323159 A |
433 | |
434 | /* | |
435 | * We allocate a buffer to store a panic time stackshot. If we later discover that this is a | |
436 | * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory | |
437 | * as it's not necessary on this platform. This information won't be available until the IOPlatform has come | |
438 | * up. | |
439 | */ | |
440 | kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG); | |
441 | assert(kr == KERN_SUCCESS); | |
442 | if (kr == KERN_SUCCESS) { | |
443 | panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE; | |
444 | } | |
fe8ab488 | 445 | #endif |
2d21ac55 A |
446 | } |
447 | ||
cb323159 A |
448 | void |
449 | phys_carveout_init(void) | |
450 | { | |
451 | if (!PE_i_can_has_debugger(NULL)) { | |
452 | return; | |
453 | } | |
454 | ||
455 | unsigned int phys_carveout_mb = 0; | |
456 | ||
457 | if (!PE_parse_boot_argn("phys_carveout_mb", &phys_carveout_mb, | |
458 | sizeof(phys_carveout_mb))) { | |
459 | return; | |
460 | } | |
461 | if (phys_carveout_mb == 0) { | |
462 | return; | |
463 | } | |
464 | ||
465 | size_t size = 0; | |
466 | if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &size)) { | |
467 | printf("phys_carveout_mb size overflowed (%uMB)\n", | |
468 | phys_carveout_mb); | |
469 | return; | |
470 | } | |
471 | ||
472 | kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, size, | |
473 | VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT, | |
474 | VM_KERN_MEMORY_DIAG); | |
475 | if (kr != KERN_SUCCESS) { | |
476 | printf("failed to allocate %uMB for phys_carveout_mb: %u\n", | |
477 | phys_carveout_mb, (unsigned int)kr); | |
478 | return; | |
479 | } | |
480 | ||
481 | phys_carveout_pa = kvtophys(phys_carveout); | |
482 | phys_carveout_size = size; | |
483 | } | |
484 | ||
5ba3f43e | 485 | static void |
f427ee49 | 486 | DebuggerLock(void) |
5ba3f43e A |
487 | { |
488 | int my_cpu = cpu_number(); | |
489 | int debugger_exp_cpu = DEBUGGER_NO_CPU; | |
490 | assert(ml_get_interrupts_enabled() == FALSE); | |
491 | ||
cb323159 | 492 | if (atomic_load(&debugger_cpu) == my_cpu) { |
5ba3f43e A |
493 | return; |
494 | } | |
495 | ||
0a7de745 | 496 | while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) { |
5ba3f43e A |
497 | debugger_exp_cpu = DEBUGGER_NO_CPU; |
498 | } | |
499 | ||
500 | return; | |
501 | } | |
502 | ||
503 | static void | |
f427ee49 | 504 | DebuggerUnlock(void) |
5ba3f43e | 505 | { |
cb323159 | 506 | assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number()); |
5ba3f43e A |
507 | |
508 | /* | |
509 | * We don't do an atomic exchange here in case | |
510 | * there's another CPU spinning to acquire the debugger_lock | |
511 | * and we never get a chance to update it. We already have the | |
512 | * lock so we can simply store DEBUGGER_NO_CPU and follow with | |
513 | * a barrier. | |
514 | */ | |
cb323159 | 515 | atomic_store(&debugger_cpu, DEBUGGER_NO_CPU); |
5ba3f43e A |
516 | OSMemoryBarrier(); |
517 | ||
518 | return; | |
519 | } | |
520 | ||
521 | static kern_return_t | |
522 | DebuggerHaltOtherCores(boolean_t proceed_on_failure) | |
523 | { | |
f427ee49 | 524 | #if defined(__arm__) || defined(__arm64__) |
5ba3f43e | 525 | return DebuggerXCallEnter(proceed_on_failure); |
f427ee49 | 526 | #else /* defined(__arm__) || defined(__arm64__) */ |
5ba3f43e A |
527 | #pragma unused(proceed_on_failure) |
528 | mp_kdp_enter(proceed_on_failure); | |
529 | return KERN_SUCCESS; | |
2d21ac55 | 530 | #endif |
5ba3f43e A |
531 | } |
532 | ||
533 | static void | |
f427ee49 | 534 | DebuggerResumeOtherCores(void) |
5ba3f43e | 535 | { |
f427ee49 | 536 | #if defined(__arm__) || defined(__arm64__) |
5ba3f43e | 537 | DebuggerXCallReturn(); |
f427ee49 | 538 | #else /* defined(__arm__) || defined(__arm64__) */ |
5ba3f43e A |
539 | mp_kdp_exit(); |
540 | #endif | |
541 | } | |
542 | ||
543 | static void | |
544 | DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str, | |
0a7de745 A |
545 | va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr, |
546 | boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller) | |
5ba3f43e A |
547 | { |
548 | CPUDEBUGGEROP = db_op; | |
549 | ||
550 | /* Preserve the original panic message */ | |
551 | if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) { | |
552 | CPUDEBUGGERMSG = db_message; | |
553 | CPUPANICSTR = db_panic_str; | |
554 | CPUPANICARGS = db_panic_args; | |
d9a64523 | 555 | CPUPANICDATAPTR = db_panic_data_ptr; |
5ba3f43e A |
556 | CPUPANICCALLER = db_panic_caller; |
557 | } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) { | |
cc8bc92a | 558 | kprintf("Nested panic detected:"); |
0a7de745 | 559 | if (db_panic_str != NULL) { |
cc8bc92a | 560 | _doprnt(db_panic_str, db_panic_args, PE_kputc, 0); |
0a7de745 | 561 | } |
5ba3f43e A |
562 | } |
563 | ||
564 | CPUDEBUGGERSYNC = db_proceed_on_sync_failure; | |
565 | CPUDEBUGGERRET = KERN_SUCCESS; | |
566 | ||
567 | /* Reset these on any nested panics */ | |
568 | CPUPANICOPTS = db_panic_options; | |
569 | ||
570 | return; | |
571 | } | |
2d21ac55 | 572 | |
b0d623f7 | 573 | /* |
f427ee49 A |
574 | * Save the requested debugger state/action into the current processor's |
575 | * percu state and trap to the debugger. | |
b0d623f7 | 576 | */ |
5ba3f43e A |
577 | kern_return_t |
578 | DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str, | |
0a7de745 A |
579 | va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr, |
580 | boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller) | |
5ba3f43e A |
581 | { |
582 | kern_return_t ret; | |
583 | ||
584 | assert(ml_get_interrupts_enabled() == FALSE); | |
d9a64523 | 585 | DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args, |
0a7de745 A |
586 | db_panic_options, db_panic_data_ptr, |
587 | db_proceed_on_sync_failure, db_panic_caller); | |
5ba3f43e | 588 | |
f427ee49 A |
589 | /* |
590 | * On ARM this generates an uncategorized exception -> sleh code -> | |
591 | * DebuggerCall -> kdp_trap -> handle_debugger_trap | |
592 | * So that is how XNU ensures that only one core can panic. | |
593 | * The rest of the cores are halted by IPI if possible; if that | |
594 | * fails it will fall back to dbgwrap. | |
595 | */ | |
5ba3f43e A |
596 | TRAP_DEBUGGER; |
597 | ||
598 | ret = CPUDEBUGGERRET; | |
b0d623f7 | 599 | |
d9a64523 | 600 | DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0); |
5ba3f43e A |
601 | |
602 | return ret; | |
603 | } | |
604 | ||
605 | void __attribute__((noinline)) | |
606 | Assert( | |
0a7de745 A |
607 | const char *file, |
608 | int line, | |
609 | const char *expression | |
610 | ) | |
b0d623f7 | 611 | { |
cb323159 | 612 | #if CONFIG_NONFATAL_ASSERTS |
f427ee49 A |
613 | static TUNABLE(bool, mach_assert, "assertions", true); |
614 | ||
5ba3f43e A |
615 | if (!mach_assert) { |
616 | kprintf("%s:%d non-fatal Assertion: %s", file, line, expression); | |
617 | return; | |
618 | } | |
cb323159 | 619 | #endif |
5ba3f43e A |
620 | |
621 | panic_plain("%s:%d Assertion failed: %s", file, line, expression); | |
b0d623f7 A |
622 | } |
623 | ||
f427ee49 A |
624 | boolean_t |
625 | debug_is_current_cpu_in_panic_state(void) | |
626 | { | |
627 | return current_debugger_state()->db_entry_count > 0; | |
628 | } | |
fe8ab488 | 629 | |
5ba3f43e A |
630 | void |
631 | Debugger(const char *message) | |
1c79356b | 632 | { |
5ba3f43e A |
633 | DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE); |
634 | } | |
1c79356b | 635 | |
5ba3f43e A |
636 | void |
637 | DebuggerWithContext(unsigned int reason, void *ctx, const char *message, | |
0a7de745 | 638 | uint64_t debugger_options_mask) |
5ba3f43e A |
639 | { |
640 | spl_t previous_interrupts_state; | |
641 | boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers; | |
7ddcb079 | 642 | |
5ba3f43e | 643 | previous_interrupts_state = ml_set_interrupts_enabled(FALSE); |
7ddcb079 A |
644 | disable_preemption(); |
645 | ||
5ba3f43e | 646 | CPUDEBUGGERCOUNT++; |
b0d623f7 | 647 | |
cb323159 | 648 | if (CPUDEBUGGERCOUNT > max_debugger_entry_count) { |
5ba3f43e | 649 | static boolean_t in_panic_kprintf = FALSE; |
2d21ac55 | 650 | |
5ba3f43e | 651 | /* Notify any listeners that we've started a panic */ |
f427ee49 A |
652 | uint32_t panic_details = 0; |
653 | if (debugger_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) { | |
654 | panic_details |= kPanicDetailsForcePowerOff; | |
655 | } | |
656 | PEHaltRestartInternal(kPEPanicBegin, panic_details); | |
91447636 | 657 | |
5ba3f43e A |
658 | if (!in_panic_kprintf) { |
659 | in_panic_kprintf = TRUE; | |
660 | kprintf("Detected nested debugger entry count exceeding %d\n", | |
cb323159 | 661 | max_debugger_entry_count); |
5ba3f43e A |
662 | in_panic_kprintf = FALSE; |
663 | } | |
316670eb | 664 | |
5ba3f43e | 665 | if (!panicDebugging) { |
cb323159 | 666 | kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask); |
1c79356b | 667 | } |
5ba3f43e A |
668 | |
669 | panic_spin_forever(); | |
1c79356b | 670 | } |
1c79356b | 671 | |
f427ee49 A |
672 | /* Handle any necessary platform specific actions before we proceed */ |
673 | PEInitiatePanic(); | |
674 | ||
5ba3f43e A |
675 | #if DEVELOPMENT || DEBUG |
676 | DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY)); | |
677 | #endif | |
678 | ||
679 | doprnt_hide_pointers = FALSE; | |
680 | ||
681 | if (ctx != NULL) { | |
682 | DebuggerSaveState(DBOP_DEBUGGER, message, | |
0a7de745 | 683 | NULL, NULL, debugger_options_mask, NULL, TRUE, 0); |
5ba3f43e A |
684 | handle_debugger_trap(reason, 0, 0, ctx); |
685 | DebuggerSaveState(DBOP_NONE, NULL, NULL, | |
0a7de745 | 686 | NULL, 0, NULL, FALSE, 0); |
5ba3f43e A |
687 | } else { |
688 | DebuggerTrapWithState(DBOP_DEBUGGER, message, | |
0a7de745 | 689 | NULL, NULL, debugger_options_mask, NULL, TRUE, 0); |
5ba3f43e | 690 | } |
39037602 | 691 | |
5ba3f43e A |
692 | CPUDEBUGGERCOUNT--; |
693 | doprnt_hide_pointers = old_doprnt_hide_pointers; | |
694 | enable_preemption(); | |
695 | ml_set_interrupts_enabled(previous_interrupts_state); | |
696 | } | |
697 | ||
698 | static struct kdp_callout { | |
699 | struct kdp_callout * callout_next; | |
700 | kdp_callout_fn_t callout_fn; | |
701 | boolean_t callout_in_progress; | |
702 | void * callout_arg; | |
703 | } * kdp_callout_list = NULL; | |
704 | ||
705 | /* | |
706 | * Called from kernel context to register a kdp event callout. | |
707 | */ | |
708 | void | |
709 | kdp_register_callout(kdp_callout_fn_t fn, void * arg) | |
710 | { | |
711 | struct kdp_callout * kcp; | |
712 | struct kdp_callout * list_head; | |
713 | ||
714 | kcp = kalloc(sizeof(*kcp)); | |
0a7de745 | 715 | if (kcp == NULL) { |
5ba3f43e | 716 | panic("kdp_register_callout() kalloc failed"); |
0a7de745 | 717 | } |
5ba3f43e A |
718 | |
719 | kcp->callout_fn = fn; | |
720 | kcp->callout_arg = arg; | |
721 | kcp->callout_in_progress = FALSE; | |
722 | ||
723 | /* Lock-less list insertion using compare and exchange. */ | |
724 | do { | |
725 | list_head = kdp_callout_list; | |
726 | kcp->callout_next = list_head; | |
727 | } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list)); | |
316670eb | 728 | } |
1c79356b | 729 | |
39037602 | 730 | static void |
5ba3f43e | 731 | kdp_callouts(kdp_event_t event) |
316670eb | 732 | { |
0a7de745 | 733 | struct kdp_callout *kcp = kdp_callout_list; |
2d21ac55 | 734 | |
5ba3f43e A |
735 | while (kcp) { |
736 | if (!kcp->callout_in_progress) { | |
737 | kcp->callout_in_progress = TRUE; | |
738 | kcp->callout_fn(kcp->callout_arg, event); | |
739 | kcp->callout_in_progress = FALSE; | |
740 | } | |
741 | kcp = kcp->callout_next; | |
55e303ae | 742 | } |
1c79356b A |
743 | } |
744 | ||
#if defined(__arm__) || defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
	/*
	 * kalloc() can fail; dereferencing a NULL result below would fault.
	 * Panic explicitly instead, matching kdp_register_callout()'s
	 * handling of allocation failure.
	 */
	if (new_panic_data_buffer == NULL) {
		panic("register_additional_panic_data_buffer kalloc failed");
	}
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	/* Publish atomically; fail loudly if another registrant raced us here. */
	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
#endif /* defined(__arm__) || defined(__arm64__) */
0a7de745 | 784 | |
5ba3f43e A |
/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
/*
 * Standard panic entry point. The format string and its va_list are passed
 * down unformatted so the panic path can render them later; the caller's
 * return address is recorded for the "caller" field of the panic log.
 */
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
806 | ||
/*
 * Like panic(), but lets the caller supply a reason code, a trap context,
 * and a mask of DEBUGGER_OPTION_* flags. Internal-only option bits are
 * stripped from the caller-provided mask before it is passed down.
 */
void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
817 | ||
d9a64523 A |
#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	assert_thread_magic(thread);
	/* Assert-build-only check that the caller really holds a reference. */
	th_ref_count = os_ref_get_count(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	/* Force the thread-backtrace internal option so the paniclog walks 'thread'. */
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */
5ba3f43e A |
847 | |
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
/*
 * Common funnel for all panic wrappers: record the panic state for this
 * processor and trap into the debugger path. Does not return -- control
 * either jumps to handle_debugger_trap() directly (when called from a trap
 * context) or traps via DebuggerTrapWithState().
 */
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	/* Turn off I/O tracing once we've panicked */
	mmiotrace_enabled = 0;
#endif

	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	/* Track nested entries (CPUDEBUGGERCOUNT is per-processor debugger state). */
	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/* in_panic_kprintf guards against recursing if kprintf itself panics. */
		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		panic_spin_forever();
	}

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	/* Flush the kdebug trace buffer to disk while it is still safe to do so. */
	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__)
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}
947 | ||
/*
 * Terminal state for an unrecoverable panic: emit the reporting URL into
 * the panic log, then spin this CPU forever.
 */
void
panic_spin_forever(void)
{
	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");

	while (1) {
		;
	}
}
956 | ||
957 | static void | |
cb323159 | 958 | kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags) |
5ba3f43e | 959 | { |
f427ee49 | 960 | printf("Attempting system restart...\n"); |
cb323159 | 961 | if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) { |
f427ee49 | 962 | PEHaltRestart(kPEPanicRestartCPUNoCallouts); |
cb323159 A |
963 | } else { |
964 | PEHaltRestart(type); | |
965 | } | |
5ba3f43e A |
966 | halt_all_cpus(TRUE); |
967 | } | |
968 | ||
/*
 * Default reboot path: panic-CPU restart with no debugger flags, so the
 * normal panic-end callouts run (see kdp_machine_reboot_type).
 */
void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}
974 | ||
/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_PRELOG));
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		} else {
			/*
			 * If we reached here, update the panic header to keep it as consistent
			 * as possible during a nested panic
			 */
			PE_update_panicheader_nestedpanic();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %d caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	/*
	 * NOTE: on x86 the braced block below is guarded by the if above;
	 * on all other architectures it is entered unconditionally.
	 */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTLOG));
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)\n",
				    kdp_polled_corefile_error());
#if defined(__arm__) || defined(__arm64__)
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm__) || defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm__) || defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
			paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTCORE));
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is sucessfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm__) || defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm__) || defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm__) || defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	panic_spin_forever();
}
1203 | ||
5ba3f43e A |
#if INTERRUPT_MASKED_DEBUG
/* Timestamps of the numbered checkpoints below, for post-mortem timing analysis. */
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif

/*
 * Entry point once a CPU has trapped into the debugger (from a panic,
 * Debugger() call, stackshot request, or software breakpoint). Acquires the
 * debugger lock, halts the other cores, dispatches on the requested
 * operation, and restores the previous debugger state before returning.
 *
 * The CPUDEBUGGER*/CPUPANIC* accessors appear to be per-processor debugger
 * context set up by DebuggerSaveState() -- TODO confirm against the header.
 */
void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC);

	DEBUGGER_TRAP_TIMESTAMP(1);

#if INTERRUPT_MASKED_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif
	/* Couldn't halt the other cores: report the failure and bail out. */
	if (ret != KERN_SUCCESS) {
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

	/* Dispatch on the requested debugger operation. */
	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	/* Breakpoints keep the saved panic message; everything else clears it. */
	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}
1326 | ||
0a7de745 A |
/*
 * Kernel log(): renders the message to the console and also hands it to
 * os_log. Marked noinline/not_tail_called so __builtin_return_address(0)
 * reliably identifies the caller for os_log attribution. 'level' is unused.
 */
__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef  MACH_BSD
	va_start(listp, fmt);
	/* _doprnt consumes listp; keep an independent copy for os_log_with_args. */
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
	va_end(listp2);
#endif
}
9bccf70c | 1353 | |
39037602 | 1354 | /* |
5ba3f43e A |
1355 | * Per <rdar://problem/24974766>, skip appending log messages to |
1356 | * the new logging infrastructure in contexts where safety is | |
1357 | * uncertain. These contexts include: | |
39037602 A |
1358 | * - When we're in the debugger |
1359 | * - We're in a panic | |
1360 | * - Interrupts are disabled | |
1361 | * - Or Pre-emption is disabled | |
1362 | * In all the above cases, it is potentially unsafe to log messages. | |
1363 | */ | |
1364 | ||
5ba3f43e | 1365 | boolean_t |
0a7de745 A |
1366 | oslog_is_safe(void) |
1367 | { | |
1368 | return kernel_debugger_entry_count == 0 && | |
1369 | not_in_kdp == 1 && | |
1370 | get_preemption_level() == 0 && | |
1371 | ml_get_interrupts_enabled() == TRUE; | |
39037602 A |
1372 | } |
1373 | ||
5ba3f43e A |
1374 | boolean_t |
1375 | debug_mode_active(void) | |
1376 | { | |
0a7de745 | 1377 | return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp); |
5ba3f43e A |
1378 | } |
1379 | ||
9bccf70c A |
1380 | void |
1381 | debug_putc(char c) | |
1382 | { | |
2d21ac55 | 1383 | if ((debug_buf_size != 0) && |
0a7de745 A |
1384 | ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) { |
1385 | *debug_buf_ptr = c; | |
9bccf70c A |
1386 | debug_buf_ptr++; |
1387 | } | |
1388 | } | |
2d21ac55 | 1389 | |
5ba3f43e A |
1390 | #if defined (__x86_64__) |
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 *
 * Each group of eight 7-bit characters collapses into seven bytes, written
 * back over the input buffer. Returns the packed length in bytes.
 */
int
packA(char *inbuf, uint32_t length, uint32_t buflen)
{
	unsigned int src;
	unsigned int dst = 0;
	pasc_t group;

	/* Round length up to a multiple of 8, clamped to the buffer size. */
	length = MIN(((length + 7) & ~7), buflen);

	for (src = 0; src < length; src += 8) {
		group.a = inbuf[src];
		group.b = inbuf[src + 1];
		group.c = inbuf[src + 2];
		group.d = inbuf[src + 3];
		group.e = inbuf[src + 4];
		group.f = inbuf[src + 5];
		group.g = inbuf[src + 6];
		group.h = inbuf[src + 7];
		/* 8 source bytes become 7 packed bytes, always behind the read cursor. */
		bcopy((char *) &group, inbuf + dst, 7);
		dst += 7;
	}
	return dst;
}
1430 | ||
5ba3f43e A |
/*
 * Inverse of packA(): expand each 7-byte packed group back into 8 bytes of
 * 7-bit characters, in place. "length" is the packed length in bytes; the
 * buffer must have room for the expanded (length * 8 / 7) bytes.
 */
void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8) / 7;

	while (i < length) {
		/* Snapshot the next packed group before the shift below clobbers it. */
		packs = *(pasc_t *)&inbuf[i];
		/* Shift the remaining packed tail right one byte to open up a gap. */
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
5ba3f43e | 1451 | #endif /* defined (__x86_64__) */ |
2d21ac55 | 1452 | |
f427ee49 A |
extern char *proc_name_address(void *);
extern char *proc_longname_address(void *);

/*
 * Append the current thread's process name to the panic log. Every read is
 * done with ml_nofault_copy() so that corrupted task/proc pointers cannot
 * fault while we are already panicking; any failed copy falls through to
 * printing "Unknown". Prefers the long name, falling back to the short one.
 */
__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	task_t ctask = 0;
	void *cbsd_info = 0;
	vm_size_t size;

	size = ml_nofault_copy((vm_offset_t)&current_thread()->task,
	    (vm_offset_t)&ctask, sizeof(task_t));
	if (size != sizeof(task_t)) {
		goto out;
	}

	size = ml_nofault_copy((vm_offset_t)&ctask->bsd_info,
	    (vm_offset_t)&cbsd_info, sizeof(cbsd_info));
	if (size != sizeof(cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	/* Try the long process name first... */
	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	/* ...and fall back to the short (command) name if it was empty. */
	if (size == 0 || proc_name[0] == '\0') {
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			proc_name[size - 1] = '\0';
		}
	}

out:
	/* Force termination in case a partial copy left the buffer unterminated. */
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread: %s\n",
	    proc_name[0] != '\0' ? proc_name : "Unknown");
}
1497 | ||
5ba3f43e | 1498 | unsigned |
0a7de745 A |
1499 | panic_active(void) |
1500 | { | |
1501 | return debugger_panic_str != (char *) 0; | |
2d21ac55 A |
1502 | } |
1503 | ||
/*
 * Capture the system model name into the static model_name buffer so that
 * panic_display_model_name() can emit it during a panic.
 */
void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}
1509 | ||
5ba3f43e | 1510 | void |
0a7de745 A |
1511 | panic_display_model_name(void) |
1512 | { | |
2d21ac55 A |
1513 | char tmp_model_name[sizeof(model_name)]; |
1514 | ||
0a7de745 | 1515 | if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) { |
2d21ac55 | 1516 | return; |
0a7de745 | 1517 | } |
2d21ac55 | 1518 | |
6d2010ae A |
1519 | tmp_model_name[sizeof(tmp_model_name) - 1] = '\0'; |
1520 | ||
0a7de745 | 1521 | if (tmp_model_name[0] != 0) { |
5ba3f43e | 1522 | paniclog_append_noflush("System model name: %s\n", tmp_model_name); |
0a7de745 | 1523 | } |
6d2010ae A |
1524 | } |
1525 | ||
5ba3f43e | 1526 | void |
0a7de745 A |
1527 | panic_display_kernel_uuid(void) |
1528 | { | |
39236c6e | 1529 | char tmp_kernel_uuid[sizeof(kernel_uuid_string)]; |
6d2010ae | 1530 | |
0a7de745 | 1531 | if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) { |
6d2010ae | 1532 | return; |
0a7de745 | 1533 | } |
2d21ac55 | 1534 | |
0a7de745 | 1535 | if (tmp_kernel_uuid[0] != '\0') { |
5ba3f43e | 1536 | paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid); |
0a7de745 | 1537 | } |
2d21ac55 A |
1538 | } |
1539 | ||
5ba3f43e | 1540 | void |
0a7de745 A |
1541 | panic_display_kernel_aslr(void) |
1542 | { | |
f427ee49 | 1543 | kc_format_t kc_format; |
316670eb | 1544 | |
f427ee49 | 1545 | PE_get_primary_kc_format(&kc_format); |
fe8ab488 | 1546 | |
f427ee49 A |
1547 | if (kc_format == KCFormatFileset) { |
1548 | void *kch = PE_get_kc_header(KCKindPrimary); | |
c910b4d9 | 1549 | |
f427ee49 A |
1550 | paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); |
1551 | paniclog_append_noflush("KernelCache base: %p\n", (void*) kch); | |
1552 | paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide); | |
1553 | } else if (vm_kernel_slide) { | |
1554 | paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); | |
d190cdc3 | 1555 | } |
f427ee49 A |
1556 | paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext); |
1557 | #if defined(__arm64__) | |
1558 | if (kc_format == KCFormatFileset) { | |
1559 | extern vm_offset_t segTEXTEXECB; | |
1560 | paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB); | |
cb323159 A |
1561 | } |
1562 | #endif | |
1563 | } | |
d190cdc3 | 1564 | |
f427ee49 A |
1565 | void |
1566 | panic_display_hibb(void) | |
0a7de745 | 1567 | { |
f427ee49 A |
1568 | #if defined(__i386__) || defined (__x86_64__) |
1569 | paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base); | |
1570 | #endif | |
2d21ac55 A |
1571 | } |
1572 | ||
0a7de745 | 1573 | extern unsigned int stack_total; |
6d2010ae | 1574 | extern unsigned long long stack_allocs; |
c910b4d9 | 1575 | |
5ba3f43e | 1576 | #if defined (__x86_64__) |
0a7de745 | 1577 | extern unsigned int inuse_ptepages_count; |
6d2010ae | 1578 | extern long long alloc_ptepages_count; |
c910b4d9 A |
1579 | #endif |
1580 | ||
5ba3f43e | 1581 | __private_extern__ void |
f427ee49 | 1582 | panic_display_zprint(void) |
c910b4d9 | 1583 | { |
0a7de745 | 1584 | if (panic_include_zprint == TRUE) { |
0a7de745 | 1585 | struct zone zone_copy; |
c910b4d9 | 1586 | |
5ba3f43e | 1587 | paniclog_append_noflush("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size"); |
f427ee49 A |
1588 | zone_index_foreach(i) { |
1589 | if (ml_nofault_copy((vm_offset_t)&zone_array[i], | |
1590 | (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { | |
c3c9b80d | 1591 | if (zone_copy.z_wired_cur > atop(1024 * 1024)) { |
f427ee49 A |
1592 | paniclog_append_noflush("%-8s%-20s %10llu %10lu\n", |
1593 | zone_heap_name(&zone_copy), | |
c3c9b80d | 1594 | zone_copy.z_name, (uint64_t)zone_size_wired(&zone_copy), |
f427ee49 | 1595 | (uintptr_t)zone_size_free(&zone_copy)); |
c910b4d9 | 1596 | } |
0a7de745 | 1597 | } |
c910b4d9 A |
1598 | } |
1599 | ||
f427ee49 A |
1600 | paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks", |
1601 | (uintptr_t)(kernel_stack_size * stack_total)); | |
5ba3f43e | 1602 | #if defined (__x86_64__) |
f427ee49 A |
1603 | paniclog_append_noflush("%-20s %10lu\n", "PageTables", |
1604 | (uintptr_t)ptoa(inuse_ptepages_count)); | |
c910b4d9 | 1605 | #endif |
f427ee49 A |
1606 | paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large", |
1607 | (uintptr_t)kalloc_large_total); | |
b0d623f7 | 1608 | |
3e170ce0 | 1609 | if (panic_kext_memory_info) { |
5ba3f43e A |
1610 | mach_memory_info_t *mem_info = panic_kext_memory_info; |
1611 | paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size"); | |
f427ee49 A |
1612 | for (uint32_t i = 0; i < (panic_kext_memory_size / sizeof(mach_zone_info_t)); i++) { |
1613 | if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && | |
1614 | (mem_info[i].size > (1024 * 1024))) { | |
5ba3f43e | 1615 | paniclog_append_noflush("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size); |
3e170ce0 A |
1616 | } |
1617 | } | |
1618 | } | |
c910b4d9 A |
1619 | } |
1620 | } | |
1621 | ||
#if CONFIG_ECC_LOGGING
/*
 * Append the number of corrected ECC memory errors to the panic log,
 * if any have been recorded.
 */
__private_extern__ void
panic_display_ecc_errors(void)
{
	uint32_t corrections = ecc_log_get_correction_count();

	if (corrections > 0) {
		paniclog_append_noflush("ECC Corrections:%u\n", corrections);
	}
}
#endif /* CONFIG_ECC_LOGGING */
1633 | ||
#if CONFIG_ZLEAKS
void panic_print_symbol_name(vm_address_t search);

/*
 * Prints the backtrace most suspected of being a leaker, if we panicked
 * in the zone allocator. top_ztrace and panic_include_ztrace come from
 * osfmk/kern/zalloc.c. The trace is snapshotted via ml_nofault_copy()
 * so bad memory cannot trip another panic; with the "keepsyms" boot-arg
 * set, each frame is also symbolicated.
 */
__private_extern__ void
panic_display_ztrace(void)
{
	if (panic_include_ztrace != TRUE) {
		return;
	}

	boolean_t keepsyms = FALSE;
	struct ztrace trace_copy;

	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));

	/* Make sure not to trip another panic if there's something wrong with memory */
	if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&trace_copy,
	    sizeof(struct ztrace)) != sizeof(struct ztrace)) {
		paniclog_append_noflush("\nCan't access top_ztrace...\n");
		paniclog_append_noflush("\n");
		return;
	}

	paniclog_append_noflush("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)trace_copy.zt_size);
	/* Print the backtrace addresses */
	for (unsigned int i = 0; i < trace_copy.zt_depth && i < MAX_ZTRACE_DEPTH; i++) {
		paniclog_append_noflush("%p ", trace_copy.zt_stack[i]);
		if (keepsyms) {
			panic_print_symbol_name((vm_address_t)trace_copy.zt_stack[i]);
		}
		paniclog_append_noflush("\n");
	}
	/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
	kmod_panic_dump((vm_offset_t *)&trace_copy.zt_stack[0], trace_copy.zt_depth);
	paniclog_append_noflush("\n");
}
#endif /* CONFIG_ZLEAKS */
1671 | ||
39236c6e | 1672 | #if !CONFIG_TELEMETRY |
5ba3f43e A |
1673 | int |
1674 | telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean_t mark __unused) | |
39236c6e A |
1675 | { |
1676 | return KERN_NOT_SUPPORTED; | |
1677 | } | |
1678 | #endif | |
5ba3f43e A |
1679 | |
1680 | #include <machine/machine_cpu.h> | |
1681 | ||
1682 | uint32_t kern_feature_overrides = 0; | |
1683 | ||
0a7de745 A |
1684 | boolean_t |
1685 | kern_feature_override(uint32_t fmask) | |
1686 | { | |
5ba3f43e A |
1687 | if (kern_feature_overrides == 0) { |
1688 | uint32_t fdisables = 0; | |
cb323159 A |
1689 | /* |
1690 | * Expected to be first invoked early, in a single-threaded | |
5ba3f43e A |
1691 | * environment |
1692 | */ | |
1693 | if (PE_parse_boot_argn("validation_disables", &fdisables, sizeof(fdisables))) { | |
1694 | fdisables |= KF_INITIALIZED; | |
1695 | kern_feature_overrides = fdisables; | |
1696 | } else { | |
1697 | kern_feature_overrides |= KF_INITIALIZED; | |
1698 | } | |
1699 | } | |
0a7de745 | 1700 | return (kern_feature_overrides & fmask) == fmask; |
5ba3f43e | 1701 | } |
cb323159 A |
1702 | |
1703 | boolean_t | |
1704 | on_device_corefile_enabled(void) | |
1705 | { | |
f427ee49 | 1706 | assert(startup_phase >= STARTUP_SUB_TUNABLES); |
cb323159 | 1707 | #if CONFIG_KDP_INTERACTIVE_DEBUGGING |
f427ee49 A |
1708 | if (debug_boot_arg == 0) { |
1709 | return FALSE; | |
1710 | } | |
1711 | if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) { | |
1712 | return FALSE; | |
1713 | } | |
1714 | #if !XNU_TARGET_OS_OSX | |
1715 | /* | |
1716 | * outside of macOS, if there's a debug boot-arg set and local | |
1717 | * cores aren't explicitly disabled, we always write a corefile. | |
1718 | */ | |
1719 | return TRUE; | |
1720 | #else /* !XNU_TARGET_OS_OSX */ | |
1721 | /* | |
1722 | * on macOS, if corefiles on panic are requested and local cores | |
1723 | * aren't disabled we write a local core. | |
1724 | */ | |
1725 | if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) { | |
cb323159 A |
1726 | return TRUE; |
1727 | } | |
f427ee49 A |
1728 | #endif /* !XNU_TARGET_OS_OSX */ |
1729 | #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ | |
cb323159 A |
1730 | return FALSE; |
1731 | } | |
1732 | ||
1733 | boolean_t | |
1734 | panic_stackshot_to_disk_enabled(void) | |
1735 | { | |
f427ee49 | 1736 | assert(startup_phase >= STARTUP_SUB_TUNABLES); |
cb323159 A |
1737 | #if defined(__x86_64__) |
1738 | if (PEGetCoprocessorVersion() < kCoprocessorVersion2) { | |
1739 | /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */ | |
1740 | if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) { | |
1741 | return FALSE; | |
1742 | } | |
1743 | ||
1744 | return TRUE; | |
1745 | } | |
1746 | #endif | |
1747 | return FALSE; | |
1748 | } | |
f427ee49 A |
1749 | |
#if DEBUG || DEVELOPMENT
/*
 * Return a kernel-virtual pointer to the bootloader-provided "preoslog"
 * buffer, storing its length in *size. On failure *size is set to 0 and
 * NULL is returned.
 */
const char *
sysctl_debug_get_preoslog(size_t *size)
{
	void *log_pa = NULL;
	int log_size = 0;
	int err;

	err = IODTGetLoaderInfo("preoslog", &log_pa, &log_size);
	if (err || log_pa == NULL || log_size == 0) {
		kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", err, log_pa, log_size);
		*size = 0;
		return NULL;
	}

	/*
	 * Beware:
	 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
	 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
	 */
	*size = log_size;
	return (char *)(ml_static_ptovirt((vm_offset_t)(log_pa)));
}
#endif /* DEBUG || DEVELOPMENT */