/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/percpu.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <IOKit/IOBSD.h>

#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>
#include <mach/resource_monitors.h>

#include <os/log_private.h>

#if defined(__arm__) || defined(__arm64__)
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );

unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
unsigned int panicDebugging = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if defined(__arm__) || defined(__arm64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm__)
#define TRAP_DEBUGGER __asm__ volatile("trap")
#elif defined(__arm64__)
/*
 * Magic number; this should be identical to the __arm__ encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop() pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop() panic_spin_forever()
#endif

struct debugger_state {
	uint64_t db_panic_options;
	debugger_op db_current_op;
	boolean_t db_proceed_on_sync_failure;
	const char *db_message;
	const char *db_panic_str;
	va_list *db_panic_args;
	void *db_panic_data_ptr;
	unsigned long db_panic_caller;
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t db_entry_count;
	kern_return_t db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

#define CPUDEBUGGEROP current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG current_debugger_state()->db_message
#define CPUPANICSTR current_debugger_state()->db_panic_str
#define CPUPANICARGS current_debugger_state()->db_panic_args
#define CPUPANICOPTS current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET current_debugger_state()->db_op_return
#define CPUPANICCALLER current_debugger_state()->db_panic_caller
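
/*
 * Illustrative sketch (not part of the original source): these accessors are
 * only meaningful while preemption is disabled, since they dereference the
 * current CPU's percpu slot. A caller typically brackets them like:
 *
 *	disable_preemption();
 *	CPUDEBUGGERCOUNT++;	// bump this CPU's nested panic/debugger level
 *	...
 *	CPUDEBUGGERCOUNT--;
 *	enable_preemption();
 */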

#if DEVELOPMENT || DEBUG
#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \
MACRO_BEGIN \
	if (requested) { \
		volatile int *badpointer = (int *)4; \
		*badpointer = 0; \
	} \
MACRO_END
#endif /* DEVELOPMENT || DEBUG */

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2;
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);

#define NESTEDDEBUGGERENTRYMAX 5
static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;

#if defined(__arm__) || defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm__) || defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm__) || defined(__arm64__) */
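
/*
 * Illustrative note (not in the original source): with the definitions above,
 * the usable log space on non-arm targets works out to exactly three pages.
 * For example, with 4 KiB pages:
 *
 *	DEBUG_BUF_SIZE = 3 * 4096 + offsetof(struct macos_panic_header, mph_data)
 *	debug_buf_size = DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data)
 *	               = 3 * 4096 = 12288 bytes of panic log data
 */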

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif

/* Debugger state */
atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;

/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, returning from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86.
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

/*
 * Maintain the physically-contiguous carveout for the `phys_carveout_mb`
 * boot-arg.
 */
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;

boolean_t
kernel_debugging_allowed(void)
{
#if XNU_TARGET_OS_OSX
#if CONFIG_CSR
	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
		return FALSE;
	}
#endif /* CONFIG_CSR */
	return TRUE;
#else /* XNU_TARGET_OS_OSX */
	return PE_i_can_has_debugger(NULL);
#endif /* XNU_TARGET_OS_OSX */
}

__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (kernel_debugging_allowed() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm__) || defined(__arm64__)
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm__) || defined(__arm64__) */
	}

	if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) {
		max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;
	}

#if defined(__arm__) || defined(__arm64__)
	char kdpname[80];

	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm__) || defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, returning from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86.
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);

#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_flags(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if defined(__arm__) || defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}

void
phys_carveout_init(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	unsigned int phys_carveout_mb = 0;

	if (!PE_parse_boot_argn("phys_carveout_mb", &phys_carveout_mb,
	    sizeof(phys_carveout_mb))) {
		return;
	}
	if (phys_carveout_mb == 0) {
		return;
	}

	size_t size = 0;
	if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &size)) {
		printf("phys_carveout_mb size overflowed (%uMB)\n",
		    phys_carveout_mb);
		return;
	}

	kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, size,
	    VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		printf("failed to allocate %uMB for phys_carveout_mb: %u\n",
		    phys_carveout_mb, (unsigned int)kr);
		return;
	}

	phys_carveout_pa = kvtophys(phys_carveout);
	phys_carveout_size = size;
}

static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}

static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}
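
/*
 * Illustrative sketch (not in the original source): debugger_cpu is used as a
 * simple CAS-based spinlock keyed by CPU number. The expected pairing, as used
 * by handle_debugger_trap() below, is roughly:
 *
 *	DebuggerLock();                              // spin until this CPU owns debugger_cpu
 *	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC); // quiesce the other cores
 *	...collect diagnostics / run the debugger op...
 *	DebuggerResumeOtherCores();
 *	DebuggerUnlock();                            // store DEBUGGER_NO_CPU + barrier
 */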

static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure)
{
#if defined(__arm__) || defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure);
#else /* defined(__arm__) || defined(__arm64__) */
#pragma unused(proceed_on_failure)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm__) || defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm__) || defined(__arm64__) */
	mp_kdp_exit();
#endif
}

static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	CPUDEBUGGEROP = db_op;

	/* Preserve the original panic message */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
	} else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
		kprintf("Nested panic detected:");
		if (db_panic_str != NULL) {
			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
		}
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	CPUPANICOPTS = db_panic_options;

	return;
}

/*
 * Save the requested debugger state/action into the current processor's
 * percpu state and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	ret = CPUDEBUGGERRET;

	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}

void __attribute__((noinline))
Assert(
	const char *file,
	int line,
	const char *expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}
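
/*
 * Illustrative note (assumption, not verified against kern/assert.h here):
 * the assert() macro used throughout the kernel typically expands to
 * something like
 *
 *	assert(ex)  ->  (ex) ? (void)0 : Assert(__FILE__, __LINE__, #ex)
 *
 * which is how the file name, line number, and stringified expression end up
 * as the arguments to Assert() above.
 */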

boolean_t
debug_is_current_cpu_in_panic_state(void)
{
	return current_debugger_state()->db_entry_count > 0;
}

void
Debugger(const char *message)
{
	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE);
}

void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (debugger_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
		}

		panic_spin_forever();
	}

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}

static struct kdp_callout {
	struct kdp_callout *callout_next;
	kdp_callout_fn_t callout_fn;
	boolean_t callout_in_progress;
	void *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
	struct kdp_callout *kcp;
	struct kdp_callout *list_head;

	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL) {
		panic("kdp_register_callout() kalloc failed");
	}

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
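
/*
 * Illustrative usage sketch (hypothetical names, not part of this file): a
 * subsystem that wants to be notified when the debugger is entered or exited
 * might register a callout from its init path like this:
 *
 *	static void
 *	my_subsystem_kdp_callout(void *arg, kdp_event_t event)
 *	{
 *		switch (event) {
 *		case KDP_EVENT_ENTER:    // quiesce hardware before the debugger runs
 *		case KDP_EVENT_PANICLOG: // contribute state before the paniclog is written
 *		case KDP_EVENT_EXIT:     // resume after the debugger returns
 *			break;
 *		}
 *	}
 *
 *	kdp_register_callout(my_subsystem_kdp_callout, NULL);
 */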

static void
kdp_callouts(kdp_event_t event)
{
	struct kdp_callout *kcp = kdp_callout_list;

	while (kcp) {
		if (!kcp->callout_in_progress) {
			kcp->callout_in_progress = TRUE;
			kcp->callout_fn(kcp->callout_arg, event);
			kcp->callout_in_progress = FALSE;
		}
		kcp = kcp->callout_next;
	}
}

#if defined(__arm__) || defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
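
/*
 * Illustrative usage sketch (hypothetical names, not part of this file): a
 * driver that wants its state captured in the panic log could register a
 * long-lived, statically allocated buffer once at startup, e.g.:
 *
 *	static char my_driver_panic_buf[256];   // must never be freed
 *
 *	register_additional_panic_data_buffer("my_driver",
 *	    my_driver_panic_buf, sizeof(my_driver_panic_buf));
 *
 * Only a single buffer may be registered, and the length must be within
 * ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN.
 */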
#endif /* defined(__arm__) || defined(__arm64__) */

/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}
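
/*
 * Illustrative usage sketch (hypothetical call sites, not part of this file):
 * most kernel code simply formats a message,
 *
 *	panic("my_subsystem: unexpected state %d for object %p", state, obj);
 *
 * while callers that need to influence the panic flow pass an options mask,
 * for example asking that the on-device coredump be skipped:
 *
 *	panic_with_options(0, NULL, DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP,
 *	    "my_subsystem: unrecoverable error");
 */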

#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	/* Turn off I/O tracing once we've panicked */
	mmiotrace_enabled = 0;
#endif

	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		panic_spin_forever();
	}

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__)
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}

void
panic_spin_forever(void)
{
	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");

	for (;;) {
	}
}

static void
kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
{
	printf("Attempting system restart...\n");
	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
	} else {
		PEHaltRestart(type);
	}
	halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_PRELOG));
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, so we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		} else {
			/*
			 * If we reached here, update the panic header to keep it as consistent
			 * as possible during a nested panic
			 */
			PE_update_panicheader_nestedpanic();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %d caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif /* __x86_64__ */
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTLOG));
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicDiagnosticsDone);
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)\n",
				    kdp_polled_corefile_error());
#if defined(__arm__) || defined(__arm64__)
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm__) || defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm__) || defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
			paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state\n");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that cannot be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTCORE));
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if coredump is successfully saved
			 * or if option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				PEHaltRestart(kPEPanicDiagnosticsDone);
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debugger_current_op == DBOP_PANIC ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		PEHaltRestart(kPEPanicDiagnosticsDone);
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm__) || defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm__) || defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm__) || defined(__arm64__) */

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	PEHaltRestart(kPEPanicDiagnosticsDone);

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	panic_spin_forever();
}

#if INTERRUPT_MASKED_DEBUG
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif

void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC);

	DEBUGGER_TRAP_TIMESTAMP(1);

#if INTERRUPT_MASKED_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif
	if (ret != KERN_SUCCESS) {
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}

__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;


#ifdef lint
	level++;
#endif /* lint */
#ifdef MACH_BSD
	va_start(listp, fmt);
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
	va_end(listp2);
#endif
}

/*
 * Per <rdar://problem/24974766>, skip appending log messages to
 * the new logging infrastructure in contexts where safety is
 * uncertain. These contexts include:
 *   - We're in the debugger
 *   - We're in a panic
 *   - Interrupts are disabled
 *   - Preemption is disabled
 * In all of the above cases, it is potentially unsafe to log messages.
 */

boolean_t
oslog_is_safe(void)
{
	return kernel_debugger_entry_count == 0 &&
	       not_in_kdp == 1 &&
	       get_preemption_level() == 0 &&
	       ml_get_interrupts_enabled() == TRUE;
}

boolean_t
debug_mode_active(void)
{
	return (0 != kernel_debugger_entry_count) || (0 == not_in_kdp);
}

void
debug_putc(char c)
{
	if ((debug_buf_size != 0) &&
	    ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
		*debug_buf_ptr = c;
		debug_buf_ptr++;
	}
}

#if defined (__x86_64__)
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 */
int
packA(char *inbuf, uint32_t length, uint32_t buflen)
{
	unsigned int i, j = 0;
	pasc_t pack;

	length = MIN(((length + 7) & ~7), buflen);

	for (i = 0; i < length; i += 8) {
		pack.a = inbuf[i];
		pack.b = inbuf[i + 1];
		pack.c = inbuf[i + 2];
		pack.d = inbuf[i + 3];
		pack.e = inbuf[i + 4];
		pack.f = inbuf[i + 5];
		pack.g = inbuf[i + 6];
		pack.h = inbuf[i + 7];
		bcopy((char *) &pack, inbuf + j, 7);
		j += 7;
	}
	return j;
}

void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8) / 7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
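
/*
 * Illustrative note (not in the original source): packA() relies on paniclog
 * text being 7-bit ASCII, so each group of 8 input bytes can be re-encoded via
 * the bitfields of struct pasc as 8 x 7 = 56 bits = 7 output bytes. For
 * example, a full 4096-byte log shrinks to 4096 * 7 / 8 = 3584 bytes;
 * unpackA() reverses the transform in place.
 */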
1451 | #endif /* defined (__x86_64__) */ | |
1452 | ||
1453 | extern char *proc_name_address(void *); | |
1454 | extern char *proc_longname_address(void *); | |
1455 | ||
1456 | __private_extern__ void | |
1457 | panic_display_process_name(void) | |
1458 | { | |
1459 | proc_name_t proc_name = {}; | |
1460 | task_t ctask = 0; | |
1461 | void *cbsd_info = 0; | |
1462 | vm_size_t size; | |
1463 | ||
1464 | size = ml_nofault_copy((vm_offset_t)&current_thread()->task, | |
1465 | (vm_offset_t)&ctask, sizeof(task_t)); | |
1466 | if (size != sizeof(task_t)) { | |
1467 | goto out; | |
1468 | } | |
1469 | ||
1470 | size = ml_nofault_copy((vm_offset_t)&ctask->bsd_info, | |
1471 | (vm_offset_t)&cbsd_info, sizeof(cbsd_info)); | |
1472 | if (size != sizeof(cbsd_info)) { | |
1473 | goto out; | |
1474 | } | |
1475 | ||
1476 | if (cbsd_info == NULL) { | |
1477 | goto out; | |
1478 | } | |
1479 | ||
1480 | size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info), | |
1481 | (vm_offset_t)&proc_name, sizeof(proc_name)); | |
1482 | ||
1483 | if (size == 0 || proc_name[0] == '\0') { | |
1484 | size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info), | |
1485 | (vm_offset_t)&proc_name, | |
1486 | MIN(sizeof(command_t), sizeof(proc_name))); | |
1487 | if (size > 0) { | |
1488 | proc_name[size - 1] = '\0'; | |
1489 | } | |
1490 | } | |
1491 | ||
1492 | out: | |
1493 | proc_name[sizeof(proc_name) - 1] = '\0'; | |
1494 | paniclog_append_noflush("\nProcess name corresponding to current thread: %s\n", | |
1495 | proc_name[0] != '\0' ? proc_name : "Unknown"); | |
1496 | } | |
1497 | ||
1498 | unsigned | |
1499 | panic_active(void) | |
1500 | { | |
1501 | return debugger_panic_str != (char *) 0; | |
1502 | } | |
1503 | ||
1504 | void | |
1505 | populate_model_name(char *model_string) | |
1506 | { | |
1507 | strlcpy(model_name, model_string, sizeof(model_name)); | |
1508 | } | |
1509 | ||
1510 | void | |
1511 | panic_display_model_name(void) | |
1512 | { | |
1513 | char tmp_model_name[sizeof(model_name)]; | |
1514 | ||
1515 | if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) { | |
1516 | return; | |
1517 | } | |
1518 | ||
1519 | tmp_model_name[sizeof(tmp_model_name) - 1] = '\0'; | |
1520 | ||
1521 | if (tmp_model_name[0] != 0) { | |
1522 | paniclog_append_noflush("System model name: %s\n", tmp_model_name); | |
1523 | } | |
1524 | } | |
1525 | ||
1526 | void | |
1527 | panic_display_kernel_uuid(void) | |
1528 | { | |
1529 | char tmp_kernel_uuid[sizeof(kernel_uuid_string)]; | |
1530 | ||
1531 | if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) { | |
1532 | return; | |
1533 | } | |
1534 | ||
1535 | if (tmp_kernel_uuid[0] != '\0') { | |
1536 | paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid); | |
1537 | } | |
1538 | } | |
1539 | ||
1540 | void | |
1541 | panic_display_kernel_aslr(void) | |
1542 | { | |
1543 | kc_format_t kc_format; | |
1544 | ||
1545 | PE_get_primary_kc_format(&kc_format); | |
1546 | ||
1547 | if (kc_format == KCFormatFileset) { | |
1548 | void *kch = PE_get_kc_header(KCKindPrimary); | |
1549 | ||
1550 | paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); | |
1551 | paniclog_append_noflush("KernelCache base: %p\n", (void*) kch); | |
1552 | paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide); | |
1553 | } else if (vm_kernel_slide) { | |
1554 | paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); | |
1555 | } | |
1556 | paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext); | |
1557 | #if defined(__arm64__) | |
1558 | if (kc_format == KCFormatFileset) { | |
1559 | extern vm_offset_t segTEXTEXECB; | |
1560 | paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB); | |
1561 | } | |
1562 | #endif | |
1563 | } | |
1564 | ||
1565 | void | |
1566 | panic_display_hibb(void) | |
1567 | { | |
1568 | #if defined(__i386__) || defined (__x86_64__) | |
1569 | paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base); | |
1570 | #endif | |
1571 | } | |
1572 | ||
1573 | extern unsigned int stack_total; | |
1574 | extern unsigned long long stack_allocs; | |
1575 | ||
1576 | #if defined (__x86_64__) | |
1577 | extern unsigned int inuse_ptepages_count; | |
1578 | extern long long alloc_ptepages_count; | |
1579 | #endif | |
1580 | ||
1581 | __private_extern__ void | |
1582 | panic_display_zprint(void) | |
1583 | { | |
1584 | if (panic_include_zprint == TRUE) { | |
1585 | struct zone zone_copy; | |
1586 | ||
1587 | paniclog_append_noflush("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size"); | |
1588 | zone_index_foreach(i) { | |
1589 | if (ml_nofault_copy((vm_offset_t)&zone_array[i], | |
1590 | (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { | |
1591 | if (zone_copy.z_wired_cur > atop(1024 * 1024)) { | |
1592 | paniclog_append_noflush("%-8s%-20s %10llu %10lu\n", | |
1593 | zone_heap_name(&zone_copy), | |
1594 | zone_copy.z_name, (uint64_t)zone_size_wired(&zone_copy), | |
1595 | (uintptr_t)zone_size_free(&zone_copy)); | |
1596 | } | |
1597 | } | |
1598 | } | |
1599 | ||
1600 | paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks", | |
1601 | (uintptr_t)(kernel_stack_size * stack_total)); | |
1602 | #if defined (__x86_64__) | |
1603 | paniclog_append_noflush("%-20s %10lu\n", "PageTables", | |
1604 | (uintptr_t)ptoa(inuse_ptepages_count)); | |
1605 | #endif | |
1606 | paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large", | |
1607 | (uintptr_t)kalloc_large_total); | |
1608 | ||
1609 | if (panic_kext_memory_info) { | |
1610 | mach_memory_info_t *mem_info = panic_kext_memory_info; | |
1611 | paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size"); | |
1612 | for (uint32_t i = 0; i < (panic_kext_memory_size / sizeof(mach_zone_info_t)); i++) { | |
1613 | if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && | |
1614 | (mem_info[i].size > (1024 * 1024))) { | |
1615 | paniclog_append_noflush("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size); | |
1616 | } | |
1617 | } | |
1618 | } | |
1619 | } | |
1620 | } | |
1621 | ||
1622 | #if CONFIG_ECC_LOGGING | |
1623 | __private_extern__ void | |
1624 | panic_display_ecc_errors(void) | |
1625 | { | |
1626 | uint32_t count = ecc_log_get_correction_count(); | |
1627 | ||
1628 | if (count > 0) { | |
1629 | paniclog_append_noflush("ECC Corrections:%u\n", count); | |
1630 | } | |
1631 | } | |
1632 | #endif /* CONFIG_ECC_LOGGING */ | |
1633 | ||
1634 | #if CONFIG_ZLEAKS | |
1635 | void panic_print_symbol_name(vm_address_t search); | |
1636 | ||
1637 | /* | |
1638 | * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator. | |
1639 | * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c | |
1640 | */ | |
1641 | __private_extern__ void | |
1642 | panic_display_ztrace(void) | |
1643 | { | |
1644 | if (panic_include_ztrace == TRUE) { | |
1645 | unsigned int i = 0; | |
1646 | boolean_t keepsyms = FALSE; | |
1647 | ||
1648 | PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms)); | |
1649 | struct ztrace top_ztrace_copy; | |
1650 | ||
1651 | /* Make sure not to trip another panic if there's something wrong with memory */ | |
1652 | if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) { | |
1653 | paniclog_append_noflush("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size); | |
1654 | /* Print the backtrace addresses */ | |
1655 | for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) { | |
1656 | paniclog_append_noflush("%p ", top_ztrace_copy.zt_stack[i]); | |
1657 | if (keepsyms) { | |
1658 | panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]); | |
1659 | } | |
1660 | paniclog_append_noflush("\n"); | |
1661 | } | |
1662 | /* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */ | |
1663 | kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth); | |
1664 | } else { | |
1665 | paniclog_append_noflush("\nCan't access top_ztrace...\n"); | |
1666 | } | |
1667 | paniclog_append_noflush("\n"); | |
1668 | } | |
1669 | } | |
1670 | #endif /* CONFIG_ZLEAKS */ | |
1671 | ||
1672 | #if !CONFIG_TELEMETRY | |
1673 | int | |
1674 | telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean_t mark __unused) | |
1675 | { | |
1676 | return KERN_NOT_SUPPORTED; | |
1677 | } | |
1678 | #endif | |
1679 | ||
1680 | #include <machine/machine_cpu.h> | |
1681 | ||
1682 | uint32_t kern_feature_overrides = 0; | |
1683 | ||
1684 | boolean_t | |
1685 | kern_feature_override(uint32_t fmask) | |
1686 | { | |
1687 | if (kern_feature_overrides == 0) { | |
1688 | uint32_t fdisables = 0; | |
1689 | /* | |
1690 | * Expected to be first invoked early, in a single-threaded | |
1691 | * environment | |
1692 | */ | |
1693 | if (PE_parse_boot_argn("validation_disables", &fdisables, sizeof(fdisables))) { | |
1694 | fdisables |= KF_INITIALIZED; | |
1695 | kern_feature_overrides = fdisables; | |
1696 | } else { | |
1697 | kern_feature_overrides |= KF_INITIALIZED; | |
1698 | } | |
1699 | } | |
1700 | return (kern_feature_overrides & fmask) == fmask; | |
1701 | } | |
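/*
 * Hedged usage sketch (assumed caller; the flag and helper below are
 * hypothetical): a subsystem can skip an expensive self-check when the
 * corresponding bit was supplied through the "validation_disables" boot-arg.
 *
 *   #define KF_EXAMPLE_CHECK_OVRD 0x4          // hypothetical flag bit
 *
 *   if (!kern_feature_override(KF_EXAMPLE_CHECK_OVRD)) {
 *       run_expensive_self_check();            // hypothetical helper
 *   }
 */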
1702 | ||
1703 | boolean_t | |
1704 | on_device_corefile_enabled(void) | |
1705 | { | |
1706 | assert(startup_phase >= STARTUP_SUB_TUNABLES); | |
1707 | #if CONFIG_KDP_INTERACTIVE_DEBUGGING | |
1708 | if (debug_boot_arg == 0) { | |
1709 | return FALSE; | |
1710 | } | |
1711 | if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) { | |
1712 | return FALSE; | |
1713 | } | |
1714 | #if !XNU_TARGET_OS_OSX | |
1715 | /* | |
1716 | * outside of macOS, if there's a debug boot-arg set and local | |
1717 | * cores aren't explicitly disabled, we always write a corefile. | |
1718 | */ | |
1719 | return TRUE; | |
1720 | #else /* !XNU_TARGET_OS_OSX */ | |
1721 | /* | |
1722 | * on macOS, if corefiles on panic are requested and local cores | |
1723 | * aren't disabled we write a local core. | |
1724 | */ | |
1725 | if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) { | |
1726 | return TRUE; | |
1727 | } | |
1728 | #endif /* !XNU_TARGET_OS_OSX */ | |
1729 | #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ | |
1730 | return FALSE; | |
1731 | } | |
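/*
 * Illustrative caller sketch (an assumption, not taken from this file): the
 * panic/debugger path can gate local corefile generation on this check, e.g.
 *
 *   if (on_device_corefile_enabled()) {
 *       (void) kern_dump(KERN_DUMP_DISK);   // write the corefile to local storage
 *   }
 */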
1732 | ||
1733 | boolean_t | |
1734 | panic_stackshot_to_disk_enabled(void) | |
1735 | { | |
1736 | assert(startup_phase >= STARTUP_SUB_TUNABLES); | |
1737 | #if defined(__x86_64__) | |
1738 | if (PEGetCoprocessorVersion() < kCoprocessorVersion2) { | |
1739 | /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */ | |
1740 | if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) { | |
1741 | return FALSE; | |
1742 | } | |
1743 | ||
1744 | return TRUE; | |
1745 | } | |
1746 | #endif | |
1747 | return FALSE; | |
1748 | } | |
1749 | ||
1750 | #if DEBUG || DEVELOPMENT | |
1751 | const char * | |
1752 | sysctl_debug_get_preoslog(size_t *size) | |
1753 | { | |
1754 | int result = 0; | |
1755 | void *preoslog_pa = NULL; | |
1756 | int preoslog_size = 0; | |
1757 | ||
1758 | result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size); | |
1759 | if (result || preoslog_pa == NULL || preoslog_size == 0) { | |
1760 | kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size); | |
1761 | *size = 0; | |
1762 | return NULL; | |
1763 | } | |
1764 | ||
1765 | /* | |
1766 | * Beware: | |
1767 | * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer. | |
1768 | * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. | |
1769 | */ | |
1770 | *size = preoslog_size; | |
1771 | return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa))); | |
1772 | } | |
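/*
 * Illustrative consumer sketch (hypothetical; the actual sysctl handler lives
 * elsewhere in the kernel): copy the preoslog region out only when one was
 * found.
 *
 *   size_t preoslog_len = 0;
 *   const char *preoslog = sysctl_debug_get_preoslog(&preoslog_len);
 *   if (preoslog != NULL && preoslog_len != 0) {
 *       // ... hand the preoslog_len bytes at preoslog back to the requester ...
 *   }
 */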
1773 | #endif /* DEBUG || DEVELOPMENT */ |