/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>

#include <os/log_private.h>

#if CONFIG_EMBEDDED
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
unsigned int panicDebugging = FALSE;
unsigned int kdebug_serial = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if !defined (__x86_64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm__)
#define TRAP_DEBUGGER __asm__ volatile("trap")
#elif defined(__arm64__)
/*
 * Magic number; this should be identical to the __arm__ encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop() pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop() panic_spin_forever()
#endif

#define CPUDEBUGGEROP    PROCESSOR_DATA(current_processor(), debugger_state).db_current_op
#define CPUDEBUGGERMSG   PROCESSOR_DATA(current_processor(), debugger_state).db_message
#define CPUPANICSTR      PROCESSOR_DATA(current_processor(), debugger_state).db_panic_str
#define CPUPANICARGS     PROCESSOR_DATA(current_processor(), debugger_state).db_panic_args
#define CPUPANICOPTS     PROCESSOR_DATA(current_processor(), debugger_state).db_panic_options
#define CPUPANICDATAPTR  PROCESSOR_DATA(current_processor(), debugger_state).db_panic_data_ptr
#define CPUDEBUGGERSYNC  PROCESSOR_DATA(current_processor(), debugger_state).db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT PROCESSOR_DATA(current_processor(), debugger_state).db_entry_count
#define CPUDEBUGGERRET   PROCESSOR_DATA(current_processor(), debugger_state).db_op_return
#define CPUPANICCALLER   PROCESSOR_DATA(current_processor(), debugger_state).db_panic_caller

#if DEVELOPMENT || DEBUG
#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \
MACRO_BEGIN                                                     \
    if (requested) {                                            \
        volatile int *badpointer = (int *)4;                    \
        *badpointer = 0;                                        \
    }                                                           \
MACRO_END
#endif /* DEVELOPMENT || DEBUG */

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data, unsigned long panic_caller);
static void kdp_machine_reboot_type(unsigned int type);
__attribute__((noreturn)) void panic_spin_forever(void);
extern kern_return_t do_stackshot(void);

int mach_assert = 1;

#define NESTEDDEBUGGERENTRYMAX 5

#if CONFIG_EMBEDDED
#define DEBUG_BUF_SIZE (4096)
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#else
/*
 * EXTENDED_/DEBUG_BUF_SIZE can't grow without updates to SMC and iBoot to store larger panic logs on co-processor systems
 */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
#define EXTENDED_DEBUG_BUF_SIZE 0x0013ff80
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#endif

/* debug_buf is directly linked with iBoot panic region for embedded targets */
#if CONFIG_EMBEDDED
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;
#else
char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif

/* Debugger state */
atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86
 */
static boolean_t debugger_is_panic = TRUE;

#if DEVELOPMENT || DEBUG
boolean_t debug_boot_arg_inited = FALSE;
#endif

SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg;

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

void
panic_init(void)
{
    unsigned long uuidlen = 0;
    void *uuid;

    uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
    if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
        kernel_uuid = uuid;
        uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
    }

    if (!PE_parse_boot_argn("assertions", &mach_assert, sizeof(mach_assert))) {
        mach_assert = 1;
    }

    /*
     * Initialize the value of the debug boot-arg
     */
    debug_boot_arg = 0;
#if ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__))
    if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof(debug_boot_arg))) {
#if DEVELOPMENT || DEBUG
        if (debug_boot_arg & DB_HALT) {
            halt_in_debugger = 1;
        }
#endif

#if CONFIG_EMBEDDED
        if (debug_boot_arg & DB_NMI) {
            panicDebugging = TRUE;
        }
#else
        panicDebugging = TRUE;
#if KDEBUG_MOJO_TRACE
        if (debug_boot_arg & DB_PRT_KDEBUG) {
            kdebug_serial = TRUE;
        }
#endif
#endif /* CONFIG_EMBEDDED */
    }
#endif /* ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__)) */

#if DEVELOPMENT || DEBUG
    debug_boot_arg_inited = TRUE;
#endif

#if !CONFIG_EMBEDDED
    /*
     * By default we treat Debugger() the same as calls to panic(), unless
     * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set.
     * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
     * This is because writing an on-device corefile is a destructive operation.
     *
     * Return from Debugger() is currently only implemented on x86
     */
    if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
        debugger_is_panic = FALSE;
    }
#endif
}

#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
    assert(coprocessor_paniclog_flush);
    /*
     * Allocate an extended panic log buffer that has space for the panic
     * stackshot at the end. Update the debug buf pointers appropriately
     * to point at this new buffer.
     */
    char *new_debug_buf = kalloc(EXTENDED_DEBUG_BUF_SIZE);
    /*
     * iBoot pre-initializes the panic region with the NULL character. We set this here
     * so we can accurately calculate the CRC for the region without needing to flush the
     * full region over SMC.
     */
    memset(new_debug_buf, '\0', EXTENDED_DEBUG_BUF_SIZE);

    panic_info = (struct macos_panic_header *)new_debug_buf;
    debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
    debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

    extended_debug_log_enabled = TRUE;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if CONFIG_EMBEDDED
    if (!gPanicBase) {
        printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
        return;
    }
    /* Shift debug buf start location and size by the length of the panic header */
    debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
    debug_buf_ptr = debug_buf_base;
    debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
    bzero(panic_info, DEBUG_BUF_SIZE);

    assert(debug_buf_base != NULL);
    assert(debug_buf_ptr != NULL);
    assert(debug_buf_size != 0);
#endif
}

static void
DebuggerLock()
{
    int my_cpu = cpu_number();
    int debugger_exp_cpu = DEBUGGER_NO_CPU;
    assert(ml_get_interrupts_enabled() == FALSE);

    if (debugger_cpu == my_cpu) {
        return;
    }

    while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
        debugger_exp_cpu = DEBUGGER_NO_CPU;
    }

    return;
}

static void
DebuggerUnlock()
{
    assert(debugger_cpu == cpu_number());

    /*
     * We don't do an atomic exchange here in case
     * there's another CPU spinning to acquire the debugger_lock
     * and we never get a chance to update it. We already have the
     * lock so we can simply store DEBUGGER_NO_CPU and follow with
     * a barrier.
     */
    debugger_cpu = DEBUGGER_NO_CPU;
    OSMemoryBarrier();

    return;
}

static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure)
{
#if CONFIG_EMBEDDED
    return DebuggerXCallEnter(proceed_on_failure);
#else /* CONFIG_EMBEDDED */
#pragma unused(proceed_on_failure)
    mp_kdp_enter(proceed_on_failure);
    return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores()
{
#if CONFIG_EMBEDDED
    DebuggerXCallReturn();
#else /* CONFIG_EMBEDDED */
    mp_kdp_exit();
#endif
}

static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
    CPUDEBUGGEROP = db_op;

    /* Preserve the original panic message */
    if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
        CPUDEBUGGERMSG = db_message;
        CPUPANICSTR = db_panic_str;
        CPUPANICARGS = db_panic_args;
        CPUPANICDATAPTR = db_panic_data_ptr;
        CPUPANICCALLER = db_panic_caller;
    } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
        kprintf("Nested panic detected:");
        if (db_panic_str != NULL) {
            _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
        }
    }

    CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
    CPUDEBUGGERRET = KERN_SUCCESS;

    /* Reset these on any nested panics */
    CPUPANICOPTS = db_panic_options;

    return;
}

/*
 * Save the requested debugger state/action into the current processor's processor_data
 * and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
    kern_return_t ret;

    assert(ml_get_interrupts_enabled() == FALSE);
    DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
        db_panic_options, db_panic_data_ptr,
        db_proceed_on_sync_failure, db_panic_caller);

    TRAP_DEBUGGER;

    ret = CPUDEBUGGERRET;

    DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

    return ret;
}

void __attribute__((noinline))
Assert(
    const char *file,
    int line,
    const char *expression
    )
{
    if (!mach_assert) {
        kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
        return;
    }

    panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}

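/*
 * Illustrative sketch (not part of this file): callers normally reach Assert()
 * through an assert()-style macro rather than invoking it directly. Assuming a
 * wrapper shaped like the one provided by <kern/assert.h>, a hypothetical local
 * macro would expand roughly as follows; with mach_assert == 0 a failed check
 * only kprintf()s, otherwise it panics via panic_plain().
 */
#define example_assert(ex) \
    ((ex) ? (void)0 : Assert(__FILE__, __LINE__, #ex))
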
void
Debugger(const char *message)
{
    DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE);
}

void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask)
{
    spl_t previous_interrupts_state;
    boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

    previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
    disable_preemption();

    CPUDEBUGGERCOUNT++;

    if (CPUDEBUGGERCOUNT > NESTEDDEBUGGERENTRYMAX) {
        static boolean_t in_panic_kprintf = FALSE;

        /* Notify any listeners that we've started a panic */
        PEHaltRestart(kPEPanicBegin);

        if (!in_panic_kprintf) {
            in_panic_kprintf = TRUE;
            kprintf("Detected nested debugger entry count exceeding %d\n",
                NESTEDDEBUGGERENTRYMAX);
            in_panic_kprintf = FALSE;
        }

        if (!panicDebugging) {
            kdp_machine_reboot_type(kPEPanicRestartCPU);
        }

        panic_spin_forever();
    }

#if DEVELOPMENT || DEBUG
    DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

    doprnt_hide_pointers = FALSE;

    if (ctx != NULL) {
        DebuggerSaveState(DBOP_DEBUGGER, message,
            NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
        handle_debugger_trap(reason, 0, 0, ctx);
        DebuggerSaveState(DBOP_NONE, NULL, NULL,
            NULL, 0, NULL, FALSE, 0);
    } else {
        DebuggerTrapWithState(DBOP_DEBUGGER, message,
            NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
    }

    CPUDEBUGGERCOUNT--;
    doprnt_hide_pointers = old_doprnt_hide_pointers;
    enable_preemption();
    ml_set_interrupts_enabled(previous_interrupts_state);
}

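/*
 * Illustrative sketch (not part of this file): a hypothetical watchdog handler
 * could enter the debugger with a message and ask for a paniclog followed by a
 * reboot. DEBUGGER_OPTION_PANICLOGANDREBOOT is the option this file handles for
 * that behavior; the function and call site below are purely an example.
 */
static void
example_watchdog_expired(void)
{
    DebuggerWithContext(0 /* reason */, NULL /* ctx */,
        "example watchdog timeout", DEBUGGER_OPTION_PANICLOGANDREBOOT);
}
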
static struct kdp_callout {
    struct kdp_callout  *callout_next;
    kdp_callout_fn_t    callout_fn;
    boolean_t           callout_in_progress;
    void                *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
    struct kdp_callout *kcp;
    struct kdp_callout *list_head;

    kcp = kalloc(sizeof(*kcp));
    if (kcp == NULL) {
        panic("kdp_register_callout() kalloc failed");
    }

    kcp->callout_fn = fn;
    kcp->callout_arg = arg;
    kcp->callout_in_progress = FALSE;

    /* Lock-less list insertion using compare and exchange. */
    do {
        list_head = kdp_callout_list;
        kcp->callout_next = list_head;
    } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}
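
/*
 * Illustrative sketch (not part of this file): a subsystem that wants to know
 * when the debugger is entered, exited, or is about to write the paniclog can
 * register a callout. The callback name and body below are hypothetical; the
 * argument-first signature matches how kdp_callouts() below invokes callbacks,
 * and the events are the KDP_EVENT_* values it delivers.
 */
static void
example_kdp_event(__unused void *arg, kdp_event_t event)
{
    if (event == KDP_EVENT_PANICLOG) {
        /* e.g. flush a driver-private log buffer so it lands in the paniclog */
    }
}

static void
example_register_kdp_callout(void)
{
    kdp_register_callout(example_kdp_event, NULL);
}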

static void
kdp_callouts(kdp_event_t event)
{
    struct kdp_callout *kcp = kdp_callout_list;

    while (kcp) {
        if (!kcp->callout_in_progress) {
            kcp->callout_in_progress = TRUE;
            kcp->callout_fn(kcp->callout_arg, event);
            kcp->callout_in_progress = FALSE;
        }
        kcp = kcp->callout_next;
    }
}

#if !defined (__x86_64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
    if (panic_data_buffers != NULL) {
        panic("register_additional_panic_data_buffer called with buffer already registered");
    }

    if (producer_name == NULL || (strlen(producer_name) == 0)) {
        panic("register_additional_panic_data_buffer called with invalid producer_name");
    }

    if (buf == NULL) {
        panic("register_additional_panic_data_buffer called with invalid buffer pointer");
    }

    if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
        panic("register_additional_panic_data_buffer called with invalid length");
    }

    struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
    new_panic_data_buffer->producer_name = producer_name;
    new_panic_data_buffer->buf = buf;
    new_panic_data_buffer->len = len;

    if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
        panic("register_additional_panic_data_buffer called with buffer already registered");
    }

    return;
}
#endif /* !defined (__x86_64__) */
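
/*
 * Illustrative sketch (not part of this file): on non-x86_64 targets a
 * subsystem could publish a small, statically allocated buffer to be picked up
 * by the panic path. The producer name and buffer below are hypothetical, and
 * the buffer size is assumed to be within ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN.
 * As the comment above notes, neither the name nor the buffer may ever be
 * freed once registered.
 */
#if !defined (__x86_64__)
static char example_panic_blob[128];

static void
example_publish_panic_data(void)
{
    register_additional_panic_data_buffer("example.subsystem",
        example_panic_blob, (int)sizeof(example_panic_blob));
}
#endif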

/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's processor_data_t prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
void
panic(const char *str, ...)
{
    va_list panic_str_args;

    va_start(panic_str_args, str);
    panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
    va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
    va_list panic_str_args;

    va_start(panic_str_args, str);
    panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
        NULL, (unsigned long)(char *)__builtin_return_address(0));
    va_end(panic_str_args);
}

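/*
 * Illustrative sketch (not part of this file): panic_with_options() lets a
 * caller pass a reason/context plus public debugger option flags through to
 * panic_trap_to_debugger(); internal-only flags are masked off. The option
 * shown here is handled later in this file, but the call site itself is
 * hypothetical.
 */
static void
example_fatal_error(void)
{
    panic_with_options(0 /* reason */, NULL /* ctx */,
        DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP,
        "example subsystem: unrecoverable state (code 0x%x)", 0xdead);
}
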
#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
    va_list panic_str_args;

    assert_thread_magic(thread);

    /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
    thread_reference(thread);

    va_start(panic_str_args, str);
    panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
        thread, (unsigned long)(char *)__builtin_return_address(0));

    va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
    /* Turn off I/O tracing once we've panicked */
    mmiotrace_enabled = 0;
#endif

    if (ml_wants_panic_trap_to_debugger()) {
        ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

        /*
         * This should not return, but we return here for the tail call
         * as it simplifies the backtrace.
         */
        return;
    }

    CPUDEBUGGERCOUNT++;

    if (CPUDEBUGGERCOUNT > NESTEDDEBUGGERENTRYMAX) {
        static boolean_t in_panic_kprintf = FALSE;

        /* Notify any listeners that we've started a panic */
        PEHaltRestart(kPEPanicBegin);

        if (!in_panic_kprintf) {
            in_panic_kprintf = TRUE;
            kprintf("Detected nested debugger entry count exceeding %d\n",
                NESTEDDEBUGGERENTRYMAX);
            in_panic_kprintf = FALSE;
        }

        if (!panicDebugging) {
            kdp_machine_reboot_type(kPEPanicRestartCPU);
        }

        panic_spin_forever();
    }

#if DEVELOPMENT || DEBUG
    DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

#if CONFIG_EMBEDDED
    if (PE_arm_debug_panic_hook) {
        PE_arm_debug_panic_hook(panic_format_str);
    }
#endif

#if defined (__x86_64__)
    plctrace_disable();
#endif

    if (write_trace_on_panic && kdebug_enable) {
        if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
            ml_set_interrupts_enabled(TRUE);
            KDBG_RELEASE(TRACE_PANIC);
            kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME);
        }
    }

    ml_set_interrupts_enabled(FALSE);
    disable_preemption();

#if defined (__x86_64__)
    pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

    /* Never hide pointers from panic logs. */
    doprnt_hide_pointers = FALSE;

    if (ctx != NULL) {
        /*
         * We called into panic from a trap, no need to trap again. Set the
         * state on the current CPU and then jump to handle_debugger_trap.
         */
        DebuggerSaveState(DBOP_PANIC, "panic",
            panic_format_str, panic_args,
            panic_options_mask, panic_data_ptr, TRUE, panic_caller);
        handle_debugger_trap(reason, 0, 0, ctx);
    }

#if defined(__arm64__)
    /*
     * Signal to fastsim that it should open debug ports (nop on hardware)
     */
    __asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

    DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
        panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

    /*
     * Not reached.
     */
    panic_stop();
}

__attribute__((noreturn))
void
panic_spin_forever()
{
    paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");

    for (;;) {
    }
}

static void
kdp_machine_reboot_type(unsigned int type)
{
    printf("Attempting system restart...");
    PEHaltRestart(type);
    halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
    kdp_machine_reboot_type(kPEPanicRestartCPU);
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
    DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_PRELOG));
#endif

#if defined(__x86_64__)
    kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
    /*
     * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
     * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
     * we'll just spin in kdp_raise_exception.
     */
    if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
        kdp_raise_exception(exception, code, subcode, state);
        if (debugger_safe_to_return && !debugger_is_panic) {
            return;
        }
    }

    if ((debugger_current_op == DBOP_PANIC) ||
        ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
        /*
         * Attempt to notify listeners once and only once that we've started
         * panicking. Only do this for Debugger() calls if we're treating
         * Debugger() calls like panic().
         */
        PEHaltRestart(kPEPanicBegin);

        /*
         * Set the begin pointer in the panic log structure. We key off of this
         * static variable rather than contents from the panic header itself in case someone
         * has stomped over the panic_info structure. Also initializes the header magic.
         */
        static boolean_t began_writing_paniclog = FALSE;
        if (!began_writing_paniclog) {
            PE_init_panicheader();
            began_writing_paniclog = TRUE;
        } else {
            /*
             * If we reached here, update the panic header to keep it as consistent
             * as possible during a nested panic
             */
            PE_update_panicheader_nestedpanic();
        }
    }

    /*
     * Write panic string if this was a panic.
     *
     * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
     */
    if (debugger_current_op == DBOP_PANIC) {
        paniclog_append_noflush("panic(cpu %d caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
        if (debugger_panic_str) {
            _doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
        }
        paniclog_append_noflush("\n");
    }
#if defined(__x86_64__)
    else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
        paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
    }

    /*
     * Debugger() is treated like panic() on embedded -- for example we use it for WDT
     * panics (so we need to write a paniclog). On desktop Debugger() is used in the
     * conventional sense.
     */
    if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif
    {
        kdp_callouts(KDP_EVENT_PANICLOG);

        /*
         * Write paniclog and panic stackshot (if supported)
         * TODO: Need to clear panic log when return from debugger
         * hooked up for embedded
         */
        SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
        DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTLOG));
#endif

        /* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
        if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
            PEHaltRestart(kPEPanicRestartCPU);
        }
    }

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
    /*
     * If reboot on panic is enabled and the caller of panic indicated that we should skip
     * local coredumps, don't try to write these and instead go straight to reboot. This
     * allows us to persist any data that's stored in the panic log.
     */
    if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
        (debug_boot_arg & DB_REBOOT_POST_CORE)) {
        kdp_machine_reboot_type(kPEPanicRestartCPU);
    }

    /*
     * Consider generating a local corefile if the infrastructure is configured
     * and we haven't disabled on-device coredumps.
     */
    if (!(debug_boot_arg & DB_DISABLE_LOCAL_CORE)) {
        if (!kdp_has_polled_corefile()) {
            if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
                paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)",
                    kdp_polled_corefile_error());
#if CONFIG_EMBEDDED
                panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
                paniclog_flush();
#else /* CONFIG_EMBEDDED */
                if (panic_info->mph_panic_log_offset != 0) {
                    panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
                    paniclog_flush();
                }
#endif /* CONFIG_EMBEDDED */
            }
        } else {
            int ret = -1;

#if defined (__x86_64__)
            /* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
            if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
            {
                /*
                 * Doing an on-device coredump leaves the disk driver in a state
                 * that can not be resumed.
                 */
                debugger_safe_to_return = FALSE;
                begin_panic_transfer();
                ret = kern_dump(KERN_DUMP_DISK);
                abort_panic_transfer();

#if DEVELOPMENT || DEBUG
                DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTCORE));
#endif
            }

            /*
             * If DB_REBOOT_POST_CORE is set, then reboot if coredump is successfully saved
             * or if option to ignore failures is set.
             */
            if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
                ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
                kdp_machine_reboot_type(kPEPanicRestartCPU);
            }
        }
    }

    if (debug_boot_arg & DB_REBOOT_ALWAYS) {
        kdp_machine_reboot_type(kPEPanicRestartCPU);
    }

    /* If KDP is configured, try to trap to the debugger */
    if (current_debugger != NO_CUR_DB) {
        kdp_raise_exception(exception, code, subcode, state);
        /*
         * Only return if we entered via Debugger and it's safe to return
         * (we halted the other cores successfully, this isn't a nested panic, etc)
         */
        if (debugger_current_op == DBOP_DEBUGGER &&
            debugger_safe_to_return &&
            kernel_debugger_entry_count == 1 &&
            !debugger_is_panic) {
            return;
        }
    }

#if CONFIG_EMBEDDED
    if (panicDebugging) {
        /* If panic debugging is configured, spin for astris to connect */
        panic_spin_shmcon();
    }
#endif /* CONFIG_EMBEDDED */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

    if (!panicDebugging) {
        kdp_machine_reboot_type(kPEPanicRestartCPU);
    }

    panic_spin_forever();
}

#if INTERRUPT_MASKED_DEBUG
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif

void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
    unsigned int initial_not_in_kdp = not_in_kdp;
    kern_return_t ret;
    debugger_op db_prev_op = debugger_current_op;

    DEBUGGER_TRAP_TIMESTAMP(0);

    DebuggerLock();
    ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC);

    DEBUGGER_TRAP_TIMESTAMP(1);

#if INTERRUPT_MASKED_DEBUG
    if (serialmode & SERIALMODE_OUTPUT) {
        ml_spin_debug_reset(current_thread());
    }
#endif
    if (ret != KERN_SUCCESS) {
        CPUDEBUGGERRET = ret;
        DebuggerUnlock();
        return;
    }

    /* Update the global panic/debugger nested entry level */
    kernel_debugger_entry_count = CPUDEBUGGERCOUNT;

    /*
     * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
     * should we call into the debugger if it's configured and then reboot if the panic log has been written?
     */

    if (CPUDEBUGGEROP == DBOP_NONE) {
        /* If there was no debugger context setup, we trapped due to a software breakpoint */
        debugger_current_op = DBOP_BREAKPOINT;
    } else {
        /* Not safe to return from a nested panic/debugger call */
        if (debugger_current_op == DBOP_PANIC ||
            debugger_current_op == DBOP_DEBUGGER) {
            debugger_safe_to_return = FALSE;
        }

        debugger_current_op = CPUDEBUGGEROP;

        /* Only overwrite the panic message if there is none already - save the data from the first call */
        if (debugger_panic_str == NULL) {
            debugger_panic_str = CPUPANICSTR;
            debugger_panic_args = CPUPANICARGS;
            debugger_panic_data = CPUPANICDATAPTR;
            debugger_message = CPUDEBUGGERMSG;
            debugger_panic_caller = CPUPANICCALLER;
        }

        debugger_panic_options = CPUPANICOPTS;
    }

    /*
     * Clear the op from the processor debugger context so we can handle
     * breakpoints in the debugger
     */
    CPUDEBUGGEROP = DBOP_NONE;

    DEBUGGER_TRAP_TIMESTAMP(2);

    kdp_callouts(KDP_EVENT_ENTER);
    not_in_kdp = 0;

    DEBUGGER_TRAP_TIMESTAMP(3);

    if (debugger_current_op == DBOP_BREAKPOINT) {
        kdp_raise_exception(exception, code, subcode, state);
    } else if (debugger_current_op == DBOP_STACKSHOT) {
        CPUDEBUGGERRET = do_stackshot();
#if PGO
    } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
        CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
    } else {
        debugger_collect_diagnostics(exception, code, subcode, state);
    }

    DEBUGGER_TRAP_TIMESTAMP(4);

    not_in_kdp = initial_not_in_kdp;
    kdp_callouts(KDP_EVENT_EXIT);

    DEBUGGER_TRAP_TIMESTAMP(5);

    if (debugger_current_op != DBOP_BREAKPOINT) {
        debugger_panic_str = NULL;
        debugger_panic_args = NULL;
        debugger_panic_data = NULL;
        debugger_panic_options = 0;
        debugger_message = NULL;
    }

    /* Restore the previous debugger state */
    debugger_current_op = db_prev_op;

    DEBUGGER_TRAP_TIMESTAMP(6);

    DebuggerResumeOtherCores();

    DEBUGGER_TRAP_TIMESTAMP(7);

    DebuggerUnlock();

    DEBUGGER_TRAP_TIMESTAMP(8);

    return;
}

__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
    void *caller = __builtin_return_address(0);
    va_list listp;
    va_list listp2;

#ifdef lint
    level++;
#endif /* lint */
#ifdef MACH_BSD
    va_start(listp, fmt);
    va_copy(listp2, listp);

    disable_preemption();
    _doprnt(fmt, &listp, cons_putc_locked, 0);
    enable_preemption();

    va_end(listp);

    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
    va_end(listp2);
#endif
}

/*
 * Per <rdar://problem/24974766>, skip appending log messages to
 * the new logging infrastructure in contexts where safety is
 * uncertain. These contexts include:
 *   - When we're in the debugger
 *   - We're in a panic
 *   - Interrupts are disabled
 *   - Or Pre-emption is disabled
 * In all the above cases, it is potentially unsafe to log messages.
 */

boolean_t
oslog_is_safe(void)
{
    return kernel_debugger_entry_count == 0 &&
           not_in_kdp == 1 &&
           get_preemption_level() == 0 &&
           ml_get_interrupts_enabled() == TRUE;
}

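/*
 * Illustrative sketch (not part of this file): logging code can consult
 * oslog_is_safe() to decide whether handing a message to the os_log stream is
 * currently safe, falling back to a simpler console sink otherwise. The helper
 * below is hypothetical.
 */
static void
example_log_message(const char *msg)
{
    if (oslog_is_safe()) {
        os_log(OS_LOG_DEFAULT, "example: %s", msg);
    } else {
        /* debugger/panic/interrupt-disabled context: use the console path instead */
        kprintf("example: %s\n", msg);
    }
}
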
boolean_t
debug_mode_active(void)
{
    return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp);
}

void
debug_putc(char c)
{
    if ((debug_buf_size != 0) &&
        ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
        *debug_buf_ptr = c;
        debug_buf_ptr++;
    }
}

#if defined (__x86_64__)
struct pasc {
    unsigned a: 7;
    unsigned b: 7;
    unsigned c: 7;
    unsigned d: 7;
    unsigned e: 7;
    unsigned f: 7;
    unsigned g: 7;
    unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 */
int
packA(char *inbuf, uint32_t length, uint32_t buflen)
{
    unsigned int i, j = 0;
    pasc_t pack;

    length = MIN(((length + 7) & ~7), buflen);

    for (i = 0; i < length; i += 8) {
        pack.a = inbuf[i];
        pack.b = inbuf[i + 1];
        pack.c = inbuf[i + 2];
        pack.d = inbuf[i + 3];
        pack.e = inbuf[i + 4];
        pack.f = inbuf[i + 5];
        pack.g = inbuf[i + 6];
        pack.h = inbuf[i + 7];
        bcopy((char *) &pack, inbuf + j, 7);
        j += 7;
    }
    return j;
}

void
unpackA(char *inbuf, uint32_t length)
{
    pasc_t packs;
    unsigned i = 0;
    length = (length * 8) / 7;

    while (i < length) {
        packs = *(pasc_t *)&inbuf[i];
        bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
        inbuf[i++] = packs.a;
        inbuf[i++] = packs.b;
        inbuf[i++] = packs.c;
        inbuf[i++] = packs.d;
        inbuf[i++] = packs.e;
        inbuf[i++] = packs.f;
        inbuf[i++] = packs.g;
        inbuf[i++] = packs.h;
    }
}
#endif /* defined (__x86_64__) */
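
/*
 * Illustrative sketch (not part of this file, x86_64 only): round-tripping a
 * small buffer through packA()/unpackA(). Every 8 input characters pack into
 * 7 output bytes because only the low 7 bits of each byte are kept, so the
 * data must be 7-bit (ASCII) and the buffer length a multiple of 8. The buffer
 * name and contents below are hypothetical.
 */
#if defined (__x86_64__)
static void
example_pack_roundtrip(void)
{
    char buf[16] = "ExamplePaniclog";                    /* 15 chars + NUL = 16 bytes */
    int packed = packA(buf, sizeof(buf), sizeof(buf));   /* 16 bytes -> 14 packed bytes */
    unpackA(buf, (uint32_t)packed);                      /* expands back to 16 bytes in place */
}
#endif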
2d21ac55 A |
1279 | |
1280 | extern void *proc_name_address(void *p); | |
1281 | ||
1282 | static void | |
0a7de745 A |
1283 | panic_display_process_name(void) |
1284 | { | |
3e170ce0 A |
1285 | /* because of scoping issues len(p_comm) from proc_t is hard coded here */ |
1286 | char proc_name[17] = "Unknown"; | |
2d21ac55 A |
1287 | task_t ctask = 0; |
1288 | void *cbsd_info = 0; | |
1289 | ||
0a7de745 A |
1290 | if (ml_nofault_copy((vm_offset_t)¤t_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t)) { |
1291 | if (ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info)) { | |
1292 | if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0)) { | |
2d21ac55 | 1293 | proc_name[sizeof(proc_name) - 1] = '\0'; |
0a7de745 A |
1294 | } |
1295 | } | |
1296 | } | |
5ba3f43e | 1297 | paniclog_append_noflush("\nBSD process name corresponding to current thread: %s\n", proc_name); |
2d21ac55 A |
1298 | } |
1299 | ||
5ba3f43e | 1300 | unsigned |
0a7de745 A |
1301 | panic_active(void) |
1302 | { | |
1303 | return debugger_panic_str != (char *) 0; | |
2d21ac55 A |
1304 | } |
1305 | ||
5ba3f43e | 1306 | void |
0a7de745 A |
1307 | populate_model_name(char *model_string) |
1308 | { | |
2d21ac55 A |
1309 | strlcpy(model_name, model_string, sizeof(model_name)); |
1310 | } | |
1311 | ||
5ba3f43e | 1312 | void |
0a7de745 A |
1313 | panic_display_model_name(void) |
1314 | { | |
2d21ac55 A |
1315 | char tmp_model_name[sizeof(model_name)]; |
1316 | ||
0a7de745 | 1317 | if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) { |
2d21ac55 | 1318 | return; |
0a7de745 | 1319 | } |
2d21ac55 | 1320 | |
6d2010ae A |
1321 | tmp_model_name[sizeof(tmp_model_name) - 1] = '\0'; |
1322 | ||
0a7de745 | 1323 | if (tmp_model_name[0] != 0) { |
5ba3f43e | 1324 | paniclog_append_noflush("System model name: %s\n", tmp_model_name); |
0a7de745 | 1325 | } |
6d2010ae A |
1326 | } |
1327 | ||
5ba3f43e | 1328 | void |
0a7de745 A |
1329 | panic_display_kernel_uuid(void) |
1330 | { | |
39236c6e | 1331 | char tmp_kernel_uuid[sizeof(kernel_uuid_string)]; |
6d2010ae | 1332 | |
0a7de745 | 1333 | if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) { |
6d2010ae | 1334 | return; |
0a7de745 | 1335 | } |
2d21ac55 | 1336 | |
0a7de745 | 1337 | if (tmp_kernel_uuid[0] != '\0') { |
5ba3f43e | 1338 | paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid); |
0a7de745 | 1339 | } |
2d21ac55 A |
1340 | } |
1341 | ||
5ba3f43e | 1342 | void |
0a7de745 A |
1343 | panic_display_kernel_aslr(void) |
1344 | { | |
316670eb | 1345 | if (vm_kernel_slide) { |
5ba3f43e A |
1346 | paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); |
1347 | paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext); | |
316670eb | 1348 | } |
316670eb A |
1349 | } |
1350 | ||
5ba3f43e | 1351 | void |
0a7de745 A |
1352 | panic_display_hibb(void) |
1353 | { | |
fe8ab488 | 1354 | #if defined(__i386__) || defined (__x86_64__) |
5ba3f43e | 1355 | paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base); |
fe8ab488 A |
1356 | #endif |
1357 | } | |
1358 | ||
5ba3f43e | 1359 | static void |
0a7de745 A |
1360 | panic_display_uptime(void) |
1361 | { | |
1362 | uint64_t uptime; | |
c910b4d9 A |
1363 | absolutetime_to_nanoseconds(mach_absolute_time(), &uptime); |
1364 | ||
5ba3f43e | 1365 | paniclog_append_noflush("\nSystem uptime in nanoseconds: %llu\n", uptime); |
c910b4d9 A |
1366 | } |
1367 | ||
5ba3f43e | 1368 | static void |
0a7de745 A |
1369 | panic_display_disk_errors(void) |
1370 | { | |
d190cdc3 A |
1371 | if (panic_disk_error_description[0]) { |
1372 | panic_disk_error_description[sizeof(panic_disk_error_description) - 1] = '\0'; | |
5ba3f43e | 1373 | paniclog_append_noflush("Root disk errors: \"%s\"\n", panic_disk_error_description); |
d190cdc3 A |
1374 | } |
1375 | }
1376 | ||
2d21ac55 A |
1377 | extern const char version[]; |
1378 | extern char osversion[]; | |
1379 | ||
6d2010ae A |
1380 | static volatile uint32_t config_displayed = 0; |
1381 | ||
5ba3f43e | 1382 | __private_extern__ void |
0a7de745 A |
1383 | panic_display_system_configuration(boolean_t launchd_exit) |
1384 | { | |
1385 | if (!launchd_exit) { | |
1386 | panic_display_process_name(); | |
1387 | } | |
6d2010ae A |
1388 | if (OSCompareAndSwap(0, 1, &config_displayed)) { |
1389 | char buf[256]; | |
0a7de745 | 1390 | if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf))) { |
5ba3f43e | 1391 | paniclog_append_noflush("Boot args: %s\n", buf); |
0a7de745 | 1392 | } |
5ba3f43e | 1393 | paniclog_append_noflush("\nMac OS version:\n%s\n", |
2d21ac55 | 1394 | (osversion[0] != 0) ? osversion : "Not yet set"); |
0a7de745 | 1395 | paniclog_append_noflush("\nKernel version:\n%s\n", version); |
6d2010ae | 1396 | panic_display_kernel_uuid(); |
d190cdc3 A |
1397 | if (!launchd_exit) { |
1398 | panic_display_kernel_aslr(); | |
1399 | panic_display_hibb(); | |
1400 | panic_display_pal_info(); | |
1401 | } | |
2d21ac55 | 1402 | panic_display_model_name(); |
d190cdc3 A |
1403 | panic_display_disk_errors(); |
1404 | if (!launchd_exit) { | |
1405 | panic_display_uptime(); | |
1406 | panic_display_zprint(); | |
6d2010ae | 1407 | #if CONFIG_ZLEAKS |
d190cdc3 | 1408 | panic_display_ztrace(); |
6d2010ae | 1409 | #endif /* CONFIG_ZLEAKS */ |
5ba3f43e | 1410 | kext_dump_panic_lists(&paniclog_append_noflush); |
d190cdc3 | 1411 | } |
2d21ac55 A |
1412 | } |
1413 | } | |
1414 | ||
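/*
 * The OSCompareAndSwap(0, 1, &config_displayed) test above acts as a
 * one-shot guard: only the first caller to win the swap prints the
 * configuration block, so a nested or repeated panic does not duplicate
 * it. A minimal sketch of the same idiom, using the hypothetical
 * example_once / example_report() names:
 */
#if 0
static volatile uint32_t example_once = 0;

static void
example_report(void)
{
	if (OSCompareAndSwap(0, 1, &example_once)) {
		paniclog_append_noflush("printed exactly once\n");
	}
}
#endif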
0a7de745 | 1415 | extern unsigned int stack_total; |
6d2010ae | 1416 | extern unsigned long long stack_allocs; |
c910b4d9 | 1417 | |
5ba3f43e | 1418 | #if defined (__x86_64__) |
0a7de745 | 1419 | extern unsigned int inuse_ptepages_count; |
6d2010ae | 1420 | extern long long alloc_ptepages_count; |
c910b4d9 A |
1421 | #endif |
1422 | ||
5ba3f43e A |
1423 | extern boolean_t panic_include_zprint; |
1424 | extern mach_memory_info_t *panic_kext_memory_info; | |
1425 | extern vm_size_t panic_kext_memory_size; | |
c910b4d9 | 1426 | |
5ba3f43e A |
1427 | __private_extern__ void |
1428 | panic_display_zprint() | |
c910b4d9 | 1429 | { |
0a7de745 A |
1430 | if (panic_include_zprint == TRUE) { |
1431 | unsigned int i; | |
1432 | struct zone zone_copy; | |
c910b4d9 | 1433 | |
5ba3f43e | 1434 | paniclog_append_noflush("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size"); |
39037602 | 1435 | for (i = 0; i < num_zones; i++) { |
0a7de745 A |
1436 | if (ml_nofault_copy((vm_offset_t)(&zone_array[i]), (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { |
1437 | if (zone_copy.cur_size > (1024 * 1024)) { | |
1438 | paniclog_append_noflush("%-20s %10lu %10lu\n", zone_copy.zone_name, (uintptr_t)zone_copy.cur_size, (uintptr_t)(zone_copy.countfree * zone_copy.elem_size)); | |
c910b4d9 | 1439 | } |
0a7de745 | 1440 | } |
c910b4d9 A |
1441 | } |
1442 | ||
5ba3f43e | 1443 | paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks", (uintptr_t)(kernel_stack_size * stack_total)); |
b0d623f7 | 1444 | |
5ba3f43e | 1445 | #if defined (__x86_64__) |
0a7de745 | 1446 | paniclog_append_noflush("%-20s %10lu\n", "PageTables", (uintptr_t)(PAGE_SIZE * inuse_ptepages_count)); |
c910b4d9 | 1447 | #endif |
b0d623f7 | 1448 | |
5ba3f43e | 1449 | paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large", (uintptr_t)kalloc_large_total); |
3e170ce0 | 1450 | if (panic_kext_memory_info) { |
5ba3f43e A |
1451 | mach_memory_info_t *mem_info = panic_kext_memory_info; |
1452 | paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size"); | |
1453 | for (i = 0; i < (panic_kext_memory_size / sizeof(mach_zone_info_t)); i++) { | |
3e170ce0 | 1454 | if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && (mem_info[i].size > (1024 * 1024))) { |
5ba3f43e | 1455 | paniclog_append_noflush("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size); |
3e170ce0 A |
1456 | } |
1457 | } | |
1458 | } | |
c910b4d9 A |
1459 | } |
1460 | } | |
1461 | ||
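/*
 * panic_display_zprint() emits its zone summary only when
 * panic_include_zprint was set before the panic was taken (in practice the
 * zone allocator sets it when it decides zone state is relevant). A minimal
 * opt-in sketch; example_report_exhaustion() is hypothetical.
 */
#if 0
static void
example_report_exhaustion(void)
{
	panic_include_zprint = TRUE;	/* request the zone summary above */
	panic("example: zone map exhausted");
}
#endif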
fe8ab488 | 1462 | #if CONFIG_ECC_LOGGING |
5ba3f43e | 1463 | __private_extern__ void |
0a7de745 | 1464 | panic_display_ecc_errors() |
fe8ab488 A |
1465 | { |
1466 | uint32_t count = ecc_log_get_correction_count(); | |
1467 | ||
1468 | if (count > 0) { | |
5ba3f43e | 1469 | paniclog_append_noflush("ECC Corrections:%u\n", count); |
fe8ab488 A |
1470 | } |
1471 | } | |
1472 | #endif /* CONFIG_ECC_LOGGING */ | |
1473 | ||
6d2010ae | 1474 | #if CONFIG_ZLEAKS |
0a7de745 | 1475 | extern boolean_t panic_include_ztrace; |
6d2010ae | 1476 | extern struct ztrace* top_ztrace; |
04b8595b A |
1477 | void panic_print_symbol_name(vm_address_t search); |
1478 | ||
6d2010ae A |
1479 | /* |
1480 | * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
1481 | * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
1482 | */ | |
5ba3f43e A |
1483 | __private_extern__ void |
1484 | panic_display_ztrace(void) | |
6d2010ae | 1485 | { |
0a7de745 | 1486 | if (panic_include_ztrace == TRUE) { |
6d2010ae | 1487 | unsigned int i = 0; |
0a7de745 | 1488 | boolean_t keepsyms = FALSE; |
04b8595b | 1489 | |
0a7de745 | 1490 | PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms)); |
6d2010ae | 1491 | struct ztrace top_ztrace_copy; |
0a7de745 | 1492 | |
6d2010ae | 1493 | /* Make sure not to trip another panic if there's something wrong with memory */ |
0a7de745 | 1494 | if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) { |
5ba3f43e | 1495 | paniclog_append_noflush("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size); |
6d2010ae | 1496 | /* Print the backtrace addresses */ |
0a7de745 | 1497 | for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) { |
5ba3f43e | 1498 | paniclog_append_noflush("%p ", top_ztrace_copy.zt_stack[i]); |
04b8595b A |
1499 | if (keepsyms) { |
1500 | panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]); | |
1501 | } | |
5ba3f43e | 1502 | paniclog_append_noflush("\n"); |
6d2010ae A |
1503 | } |
1504 | /* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */ | |
1505 | kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth); | |
0a7de745 | 1506 | } else { |
5ba3f43e | 1507 | paniclog_append_noflush("\nCan't access top_ztrace...\n"); |
6d2010ae | 1508 | } |
5ba3f43e | 1509 | paniclog_append_noflush("\n"); |
6d2010ae A |
1510 | } |
1511 | } | |
1512 | #endif /* CONFIG_ZLEAKS */ | |
1513 | ||
39236c6e | 1514 | #if !CONFIG_TELEMETRY |
5ba3f43e A |
1515 | int |
1516 | telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean_t mark __unused) | |
39236c6e A |
1517 | { |
1518 | return KERN_NOT_SUPPORTED; | |
1519 | } | |
1520 | #endif | |
5ba3f43e A |
1521 | |
1522 | #include <machine/machine_cpu.h> | |
1523 | ||
1524 | uint32_t kern_feature_overrides = 0; | |
1525 | ||
0a7de745 A |
1526 | boolean_t |
1527 | kern_feature_override(uint32_t fmask) | |
1528 | { | |
5ba3f43e A |
1529 | if (kern_feature_overrides == 0) { |
1530 | uint32_t fdisables = 0; | |
1531 | /* Expected to be invoked for the first time early, in a
1532 | * single-threaded environment
1533 | */
1534 | if (PE_parse_boot_argn("validation_disables", &fdisables, sizeof(fdisables))) { | |
1535 | fdisables |= KF_INITIALIZED; | |
1536 | kern_feature_overrides = fdisables; | |
1537 | } else { | |
1538 | kern_feature_overrides |= KF_INITIALIZED; | |
1539 | } | |
1540 | } | |
0a7de745 | 1541 | return (kern_feature_overrides & fmask) == fmask; |
5ba3f43e | 1542 | } |
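/*
 * kern_feature_override() lazily parses the validation_disables boot-arg
 * into a bitmask (OR'ing in KF_INITIALIZED so parsing happens only once)
 * and then answers mask queries. A minimal usage sketch; the
 * KF_EXAMPLE_VALIDATION bit and example_expensive_validation() helper are
 * hypothetical:
 */
#if 0
static void
example_startup_checks(void)
{
	if (!kern_feature_override(KF_EXAMPLE_VALIDATION)) {
		example_expensive_validation();	/* skipped when disabled via boot-arg */
	}
}
#endif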