/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>
#include <kdp/kdp.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/percpu.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <kern/kern_cdata.h>
#include <kern/zalloc_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <sys/pgo.h>
#include <console/serial_protos.h>

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif
#include <kern/processor.h>

#if defined(__i386__) || defined(__x86_64__)
#include <IOKit/IOBSD.h>

#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>
#include <uuid/uuid.h>
#include <mach_debug/zone_info.h>
#include <mach/resource_monitors.h>

#include <os/log_private.h>

#if defined(__arm__) || defined(__arm64__)
#include <pexpert/pexpert.h> /* For gPanicBase */
#include <arm/caches_internal.h>
#include <arm/misc_protos.h>
extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
#endif

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
extern int vsnprintf(char *, size_t, const char *, va_list);
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );

unsigned int halt_in_debugger = 0;
unsigned int current_debugger = 0;
unsigned int active_debugger = 0;
unsigned int panicDebugging = FALSE;
unsigned int kernel_debugger_entry_count = 0;

#if defined(__arm__) || defined(__arm64__)
struct additional_panic_data_buffer *panic_data_buffers = NULL;
#endif

#if defined(__arm__)
#define TRAP_DEBUGGER __asm__ volatile("trap")
#elif defined(__arm64__)
/*
 * Magic number; this should be identical to the __arm__ encoding for trap.
 */
#define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
#elif defined (__x86_64__)
#define TRAP_DEBUGGER __asm__("int3")
#else
#error No TRAP_DEBUGGER for this architecture
#endif

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()	pmCPUHalt(PM_HALT_PANIC)
#else
#define panic_stop()	panic_spin_forever()
#endif

struct debugger_state {
	uint64_t	db_panic_options;
	debugger_op	db_current_op;
	boolean_t	db_proceed_on_sync_failure;
	const char	*db_message;
	const char	*db_panic_str;
	va_list		*db_panic_args;
	void		*db_panic_data_ptr;
	unsigned long	db_panic_caller;
	/* incremented whenever we panic or call Debugger (current CPU panic level) */
	uint32_t	db_entry_count;
	kern_return_t	db_op_return;
};
static struct debugger_state PERCPU_DATA(debugger_state);

/* __pure2 is correct if this function is called with preemption disabled */
static inline __pure2 struct debugger_state *
current_debugger_state(void)
{
	return PERCPU_GET(debugger_state);
}

#define CPUDEBUGGEROP    current_debugger_state()->db_current_op
#define CPUDEBUGGERMSG   current_debugger_state()->db_message
#define CPUPANICSTR      current_debugger_state()->db_panic_str
#define CPUPANICARGS     current_debugger_state()->db_panic_args
#define CPUPANICOPTS     current_debugger_state()->db_panic_options
#define CPUPANICDATAPTR  current_debugger_state()->db_panic_data_ptr
#define CPUDEBUGGERSYNC  current_debugger_state()->db_proceed_on_sync_failure
#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
#define CPUDEBUGGERRET   current_debugger_state()->db_op_return
#define CPUPANICCALLER   current_debugger_state()->db_panic_caller

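/*
 * Usage sketch (illustration only, not a new API): the CPUDEBUGGER* / CPUPANIC*
 * macros above resolve to fields of the current CPU's debugger_state, so they
 * are only meaningful while preemption is disabled, e.g.:
 *
 *	disable_preemption();
 *	CPUDEBUGGERCOUNT++;
 *	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
 *		... handle a runaway nested panic ...
 *	}
 *	CPUDEBUGGERCOUNT--;
 *	enable_preemption();
 *
 * This is the pattern DebuggerWithContext() and panic_trap_to_debugger()
 * follow later in this file.
 */
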
#if DEVELOPMENT || DEBUG
#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested)		\
MACRO_BEGIN								\
	if (requested) {						\
		volatile int *badpointer = (int *)4;			\
		*badpointer = 0;					\
	}								\
MACRO_END
#endif /* DEVELOPMENT || DEBUG */

debugger_op debugger_current_op = DBOP_NONE;
const char *debugger_panic_str = NULL;
va_list *debugger_panic_args = NULL;
void *debugger_panic_data = NULL;
uint64_t debugger_panic_options = 0;
const char *debugger_message = NULL;
unsigned long debugger_panic_caller = 0;

void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
    unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
    unsigned long panic_caller) __dead2;
static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
void panic_spin_forever(void) __dead2;
extern kern_return_t do_stackshot(void);
extern void PE_panic_hook(const char*);

#define NESTEDDEBUGGERENTRYMAX 5
static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;

#if defined(__arm__) || defined(__arm64__)
#define DEBUG_BUF_SIZE (4096)

/* debug_buf is directly linked with iBoot panic region for arm targets */
char *debug_buf_base = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;

SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
#else /* defined(__arm__) || defined(__arm64__) */
#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");

char debug_buf[DEBUG_BUF_SIZE];
struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));

/*
 * We don't include the size of the panic header in the length of the data we actually write.
 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end of
 * the log because we only support writing (3*PAGESIZE) bytes.
 */
unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

boolean_t extended_debug_log_enabled = FALSE;
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(XNU_TARGET_OS_OSX)
#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
#else
#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
#endif

/* Debugger state */
atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
boolean_t debugger_allcpus_halted = FALSE;
boolean_t debugger_safe_to_return = TRUE;
unsigned int debugger_context = 0;

static char model_name[64];
unsigned char *kernel_uuid;

boolean_t kernelcache_uuid_valid = FALSE;
uuid_t kernelcache_uuid;
uuid_string_t kernelcache_uuid_string;

boolean_t pageablekc_uuid_valid = FALSE;
uuid_t pageablekc_uuid;
uuid_string_t pageablekc_uuid_string;

boolean_t auxkc_uuid_valid = FALSE;
uuid_t auxkc_uuid;
uuid_string_t auxkc_uuid_string;

/*
 * By default we treat Debugger() the same as calls to panic(), unless
 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *NOT* set.
 * If DB_KERN_DUMP_ON_NMI is *NOT* set, returning from Debugger() is supported.
 *
 * Return from Debugger() is currently only implemented on x86.
 */
static boolean_t debugger_is_panic = TRUE;

TUNABLE(unsigned int, debug_boot_arg, "debug", 0);

char kernel_uuid_string[37]; /* uuid_string_t */
char kernelcache_uuid_string[37]; /* uuid_string_t */
char panic_disk_error_description[512];
size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);

extern unsigned int write_trace_on_panic;
int kext_assertions_enable =
#if DEBUG || DEVELOPMENT
    TRUE;
#else
    FALSE;
#endif

/*
 * Maintain the physically-contiguous carveout for the `phys_carveout_mb`
 * boot-arg.
 */
SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;

boolean_t
kernel_debugging_allowed(void)
{
#if XNU_TARGET_OS_OSX
#if CONFIG_CSR
	if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
		return FALSE;
	}
#endif /* CONFIG_CSR */
	return TRUE;
#else /* XNU_TARGET_OS_OSX */
	return PE_i_can_has_debugger(NULL);
#endif /* XNU_TARGET_OS_OSX */
}

__startup_func
static void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	/*
	 * Take the value of the debug boot-arg into account
	 */
#if MACH_KDP
	if (kernel_debugging_allowed() && debug_boot_arg) {
		if (debug_boot_arg & DB_HALT) {
			halt_in_debugger = 1;
		}

#if defined(__arm__) || defined(__arm64__)
		if (debug_boot_arg & DB_NMI) {
			panicDebugging = TRUE;
		}
#else
		panicDebugging = TRUE;
#endif /* defined(__arm__) || defined(__arm64__) */
	}

	if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) {
		max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;
	}

#if defined(__arm__) || defined(__arm64__)
	char kdpname[80];

	kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
#endif /* defined(__arm__) || defined(__arm64__) */

#endif /* MACH_KDP */

#if defined (__x86_64__)
	/*
	 * By default we treat Debugger() the same as calls to panic(), unless
	 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *NOT* set.
	 * If DB_KERN_DUMP_ON_NMI is *NOT* set, returning from Debugger() is supported.
	 * This is because writing an on-device corefile is a destructive operation.
	 *
	 * Return from Debugger() is currently only implemented on x86.
	 */
	if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
		debugger_is_panic = FALSE;
	}
#endif
}
STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);

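/*
 * Example (sketch, assumes the standard DB_* flag definitions in debug.h):
 * the settings made in panic_init() are driven by the "debug" boot-arg, so a
 * development-fused device booted with, for instance,
 *
 *	debug=0x1	(DB_HALT: stop in the debugger during boot)
 *	debug=0x4	(DB_NMI: leave panicDebugging enabled on arm)
 *
 * will take the corresponding branches above, provided
 * kernel_debugging_allowed() returns TRUE. Verify the bit values against
 * debug.h for the release in use.
 */
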
#if defined (__x86_64__)
void
extended_debug_log_init(void)
{
	assert(coprocessor_paniclog_flush);
	/*
	 * Allocate an extended panic log buffer that has space for the panic
	 * stackshot at the end. Update the debug buf pointers appropriately
	 * to point at this new buffer.
	 *
	 * iBoot pre-initializes the panic region with the NULL character. We set this here
	 * so we can accurately calculate the CRC for the region without needing to flush the
	 * full region over SMC.
	 */
	char *new_debug_buf = kalloc_flags(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);

	panic_info = (struct macos_panic_header *)new_debug_buf;
	debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
	debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));

	extended_debug_log_enabled = TRUE;

	/*
	 * Insert a compiler barrier so we don't free the other panic stackshot buffer
	 * until after we've marked the new one as available
	 */
	__compiler_barrier();
	kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
	panic_stackshot_buf = 0;
	panic_stackshot_buf_len = 0;
}
#endif /* defined (__x86_64__) */

void
debug_log_init(void)
{
#if defined(__arm__) || defined(__arm64__)
	if (!gPanicBase) {
		printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
		return;
	}
	/* Shift debug buf start location and size by the length of the panic header */
	debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
	debug_buf_ptr = debug_buf_base;
	debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
#else
	kern_return_t kr = KERN_SUCCESS;
	bzero(panic_info, DEBUG_BUF_SIZE);

	assert(debug_buf_base != NULL);
	assert(debug_buf_ptr != NULL);
	assert(debug_buf_size != 0);

	/*
	 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
	 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
	 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
	 * up.
	 */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);
	if (kr == KERN_SUCCESS) {
		panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
	}
#endif
}

void
phys_carveout_init(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	unsigned int phys_carveout_mb = 0;

	if (!PE_parse_boot_argn("phys_carveout_mb", &phys_carveout_mb,
	    sizeof(phys_carveout_mb))) {
		return;
	}
	if (phys_carveout_mb == 0) {
		return;
	}

	size_t size = 0;
	if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &size)) {
		printf("phys_carveout_mb size overflowed (%uMB)\n",
		    phys_carveout_mb);
		return;
	}

	kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, size,
	    VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		printf("failed to allocate %uMB for phys_carveout_mb: %u\n",
		    phys_carveout_mb, (unsigned int)kr);
		return;
	}

	phys_carveout_pa = kvtophys(phys_carveout);
	phys_carveout_size = size;
}

static void
DebuggerLock(void)
{
	int my_cpu = cpu_number();
	int debugger_exp_cpu = DEBUGGER_NO_CPU;
	assert(ml_get_interrupts_enabled() == FALSE);

	if (atomic_load(&debugger_cpu) == my_cpu) {
		return;
	}

	while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
		debugger_exp_cpu = DEBUGGER_NO_CPU;
	}

	return;
}

static void
DebuggerUnlock(void)
{
	assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());

	/*
	 * We don't do an atomic exchange here in case
	 * there's another CPU spinning to acquire the debugger_lock
	 * and we never get a chance to update it. We already have the
	 * lock so we can simply store DEBUGGER_NO_CPU and follow with
	 * a barrier.
	 */
	atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
	OSMemoryBarrier();

	return;
}

static kern_return_t
DebuggerHaltOtherCores(boolean_t proceed_on_failure)
{
#if defined(__arm__) || defined(__arm64__)
	return DebuggerXCallEnter(proceed_on_failure);
#else /* defined(__arm__) || defined(__arm64__) */
#pragma unused(proceed_on_failure)
	mp_kdp_enter(proceed_on_failure);
	return KERN_SUCCESS;
#endif
}

static void
DebuggerResumeOtherCores(void)
{
#if defined(__arm__) || defined(__arm64__)
	DebuggerXCallReturn();
#else /* defined(__arm__) || defined(__arm64__) */
	mp_kdp_exit();
#endif
}

static void
DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	CPUDEBUGGEROP = db_op;

	/* Preserve the original panic message */
	if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
		CPUDEBUGGERMSG = db_message;
		CPUPANICSTR = db_panic_str;
		CPUPANICARGS = db_panic_args;
		CPUPANICDATAPTR = db_panic_data_ptr;
		CPUPANICCALLER = db_panic_caller;
	} else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
		kprintf("Nested panic detected:");
		if (db_panic_str != NULL) {
			_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
		}
	}

	CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
	CPUDEBUGGERRET = KERN_SUCCESS;

	/* Reset these on any nested panics */
	CPUPANICOPTS = db_panic_options;

	return;
}

/*
 * Save the requested debugger state/action into the current processor's
 * per-CPU state and trap to the debugger.
 */
kern_return_t
DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
{
	kern_return_t ret;

	assert(ml_get_interrupts_enabled() == FALSE);
	DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
	    db_panic_options, db_panic_data_ptr,
	    db_proceed_on_sync_failure, db_panic_caller);

	/*
	 * On ARM this generates an uncategorized exception -> sleh code ->
	 * DebuggerCall -> kdp_trap -> handle_debugger_trap
	 * So that is how XNU ensures that only one core can panic.
	 * The rest of the cores are halted by IPI if possible; if that
	 * fails it will fall back to dbgwrap.
	 */
	TRAP_DEBUGGER;

	ret = CPUDEBUGGERRET;

	DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);

	return ret;
}

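/*
 * Sketch of a non-panic client (simplified from the kernel stackshot path;
 * details differ): a kernel-initiated stackshot disables interrupts and traps
 * with DBOP_STACKSHOT so that do_stackshot() runs from the debugger context
 * set up by handle_debugger_trap():
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	kern_return_t kr = DebuggerTrapWithState(DBOP_STACKSHOT, NULL, NULL,
 *	    NULL, 0, NULL, FALSE, 0);
 *	ml_set_interrupts_enabled(istate);
 *
 * The result comes back through the per-CPU db_op_return field.
 */
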
void __attribute__((noinline))
Assert(
	const char	*file,
	int		line,
	const char	*expression
	)
{
#if CONFIG_NONFATAL_ASSERTS
	static TUNABLE(bool, mach_assert, "assertions", true);

	if (!mach_assert) {
		kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
		return;
	}
#endif

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);
}

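/*
 * For reference, the assert() macro in <kern/assert.h> funnels into Assert()
 * roughly as follows (simplified sketch; see assert.h for the exact form):
 *
 *	#define assert(ex) \
 *		(__builtin_expect(!!((long)(ex)), 1L) ? (void)0 : \
 *		    Assert(__FILE__, __LINE__, #ex))
 *
 * so a failed assertion reports the file, line, and stringified expression
 * and, unless the "assertions" tunable above disabled fatal asserts, ends up
 * in panic_plain().
 */
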
boolean_t
debug_is_current_cpu_in_panic_state(void)
{
	return current_debugger_state()->db_entry_count > 0;
}

void
Debugger(const char *message)
{
	DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE);
}

void
DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
    uint64_t debugger_options_mask)
{
	spl_t previous_interrupts_state;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

	previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (debugger_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
		}

		panic_spin_forever();
	}

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		DebuggerSaveState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
		handle_debugger_trap(reason, 0, 0, ctx);
		DebuggerSaveState(DBOP_NONE, NULL, NULL,
		    NULL, 0, NULL, FALSE, 0);
	} else {
		DebuggerTrapWithState(DBOP_DEBUGGER, message,
		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
	}

	CPUDEBUGGERCOUNT--;
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	enable_preemption();
	ml_set_interrupts_enabled(previous_interrupts_state);
}

static struct kdp_callout {
	struct kdp_callout	*callout_next;
	kdp_callout_fn_t	callout_fn;
	boolean_t		callout_in_progress;
	void			*callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
	struct kdp_callout *kcp;
	struct kdp_callout *list_head;

	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL) {
		panic("kdp_register_callout() kalloc failed");
	}

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;
	kcp->callout_in_progress = FALSE;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
}

static void
kdp_callouts(kdp_event_t event)
{
	struct kdp_callout *kcp = kdp_callout_list;

	while (kcp) {
		if (!kcp->callout_in_progress) {
			kcp->callout_in_progress = TRUE;
			kcp->callout_fn(kcp->callout_arg, event);
			kcp->callout_in_progress = FALSE;
		}
		kcp = kcp->callout_next;
	}
}

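/*
 * Usage sketch (hypothetical client): a driver that needs to quiesce hardware
 * when the debugger is entered can register a callout once at init time; the
 * function is then invoked with KDP_EVENT_ENTER / KDP_EVENT_EXIT /
 * KDP_EVENT_PANICLOG as the debugger path runs:
 *
 *	static void
 *	mydrv_kdp_callout(void *arg, kdp_event_t event)
 *	{
 *		if (event == KDP_EVENT_ENTER) {
 *			... park DMA engines ...
 *		}
 *	}
 *
 *	kdp_register_callout(mydrv_kdp_callout, mydrv_softc);
 *
 * ("mydrv_kdp_callout" and "mydrv_softc" are illustrative names.) Callouts
 * are never unregistered, so the argument must stay valid for the life of the
 * system.
 */
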
#if defined(__arm__) || defined(__arm64__)
/*
 * Register an additional buffer with data to include in the panic log
 *
 * <rdar://problem/50137705> tracks supporting more than one buffer
 *
 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
 */
void
register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
{
	if (panic_data_buffers != NULL) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	if (producer_name == NULL || (strlen(producer_name) == 0)) {
		panic("register_additional_panic_data_buffer called with invalid producer_name");
	}

	if (buf == NULL) {
		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
	}

	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
		panic("register_additional_panic_data_buffer called with invalid length");
	}

	struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
	new_panic_data_buffer->producer_name = producer_name;
	new_panic_data_buffer->buf = buf;
	new_panic_data_buffer->len = len;

	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
		panic("register_additional_panic_data_buffer called with buffer already registered");
	}

	return;
}
#endif /* defined(__arm__) || defined(__arm64__) */

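/*
 * Usage sketch (hypothetical caller): a co-processor driver that wants a blob
 * included in the panic log registers a long-lived, statically owned buffer
 * once; both the name and the buffer must remain valid forever since they are
 * read at panic time:
 *
 *	static uint8_t fw_crash_log[ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN];
 *
 *	register_additional_panic_data_buffer("fwlog", fw_crash_log,
 *	    sizeof(fw_crash_log));
 *
 * Only a single registration is currently supported (see the rdar above);
 * "fwlog" and "fw_crash_log" are illustrative names.
 */
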
/*
 * An overview of the xnu panic path:
 *
 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
 * according to the device's boot-args.
 */
#undef panic
void
panic(const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}

void
panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
{
	va_list panic_str_args;

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
	    NULL, (unsigned long)(char *)__builtin_return_address(0));
	va_end(panic_str_args);
}

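/*
 * Usage sketch (hypothetical caller, shown for illustration): a subsystem that
 * wants the paniclog written and the device rebooted without attempting a
 * local coredump could call, for example:
 *
 *	panic_with_options(0, NULL,
 *	    DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP,
 *	    "mydriver: fatal state 0x%x", state);
 *
 * The reason/ctx pair is only meaningful when panicking from a trap handler;
 * most callers pass 0/NULL as panic() does above.
 */
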
#if defined (__x86_64__)
/*
 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
 * thread when writing the panic log.
 *
 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
 */
void
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
	va_list panic_str_args;
	__assert_only os_ref_count_t th_ref_count;

	assert_thread_magic(thread);
	th_ref_count = os_ref_get_count(&thread->ref_count);
	assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);

	/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
	thread_reference(thread);

	va_start(panic_str_args, str);
	panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
	    thread, (unsigned long)(char *)__builtin_return_address(0));

	va_end(panic_str_args);
}
#endif /* defined (__x86_64__) */

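/*
 * Usage sketch (hypothetical x86 caller): because the panic path backtraces
 * the passed thread, the caller is expected to hold its own reference across
 * the call in addition to the one taken internally above:
 *
 *	thread_reference(victim_thread);
 *	panic_with_thread_context(0, NULL, 0, victim_thread,
 *	    "watchdog: thread %p appears stuck", victim_thread);
 *
 * "victim_thread" is an illustrative name, not an existing symbol.
 */
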
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
    uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
{
#pragma clang diagnostic pop

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	/* Turn off I/O tracing once we've panicked */
	mmiotrace_enabled = 0;
#endif

	ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);

	CPUDEBUGGERCOUNT++;

	if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
		static boolean_t in_panic_kprintf = FALSE;

		/* Notify any listeners that we've started a panic */
		uint32_t panic_details = 0;
		if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		if (!in_panic_kprintf) {
			in_panic_kprintf = TRUE;
			kprintf("Detected nested debugger entry count exceeding %d\n",
			    max_debugger_entry_count);
			in_panic_kprintf = FALSE;
		}

		if (!panicDebugging) {
			kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
		}

		panic_spin_forever();
	}

	/* Handle any necessary platform specific actions before we proceed */
	PEInitiatePanic();

#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
#endif

	PE_panic_hook(panic_format_str);

#if defined (__x86_64__)
	plctrace_disable();
#endif

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			KDBG_RELEASE(TRACE_PANIC);
			kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME);
		}
	}

	ml_set_interrupts_enabled(FALSE);
	disable_preemption();

#if defined (__x86_64__)
	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
#endif /* defined (__x86_64__) */

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	if (ctx != NULL) {
		/*
		 * We called into panic from a trap, no need to trap again. Set the
		 * state on the current CPU and then jump to handle_debugger_trap.
		 */
		DebuggerSaveState(DBOP_PANIC, "panic",
		    panic_format_str, panic_args,
		    panic_options_mask, panic_data_ptr, TRUE, panic_caller);
		handle_debugger_trap(reason, 0, 0, ctx);
	}

#if defined(__arm64__)
	/*
	 * Signal to fastsim that it should open debug ports (nop on hardware)
	 */
	__asm__ volatile ("HINT 0x45");
#endif /* defined(__arm64__) */

	DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
	    panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller);

	/*
	 * Not reached.
	 */
	panic_stop();
	__builtin_unreachable();
}

void
panic_spin_forever(void)
{
	paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");

	for (;;) {
	}
}

static void
kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
{
	printf("Attempting system restart...\n");
	if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
		PEHaltRestart(kPEPanicRestartCPUNoCallouts);
	} else {
		PEHaltRestart(type);
	}
	halt_all_cpus(TRUE);
}

void
kdp_machine_reboot(void)
{
	kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
}

/*
 * Gather and save diagnostic information about a panic (or Debugger call).
 *
 * On embedded, Debugger and panic are treated very similarly -- WDT uses Debugger so we can
 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
 * paniclog is written and no core is written unless we request a core on NMI.
 *
 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
 * and calling out to any other functions we have for collecting diagnostic info.
 */
static void
debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
#if DEVELOPMENT || DEBUG
	DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_PRELOG));
#endif

#if defined(__x86_64__)
	kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
#endif
	/*
	 * DB_HALT (halt_in_debugger) can be requested on startup, we shouldn't generate
	 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
	 * we'll just spin in kdp_raise_exception.
	 */
	if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
		kdp_raise_exception(exception, code, subcode, state);
		if (debugger_safe_to_return && !debugger_is_panic) {
			return;
		}
	}

	if ((debugger_current_op == DBOP_PANIC) ||
	    ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		/*
		 * Attempt to notify listeners once and only once that we've started
		 * panicking. Only do this for Debugger() calls if we're treating
		 * Debugger() calls like panic().
		 */
		uint32_t panic_details = 0;
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			panic_details |= kPanicDetailsForcePowerOff;
		}
		PEHaltRestartInternal(kPEPanicBegin, panic_details);

		/*
		 * Set the begin pointer in the panic log structure. We key off of this
		 * static variable rather than contents from the panic header itself in case someone
		 * has stomped over the panic_info structure. Also initializes the header magic.
		 */
		static boolean_t began_writing_paniclog = FALSE;
		if (!began_writing_paniclog) {
			PE_init_panicheader();
			began_writing_paniclog = TRUE;
		} else {
			/*
			 * If we reached here, update the panic header to keep it as consistent
			 * as possible during a nested panic
			 */
			PE_update_panicheader_nestedpanic();
		}
	}

	/*
	 * Write panic string if this was a panic.
	 *
	 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
	 */
	if (debugger_current_op == DBOP_PANIC) {
		paniclog_append_noflush("panic(cpu %d caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
		if (debugger_panic_str) {
			_doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
		}
		paniclog_append_noflush("\n");
	}
#if defined(__x86_64__)
	else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
		paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
	}

	/*
	 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
	 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
	 * conventional sense.
	 */
	if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
#endif
	{
		kdp_callouts(KDP_EVENT_PANICLOG);

		/*
		 * Write paniclog and panic stackshot (if supported)
		 * TODO: Need to clear panic log when return from debugger
		 * hooked up for embedded
		 */
		SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options);

#if DEVELOPMENT || DEBUG
		DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTLOG));
#endif

		/* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
		if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
			PEHaltRestart(kPEPanicRestartCPUNoCallouts);
		}
	}

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
	/*
	 * If reboot on panic is enabled and the caller of panic indicated that we should skip
	 * local coredumps, don't try to write these and instead go straight to reboot. This
	 * allows us to persist any data that's stored in the panic log.
	 */
	if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
	    (debug_boot_arg & DB_REBOOT_POST_CORE)) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/*
	 * Consider generating a local corefile if the infrastructure is configured
	 * and we haven't disabled on-device coredumps.
	 */
	if (on_device_corefile_enabled()) {
		if (!kdp_has_polled_corefile()) {
			if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
				paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)",
				    kdp_polled_corefile_error());
#if defined(__arm__) || defined(__arm64__)
				panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
				paniclog_flush();
#else /* defined(__arm__) || defined(__arm64__) */
				if (panic_info->mph_panic_log_offset != 0) {
					panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
					paniclog_flush();
				}
#endif /* defined(__arm__) || defined(__arm64__) */
			}
		}
#if XNU_MONITOR
		else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) {
			paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state");
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
			paniclog_flush();
		}
#endif /* XNU_MONITOR */
		else {
			int ret = -1;

#if defined (__x86_64__)
			/* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
			if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
#endif
			{
				/*
				 * Doing an on-device coredump leaves the disk driver in a state
				 * that can not be resumed.
				 */
				debugger_safe_to_return = FALSE;
				begin_panic_transfer();
				ret = kern_dump(KERN_DUMP_DISK);
				abort_panic_transfer();

#if DEVELOPMENT || DEBUG
				DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_panic_options & DEBUGGER_OPTION_RECURPANIC_POSTCORE));
#endif
			}

			/*
			 * If DB_REBOOT_POST_CORE is set, then reboot if the coredump is successfully saved
			 * or if the option to ignore failures is set.
			 */
			if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
			    ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
				kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
			}
		}
	}

	if (debug_boot_arg & DB_REBOOT_ALWAYS) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	/* If KDP is configured, try to trap to the debugger */
#if defined(__arm__) || defined(__arm64__)
	if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
#else
	if (current_debugger != NO_CUR_DB) {
#endif
		kdp_raise_exception(exception, code, subcode, state);
		/*
		 * Only return if we entered via Debugger and it's safe to return
		 * (we halted the other cores successfully, this isn't a nested panic, etc)
		 */
		if (debugger_current_op == DBOP_DEBUGGER &&
		    debugger_safe_to_return &&
		    kernel_debugger_entry_count == 1 &&
		    !debugger_is_panic) {
			return;
		}
	}

#if defined(__arm__) || defined(__arm64__)
	if (PE_i_can_has_debugger(NULL) && panicDebugging) {
		/* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
		panic_spin_shmcon();
	}
#endif /* defined(__arm__) || defined(__arm64__) */
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

	if (!panicDebugging) {
		kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
	}

	panic_spin_forever();
}

#if INTERRUPT_MASKED_DEBUG
uint64_t debugger_trap_timestamps[9];
# define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
#else
# define DEBUGGER_TRAP_TIMESTAMP(i)
#endif

void
handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
{
	unsigned int initial_not_in_kdp = not_in_kdp;
	kern_return_t ret;
	debugger_op db_prev_op = debugger_current_op;

	DEBUGGER_TRAP_TIMESTAMP(0);

	DebuggerLock();
	ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC);

	DEBUGGER_TRAP_TIMESTAMP(1);

#if INTERRUPT_MASKED_DEBUG
	if (serialmode & SERIALMODE_OUTPUT) {
		ml_spin_debug_reset(current_thread());
	}
#endif
	if (ret != KERN_SUCCESS) {
		CPUDEBUGGERRET = ret;
		DebuggerUnlock();
		return;
	}

	/* Update the global panic/debugger nested entry level */
	kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
	if (kernel_debugger_entry_count > 0) {
		console_suspend();
	}

	/*
	 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice
	 * should we call into the debugger if it's configured and then reboot if the panic log has been written?
	 */

	if (CPUDEBUGGEROP == DBOP_NONE) {
		/* If there was no debugger context setup, we trapped due to a software breakpoint */
		debugger_current_op = DBOP_BREAKPOINT;
	} else {
		/* Not safe to return from a nested panic/debugger call */
		if (debugger_current_op == DBOP_PANIC ||
		    debugger_current_op == DBOP_DEBUGGER) {
			debugger_safe_to_return = FALSE;
		}

		debugger_current_op = CPUDEBUGGEROP;

		/* Only overwrite the panic message if there is none already - save the data from the first call */
		if (debugger_panic_str == NULL) {
			debugger_panic_str = CPUPANICSTR;
			debugger_panic_args = CPUPANICARGS;
			debugger_panic_data = CPUPANICDATAPTR;
			debugger_message = CPUDEBUGGERMSG;
			debugger_panic_caller = CPUPANICCALLER;
		}

		debugger_panic_options = CPUPANICOPTS;
	}

	/*
	 * Clear the op from the processor debugger context so we can handle
	 * breakpoints in the debugger
	 */
	CPUDEBUGGEROP = DBOP_NONE;

	DEBUGGER_TRAP_TIMESTAMP(2);

	kdp_callouts(KDP_EVENT_ENTER);
	not_in_kdp = 0;

	DEBUGGER_TRAP_TIMESTAMP(3);

	if (debugger_current_op == DBOP_BREAKPOINT) {
		kdp_raise_exception(exception, code, subcode, state);
	} else if (debugger_current_op == DBOP_STACKSHOT) {
		CPUDEBUGGERRET = do_stackshot();
#if PGO
	} else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
		CPUDEBUGGERRET = do_pgo_reset_counters();
#endif
	} else {
		debugger_collect_diagnostics(exception, code, subcode, state);
	}

	DEBUGGER_TRAP_TIMESTAMP(4);

	not_in_kdp = initial_not_in_kdp;
	kdp_callouts(KDP_EVENT_EXIT);

	DEBUGGER_TRAP_TIMESTAMP(5);

	if (debugger_current_op != DBOP_BREAKPOINT) {
		debugger_panic_str = NULL;
		debugger_panic_args = NULL;
		debugger_panic_data = NULL;
		debugger_panic_options = 0;
		debugger_message = NULL;
	}

	/* Restore the previous debugger state */
	debugger_current_op = db_prev_op;

	DEBUGGER_TRAP_TIMESTAMP(6);

	DebuggerResumeOtherCores();

	DEBUGGER_TRAP_TIMESTAMP(7);

	DebuggerUnlock();

	DEBUGGER_TRAP_TIMESTAMP(8);

	return;
}

__attribute__((noinline, not_tail_called))
void
log(__unused int level, char *fmt, ...)
{
	void *caller = __builtin_return_address(0);
	va_list listp;
	va_list listp2;

#ifdef lint
	level++;
#endif /* lint */
#ifdef MACH_BSD
	va_start(listp, fmt);
	va_copy(listp2, listp);

	disable_preemption();
	_doprnt(fmt, &listp, cons_putc_locked, 0);
	enable_preemption();

	va_end(listp);

	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
	va_end(listp2);
#endif
}

/*
 * Per <rdar://problem/24974766>, skip appending log messages to
 * the new logging infrastructure in contexts where safety is
 * uncertain. These contexts include:
 *   - When we're in the debugger
 *   - We're in a panic
 *   - Interrupts are disabled
 *   - Or pre-emption is disabled
 * In all the above cases, it is potentially unsafe to log messages.
 */

boolean_t
oslog_is_safe(void)
{
	return kernel_debugger_entry_count == 0 &&
	       not_in_kdp == 1 &&
	       get_preemption_level() == 0 &&
	       ml_get_interrupts_enabled() == TRUE;
}

boolean_t
debug_mode_active(void)
{
	return (kernel_debugger_entry_count != 0) || (not_in_kdp == 0);
}

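/*
 * Usage sketch (hypothetical caller): logging code that may run in unusual
 * contexts can use these predicates to fall back to the always-safe console
 * path instead of the os_log machinery:
 *
 *	if (oslog_is_safe()) {
 *		os_log(OS_LOG_DEFAULT, "state %u", value);
 *	} else {
 *		kprintf("state %u\n", value);
 *	}
 *
 * debug_mode_active() is the stronger check for "we are currently inside the
 * debugger/panic machinery".
 */
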
void
debug_putc(char c)
{
	if ((debug_buf_size != 0) &&
	    ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) {
		*debug_buf_ptr = c;
		debug_buf_ptr++;
	}
}

#if defined (__x86_64__)
struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/*
 * In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
 */
int
packA(char *inbuf, uint32_t length, uint32_t buflen)
{
	unsigned int i, j = 0;
	pasc_t pack;

	length = MIN(((length + 7) & ~7), buflen);

	for (i = 0; i < length; i += 8) {
		pack.a = inbuf[i];
		pack.b = inbuf[i + 1];
		pack.c = inbuf[i + 2];
		pack.d = inbuf[i + 3];
		pack.e = inbuf[i + 4];
		pack.f = inbuf[i + 5];
		pack.g = inbuf[i + 6];
		pack.h = inbuf[i + 7];
		bcopy((char *) &pack, inbuf + j, 7);
		j += 7;
	}
	return j;
}

void
unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8) / 7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
#endif /* defined (__x86_64__) */

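/*
 * Round-trip sketch (illustration only): packA() squeezes 8 ASCII bytes into
 * 7 by dropping the high bit of each character, and unpackA() reverses it, so
 * a paniclog buffer can be compressed in place before transmission:
 *
 *	char buf[BUFLEN];			// BUFLEN: a multiple of 8
 *	uint32_t used = ...;			// bytes of 7-bit ASCII in buf
 *	int packed = packA(buf, used, BUFLEN);
 *	...
 *	unpackA(buf, packed);			// restores the original text
 *
 * Both routines assume plain 7-bit ASCII input and a buffer length that is a
 * multiple of 8, as noted above; "buf"/"BUFLEN"/"used" are placeholder names.
 */
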
extern char *proc_name_address(void *);
extern char *proc_longname_address(void *);

__private_extern__ void
panic_display_process_name(void)
{
	proc_name_t proc_name = {};
	task_t ctask = 0;
	void *cbsd_info = 0;
	vm_size_t size;

	size = ml_nofault_copy((vm_offset_t)&current_thread()->task,
	    (vm_offset_t)&ctask, sizeof(task_t));
	if (size != sizeof(task_t)) {
		goto out;
	}

	size = ml_nofault_copy((vm_offset_t)&ctask->bsd_info,
	    (vm_offset_t)&cbsd_info, sizeof(cbsd_info));
	if (size != sizeof(cbsd_info)) {
		goto out;
	}

	if (cbsd_info == NULL) {
		goto out;
	}

	size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
	    (vm_offset_t)&proc_name, sizeof(proc_name));

	if (size == 0 || proc_name[0] == '\0') {
		size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
		    (vm_offset_t)&proc_name,
		    MIN(sizeof(command_t), sizeof(proc_name)));
		if (size > 0) {
			proc_name[size - 1] = '\0';
		}
	}

out:
	proc_name[sizeof(proc_name) - 1] = '\0';
	paniclog_append_noflush("\nProcess name corresponding to current thread: %s\n",
	    proc_name[0] != '\0' ? proc_name : "Unknown");
}

unsigned
panic_active(void)
{
	return debugger_panic_str != (char *) 0;
}

void
populate_model_name(char *model_string)
{
	strlcpy(model_name, model_string, sizeof(model_name));
}

void
panic_display_model_name(void)
{
	char tmp_model_name[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
		return;
	}

	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

	if (tmp_model_name[0] != 0) {
		paniclog_append_noflush("System model name: %s\n", tmp_model_name);
	}
}

void
panic_display_kernel_uuid(void)
{
	char tmp_kernel_uuid[sizeof(kernel_uuid_string)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
		return;
	}

	if (tmp_kernel_uuid[0] != '\0') {
		paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
	}
}

void
panic_display_kernel_aslr(void)
{
	kc_format_t kc_format;

	PE_get_primary_kc_format(&kc_format);

	if (kc_format == KCFormatFileset) {
		void *kch = PE_get_kc_header(KCKindPrimary);

		paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
		paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
		paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
	} else if (vm_kernel_slide) {
		paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
	}
	paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
#if defined(__arm64__)
	if (kc_format == KCFormatFileset) {
		extern vm_offset_t segTEXTEXECB;
		paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
	}
#endif
}

void
panic_display_hibb(void)
{
#if defined(__i386__) || defined (__x86_64__)
	paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
#endif
}

extern unsigned int stack_total;
extern unsigned long long stack_allocs;

#if defined (__x86_64__)
extern unsigned int inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

extern boolean_t panic_include_zprint;
extern mach_memory_info_t *panic_kext_memory_info;
extern vm_size_t panic_kext_memory_size;

5ba3f43e 1572__private_extern__ void
f427ee49 1573panic_display_zprint(void)
c910b4d9 1574{
0a7de745 1575 if (panic_include_zprint == TRUE) {
0a7de745 1576 struct zone zone_copy;
c910b4d9 1577
5ba3f43e 1578 paniclog_append_noflush("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size");
f427ee49
A
1579 zone_index_foreach(i) {
1580 if (ml_nofault_copy((vm_offset_t)&zone_array[i],
1581 (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
1582 if (zone_copy.page_count > atop(1024 * 1024)) {
1583 paniclog_append_noflush("%-8s%-20s %10llu %10lu\n",
1584 zone_heap_name(&zone_copy),
1585 zone_copy.z_name, ptoa_64(zone_copy.page_count),
1586 (uintptr_t)zone_size_free(&zone_copy));
c910b4d9 1587 }
0a7de745 1588 }
c910b4d9
A
1589 }
1590
f427ee49
A
1591 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
1592 (uintptr_t)(kernel_stack_size * stack_total));
5ba3f43e 1593#if defined (__x86_64__)
f427ee49
A
1594 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
1595 (uintptr_t)ptoa(inuse_ptepages_count));
c910b4d9 1596#endif
f427ee49
A
1597 paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large",
1598 (uintptr_t)kalloc_large_total);
b0d623f7 1599
3e170ce0 1600 if (panic_kext_memory_info) {
5ba3f43e
A
1601 mach_memory_info_t *mem_info = panic_kext_memory_info;
1602 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
f427ee49
A
1603 for (uint32_t i = 0; i < (panic_kext_memory_size / sizeof(mach_zone_info_t)); i++) {
1604 if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) &&
1605 (mem_info[i].size > (1024 * 1024))) {
5ba3f43e 1606 paniclog_append_noflush("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size);
3e170ce0
A
1607 }
1608 }
1609 }
c910b4d9
A
1610 }
1611}
1612
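/*
 * Worked example (illustrative): the page_count filter above corresponds
 * to 1 MiB of zone memory. With 16 KiB pages, atop(1024 * 1024) == 64, so
 * only zones spanning more than 64 pages are logged; with 4 KiB pages the
 * threshold is 256 pages. ptoa_64() converts the page count back to bytes
 * for the "Cur Size" column.
 */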
fe8ab488 1613#if CONFIG_ECC_LOGGING
5ba3f43e 1614__private_extern__ void
f427ee49 1615panic_display_ecc_errors(void)
fe8ab488
A
1616{
1617 uint32_t count = ecc_log_get_correction_count();
1618
1619 if (count > 0) {
5ba3f43e 1620 paniclog_append_noflush("ECC Corrections:%u\n", count);
fe8ab488
A
1621 }
1622}
1623#endif /* CONFIG_ECC_LOGGING */
1624
6d2010ae 1625#if CONFIG_ZLEAKS
0a7de745 1626extern boolean_t panic_include_ztrace;
6d2010ae 1627extern struct ztrace* top_ztrace;
04b8595b
A
1628void panic_print_symbol_name(vm_address_t search);
1629
6d2010ae
A
1630/*
1631 * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
1632 * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
1633 */
5ba3f43e
A
1634__private_extern__ void
1635panic_display_ztrace(void)
6d2010ae 1636{
0a7de745 1637 if (panic_include_ztrace == TRUE) {
6d2010ae 1638 unsigned int i = 0;
0a7de745 1639 boolean_t keepsyms = FALSE;
04b8595b 1640
0a7de745 1641 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
6d2010ae 1642 struct ztrace top_ztrace_copy;
0a7de745 1643
6d2010ae 1644 /* Make sure not to trip another panic if there's something wrong with memory */
0a7de745 1645 if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
5ba3f43e 1646 paniclog_append_noflush("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
6d2010ae 1647 /* Print the backtrace addresses */
0a7de745 1648 for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) {
5ba3f43e 1649 paniclog_append_noflush("%p ", top_ztrace_copy.zt_stack[i]);
04b8595b
A
1650 if (keepsyms) {
1651 panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]);
1652 }
5ba3f43e 1653 paniclog_append_noflush("\n");
6d2010ae
A
1654 }
1655 /* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
1656 kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
0a7de745 1657 } else {
5ba3f43e 1658 paniclog_append_noflush("\nCan't access top_ztrace...\n");
6d2010ae 1659 }
5ba3f43e 1660 paniclog_append_noflush("\n");
6d2010ae
A
1661 }
1662}
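/*
 * Note: panic_print_symbol_name() can only resolve the addresses printed
 * above into symbol names when the kernel was booted with the keepsyms=1
 * boot-arg (checked at the top of panic_display_ztrace()); otherwise the
 * panic log contains raw return addresses plus the kext attribution from
 * kmod_panic_dump().
 */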
1663#endif /* CONFIG_ZLEAKS */
1664
39236c6e 1665#if !CONFIG_TELEMETRY
5ba3f43e
A
1666int
1667telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean_t mark __unused)
39236c6e
A
1668{
1669 return KERN_NOT_SUPPORTED;
1670}
1671#endif
5ba3f43e
A
1672
1673#include <machine/machine_cpu.h>
1674
1675uint32_t kern_feature_overrides = 0;
1676
0a7de745
A
1677boolean_t
1678kern_feature_override(uint32_t fmask)
1679{
5ba3f43e
A
1680 if (kern_feature_overrides == 0) {
1681 uint32_t fdisables = 0;
cb323159
A
1682 /*
1683 * Expected to be invoked for the first time early in boot, in a
5ba3f43e
A
1684 * single-threaded environment.
1685 */
1686 if (PE_parse_boot_argn("validation_disables", &fdisables, sizeof(fdisables))) {
1687 fdisables |= KF_INITIALIZED;
1688 kern_feature_overrides = fdisables;
1689 } else {
1690 kern_feature_overrides |= KF_INITIALIZED;
1691 }
1692 }
0a7de745 1693 return (kern_feature_overrides & fmask) == fmask;
5ba3f43e 1694}
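/*
 * Illustrative usage (not from this file): a subsystem can let an
 * engineer disable an expensive validation path via the
 * "validation_disables" boot-arg parsed above. KF_EXAMPLE_OVRD is a
 * placeholder name; the real override masks are defined alongside
 * KF_INITIALIZED in osfmk/kern/debug.h.
 *
 *	if (!kern_feature_override(KF_EXAMPLE_OVRD)) {
 *		// run the expensive validation path
 *	}
 */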
cb323159
A
1695
1696boolean_t
1697on_device_corefile_enabled(void)
1698{
f427ee49 1699 assert(startup_phase >= STARTUP_SUB_TUNABLES);
cb323159 1700#if CONFIG_KDP_INTERACTIVE_DEBUGGING
f427ee49
A
1701 if (debug_boot_arg == 0) {
1702 return FALSE;
1703 }
1704 if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
1705 return FALSE;
1706 }
1707#if !XNU_TARGET_OS_OSX
1708 /*
1709 * outside of macOS, if there's a debug boot-arg set and local
1710 * cores aren't explicitly disabled, we always write a corefile.
1711 */
1712 return TRUE;
1713#else /* !XNU_TARGET_OS_OSX */
1714 /*
1715 * on macOS, if corefiles on panic are requested and local cores
1716 * aren't disabled we write a local core.
1717 */
1718 if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
cb323159
A
1719 return TRUE;
1720 }
f427ee49
A
1721#endif /* !XNU_TARGET_OS_OSX */
1722#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
cb323159
A
1723 return FALSE;
1724}
1725
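/*
 * Summary of the policy above (bit values not restated here; see the DB_*
 * definitions in osfmk/kern/debug.h): on macOS a local corefile is
 * written only when the debug boot-arg has DB_KERN_DUMP_ON_PANIC or
 * DB_KERN_DUMP_ON_NMI set and DB_DISABLE_LOCAL_CORE clear; on other
 * platforms any non-zero debug boot-arg without DB_DISABLE_LOCAL_CORE
 * enables local cores.
 */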
1726boolean_t
1727panic_stackshot_to_disk_enabled(void)
1728{
f427ee49 1729 assert(startup_phase >= STARTUP_SUB_TUNABLES);
cb323159
A
1730#if defined(__x86_64__)
1731 if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
1732 /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
1733 if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
1734 return FALSE;
1735 }
1736
1737 return TRUE;
1738 }
1739#endif
1740 return FALSE;
1741}
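/*
 * Note: panic stackshots are written to disk only on x86 machines
 * reporting a coprocessor version below kCoprocessorVersion2 (the
 * "pre-Gibraltar" machines mentioned above), and even there the behavior
 * can be opted out of by setting DB_DISABLE_STACKSHOT_TO_DISK in the
 * debug boot-arg.
 */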
f427ee49
A
1742
1743#if DEBUG || DEVELOPMENT
1744const char *
1745sysctl_debug_get_preoslog(size_t *size)
1746{
1747 int result = 0;
1748 void *preoslog_pa = NULL;
1749 int preoslog_size = 0;
1750
1751 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
1752 if (result || preoslog_pa == NULL || preoslog_size == 0) {
1753 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
1754 *size = 0;
1755 return NULL;
1756 }
1757
1758 /*
1759 * Beware:
1760 * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer.
1761 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
1762 */
1763 *size = preoslog_size;
1764 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
1765}
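/*
 * Illustrative sketch (not part of this build): a variant that did not
 * need to preserve the buffer for coredumps could return the region to
 * the system after copying the log out, along the lines of:
 *
 *	IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
 *	// ... copy the log somewhere safe ...
 *	IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
 */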
1766#endif /* DEBUG || DEVELOPMENT */