+struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
+char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
+char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));
+
+/*
+ * We don't include the size of the panic header in the length of the data we actually write.
+ * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end
+ * of the log because we only support writing (3*PAGESIZE) bytes.
+ */
+unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));
+
+boolean_t extended_debug_log_enabled = FALSE;
+#endif
+
+/* Debugger state */
+atomic_int debugger_cpu = ATOMIC_VAR_INIT(DEBUGGER_NO_CPU);
+boolean_t debugger_allcpus_halted = FALSE;
+boolean_t debugger_safe_to_return = TRUE;
+unsigned int debugger_context = 0;
+
+static char model_name[64];
+unsigned char *kernel_uuid;
+
+boolean_t kernelcache_uuid_valid = FALSE;
+uuid_t kernelcache_uuid;
+uuid_string_t kernelcache_uuid_string;
+
+/*
+ * By default we treat Debugger() the same as calls to panic(), unless
+ * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *not* set,
+ * in which case returning from Debugger() is supported.
+ *
+ * Return from Debugger() is currently only implemented on x86
+ */
+static boolean_t debugger_is_panic = TRUE;
+
+#if DEVELOPMENT || DEBUG
+boolean_t debug_boot_arg_inited = FALSE;
+#endif
+
+SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg;
+
+char kernel_uuid_string[37]; /* uuid_string_t */
+char panic_disk_error_description[512];
+size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);
+
+extern unsigned int write_trace_on_panic;
+int kext_assertions_enable =
+#if DEBUG || DEVELOPMENT
+ TRUE;
+#else
+ FALSE;
+#endif
+
+/*
+ * Maintain the physically-contiguous carveout for the `phys_carveout_mb`
+ * boot-arg.
+ */
+SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
+SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
+SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
+
+void
+panic_init(void)
+{
+ unsigned long uuidlen = 0;
+ void *uuid;
+
+ uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
+ if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
+ kernel_uuid = uuid;
+ uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
+ }
+
+#if CONFIG_NONFATAL_ASSERTS
+ if (!PE_parse_boot_argn("assertions", &mach_assert, sizeof(mach_assert))) {
+ mach_assert = 1;
+ }
+#endif
+
+ /*
+ * Initialize the value of the debug boot-arg
+ */
+ debug_boot_arg = 0;
+#if ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__))
+ if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof(debug_boot_arg))) {
+#if DEVELOPMENT || DEBUG
+ if (debug_boot_arg & DB_HALT) {
+ halt_in_debugger = 1;
+ }
+#endif
+
+#if CONFIG_EMBEDDED
+ if (debug_boot_arg & DB_NMI) {
+ panicDebugging = TRUE;
+ }
+#else
+ panicDebugging = TRUE;
+#if KDEBUG_MOJO_TRACE
+ if (debug_boot_arg & DB_PRT_KDEBUG) {
+ kdebug_serial = TRUE;
+ }
+#endif
+#endif /* CONFIG_EMBEDDED */
+ }
+
+ if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) {
+ max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX;
+ }
+
+#endif /* ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__)) */
+
+#if DEVELOPMENT || DEBUG
+ debug_boot_arg_inited = TRUE;
+#endif
+
+#if !CONFIG_EMBEDDED
+ /*
+	 * By default we treat Debugger() the same as calls to panic(), unless
+	 * debug boot-args are present and DB_KERN_DUMP_ON_NMI is *not* set,
+	 * in which case returning from Debugger() is supported. DB_KERN_DUMP_ON_NMI
+	 * implies panic treatment because writing an on-device corefile is a
+	 * destructive operation.
+ *
+ * Return from Debugger() is currently only implemented on x86
+ */
+ if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
+ debugger_is_panic = FALSE;
+ }
+#endif
+}
+
+#if defined (__x86_64__)
+void
+extended_debug_log_init(void)
+{
+ assert(coprocessor_paniclog_flush);
+ /*
+ * Allocate an extended panic log buffer that has space for the panic
+ * stackshot at the end. Update the debug buf pointers appropriately
+ * to point at this new buffer.
+ */
+ char *new_debug_buf = kalloc(EXTENDED_DEBUG_BUF_SIZE);
+ /*
+ * iBoot pre-initializes the panic region with the NULL character. We set this here
+ * so we can accurately calculate the CRC for the region without needing to flush the
+ * full region over SMC.
+ */
+ memset(new_debug_buf, '\0', EXTENDED_DEBUG_BUF_SIZE);
+
+ panic_info = (struct macos_panic_header *)new_debug_buf;
+ debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
+ debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));
+
+ extended_debug_log_enabled = TRUE;
+
+ /*
+ * Insert a compiler barrier so we don't free the other panic stackshot buffer
+ * until after we've marked the new one as available
+ */
+ __compiler_barrier();
+ kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
+ panic_stackshot_buf = 0;
+ panic_stackshot_buf_len = 0;
+}
+#endif /* defined (__x86_64__) */
+
+void
+debug_log_init(void)
+{
+#if CONFIG_EMBEDDED
+ if (!gPanicBase) {
+ printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
+ return;
+ }
+ /* Shift debug buf start location and size by the length of the panic header */
+ debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
+ debug_buf_ptr = debug_buf_base;
+ debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
+#else
+ kern_return_t kr = KERN_SUCCESS;
+ bzero(panic_info, DEBUG_BUF_SIZE);
+
+ assert(debug_buf_base != NULL);
+ assert(debug_buf_ptr != NULL);
+ assert(debug_buf_size != 0);
+
+ /*
+ * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
+ * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
+ * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
+ * up.
+ */
+ kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
+ assert(kr == KERN_SUCCESS);
+ if (kr == KERN_SUCCESS) {
+ panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
+ }
+#endif
+}
+
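+/*
+ * Allocate the physically-contiguous carveout requested via the
+ * phys_carveout_mb boot-arg. Only honored when debugging is permitted
+ * on this device (PE_i_can_has_debugger()).
+ */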
+void
+phys_carveout_init(void)
+{
+ if (!PE_i_can_has_debugger(NULL)) {
+ return;
+ }
+
+ unsigned int phys_carveout_mb = 0;
+
+ if (!PE_parse_boot_argn("phys_carveout_mb", &phys_carveout_mb,
+ sizeof(phys_carveout_mb))) {
+ return;
+ }
+ if (phys_carveout_mb == 0) {
+ return;
+ }
+
+ size_t size = 0;
+ if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &size)) {
+ printf("phys_carveout_mb size overflowed (%uMB)\n",
+ phys_carveout_mb);
+ return;
+ }
+
+ kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, size,
+ VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT,
+ VM_KERN_MEMORY_DIAG);
+ if (kr != KERN_SUCCESS) {
+ printf("failed to allocate %uMB for phys_carveout_mb: %u\n",
+ phys_carveout_mb, (unsigned int)kr);
+ return;
+ }
+
+ phys_carveout_pa = kvtophys(phys_carveout);
+ phys_carveout_size = size;
+}
+
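+/*
+ * Acquire the debugger lock by spinning until this CPU can atomically
+ * install its number into debugger_cpu. The lock is recursive with
+ * respect to the owning CPU: if we already hold it, return immediately.
+ */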
+static void
+DebuggerLock(void)
+{
+ int my_cpu = cpu_number();
+ int debugger_exp_cpu = DEBUGGER_NO_CPU;
+ assert(ml_get_interrupts_enabled() == FALSE);
+
+ if (atomic_load(&debugger_cpu) == my_cpu) {
+ return;
+ }
+
+ while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
+ debugger_exp_cpu = DEBUGGER_NO_CPU;
+ }
+
+ return;
+}
+
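+/*
+ * Release the debugger lock. Only the CPU that currently owns the lock
+ * may call this (asserted below).
+ */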
+static void
+DebuggerUnlock(void)
+{
+ assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());
+
+ /*
+ * We don't do an atomic exchange here in case
+ * there's another CPU spinning to acquire the debugger_lock
+ * and we never get a chance to update it. We already have the
+ * lock so we can simply store DEBUGGER_NO_CPU and follow with
+ * a barrier.
+ */
+ atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
+ OSMemoryBarrier();
+
+ return;
+}
+
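+/*
+ * Quiesce the other CPUs before the debugger or panic flow proceeds.
+ * Embedded platforms use the debugger cross-call mechanism; x86 uses
+ * mp_kdp_enter() and always reports success.
+ */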
+static kern_return_t
+DebuggerHaltOtherCores(boolean_t proceed_on_failure)
+{
+#if CONFIG_EMBEDDED
+ return DebuggerXCallEnter(proceed_on_failure);
+#else /* CONFIG_EMBEDDED */
+#pragma unused(proceed_on_failure)
+ mp_kdp_enter(proceed_on_failure);
+ return KERN_SUCCESS;
+#endif
+}
+
+static void
+DebuggerResumeOtherCores(void)
+{
+#if CONFIG_EMBEDDED
+ DebuggerXCallReturn();
+#else /* CONFIG_EMBEDDED */
+ mp_kdp_exit();
+#endif
+}
+
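+/*
+ * Record the requested debugger operation and any panic metadata in the
+ * current processor's per-CPU debugger state (the CPUDEBUGGER... and
+ * CPUPANIC... accessors) so that it survives the trap into the debugger.
+ */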
+static void
+DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+ va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+ boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ CPUDEBUGGEROP = db_op;
+
+ /* Preserve the original panic message */
+ if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
+ CPUDEBUGGERMSG = db_message;
+ CPUPANICSTR = db_panic_str;
+ CPUPANICARGS = db_panic_args;
+ CPUPANICDATAPTR = db_panic_data_ptr;
+ CPUPANICCALLER = db_panic_caller;
+ } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
+ kprintf("Nested panic detected:");
+		_doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
+ }
+
+ CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
+ CPUDEBUGGERRET = KERN_SUCCESS;
+
+ /* Reset these on any nested panics */
+ CPUPANICOPTS = db_panic_options;
+
+ return;
+}
+
+/*
+ * Save the requested debugger state/action into the current processor's processor_data
+ * and trap to the debugger.
+ */
+kern_return_t
+DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+ va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+ boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ kern_return_t ret;
+
+ assert(ml_get_interrupts_enabled() == FALSE);
+ DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
+ db_panic_options, db_panic_data_ptr,
+ db_proceed_on_sync_failure, db_panic_caller);
+
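+	/* Trap into the debugger; TRAP_DEBUGGER expands to the architecture-specific trap instruction */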
+ TRAP_DEBUGGER;
+
+ ret = CPUDEBUGGERRET;
+
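+	/* Reset the per-CPU debugger state now that the operation has completed */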
+ DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);
+
+ return ret;
+}
+
+void __attribute__((noinline))
+Assert(
+ const char *file,
+ int line,
+ const char *expression
+ )
+{
+#if CONFIG_NONFATAL_ASSERTS
+ if (!mach_assert) {
+		kprintf("%s:%d non-fatal Assertion: %s\n", file, line, expression);
+ return;
+ }
+#endif
+
+ panic_plain("%s:%d Assertion failed: %s", file, line, expression);
+}
+
+
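+/*
+ * Debugger() is the historical debugger entry point; it forwards to
+ * DebuggerWithContext() with no saved machine context and no options.
+ */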
+void
+Debugger(const char *message)
+{
+ DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE);
+}
+
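+/*
+ * Enter the debugger with an optional saved machine context. Interrupts and
+ * preemption are disabled for the duration, and runaway nested entries are
+ * cut short by rebooting or spinning forever, depending on whether panic
+ * debugging is enabled.
+ */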
+void
+DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
+ uint64_t debugger_options_mask)
+{
+ spl_t previous_interrupts_state;
+ boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
+
+ previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
+ disable_preemption();
+
+ CPUDEBUGGERCOUNT++;
+
+ if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
+ static boolean_t in_panic_kprintf = FALSE;
+
+ /* Notify any listeners that we've started a panic */
+ PEHaltRestart(kPEPanicBegin);
+
+ if (!in_panic_kprintf) {
+ in_panic_kprintf = TRUE;
+ kprintf("Detected nested debugger entry count exceeding %d\n",
+ max_debugger_entry_count);
+ in_panic_kprintf = FALSE;
+ }
+
+ if (!panicDebugging) {
+ kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
+ }
+
+ panic_spin_forever();
+ }
+
+#if DEVELOPMENT || DEBUG
+ DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
+#endif
+
+ doprnt_hide_pointers = FALSE;
+
+ if (ctx != NULL) {
+ DebuggerSaveState(DBOP_DEBUGGER, message,
+ NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
+ handle_debugger_trap(reason, 0, 0, ctx);
+ DebuggerSaveState(DBOP_NONE, NULL, NULL,
+ NULL, 0, NULL, FALSE, 0);
+ } else {
+ DebuggerTrapWithState(DBOP_DEBUGGER, message,
+ NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
+ }
+
+ CPUDEBUGGERCOUNT--;
+ doprnt_hide_pointers = old_doprnt_hide_pointers;
+ enable_preemption();
+ ml_set_interrupts_enabled(previous_interrupts_state);
+}
+
+static struct kdp_callout {
+ struct kdp_callout * callout_next;
+ kdp_callout_fn_t callout_fn;
+ boolean_t callout_in_progress;
+ void * callout_arg;
+} * kdp_callout_list = NULL;
+
+/*
+ * Called from kernel context to register a kdp event callout.
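+ * Registration is lock-less: the new entry is pushed onto the head of the
+ * list with compare-and-swap, and entries are never unregistered.
+ *
+ * A hypothetical client (names are illustrative only) might do:
+ *	static void my_kdp_cb(void *arg, kdp_event_t event);
+ *	kdp_register_callout(my_kdp_cb, NULL);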
+ */
+void
+kdp_register_callout(kdp_callout_fn_t fn, void * arg)
+{
+ struct kdp_callout * kcp;
+ struct kdp_callout * list_head;
+
+ kcp = kalloc(sizeof(*kcp));
+ if (kcp == NULL) {
+ panic("kdp_register_callout() kalloc failed");
+ }
+
+ kcp->callout_fn = fn;
+ kcp->callout_arg = arg;
+ kcp->callout_in_progress = FALSE;
+
+ /* Lock-less list insertion using compare and exchange. */
+ do {
+ list_head = kdp_callout_list;
+ kcp->callout_next = list_head;
+ } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
+}
+
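+/*
+ * Invoke each registered callout for the given kdp event. The
+ * callout_in_progress flag keeps a callout from being re-entered if the
+ * debugger path is entered recursively while it is running.
+ */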
+static void
+kdp_callouts(kdp_event_t event)
+{
+ struct kdp_callout *kcp = kdp_callout_list;
+
+ while (kcp) {
+ if (!kcp->callout_in_progress) {
+ kcp->callout_in_progress = TRUE;
+ kcp->callout_fn(kcp->callout_arg, event);
+ kcp->callout_in_progress = FALSE;
+ }
+ kcp = kcp->callout_next;
+ }
+}
+
+#if !defined (__x86_64__)
+/*
+ * Register an additional buffer with data to include in the panic log
+ *
+ * <rdar://problem/50137705> tracks supporting more than one buffer
+ *
+ * Note that producer_name and buf should never be de-allocated as we reference these during panic.
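+ *
+ * A hypothetical registration (names are illustrative only):
+ *	static char my_panic_data[64];
+ *	register_additional_panic_data_buffer("my_subsystem", my_panic_data, (int)sizeof(my_panic_data));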
+ */
+void
+register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
+{
+ if (panic_data_buffers != NULL) {
+ panic("register_additional_panic_data_buffer called with buffer already registered");
+ }
+
+ if (producer_name == NULL || (strlen(producer_name) == 0)) {
+ panic("register_additional_panic_data_buffer called with invalid producer_name");
+ }
+
+ if (buf == NULL) {
+ panic("register_additional_panic_data_buffer called with invalid buffer pointer");
+ }
+
+ if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
+ panic("register_additional_panic_data_buffer called with invalid length");
+ }
+
+ struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
+ new_panic_data_buffer->producer_name = producer_name;
+ new_panic_data_buffer->buf = buf;
+ new_panic_data_buffer->len = len;
+
+ if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
+ panic("register_additional_panic_data_buffer called with buffer already registered");
+ }
+
+ return;
+}
+#endif /* !defined (__x86_64__) */
+
+/*
+ * An overview of the xnu panic path:
+ *
+ * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
+ * panic_trap_to_debugger() sets the panic state in the current processor's processor_data_t prior
+ * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
+ * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
+ * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
+ * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
+ * according to the device's boot-args.
+ */
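+/*
+ * panic() may also be defined as a macro that captures caller context;
+ * undefine it here so we can define the underlying function.
+ */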
+#undef panic
+void
+panic(const char *str, ...)
+{
+ va_list panic_str_args;
+
+ va_start(panic_str_args, str);
+ panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0));
+ va_end(panic_str_args);
+}
+
+void
+panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
+{
+ va_list panic_str_args;
+
+ va_start(panic_str_args, str);
+ panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
+ NULL, (unsigned long)(char *)__builtin_return_address(0));
+ va_end(panic_str_args);
+}
+
+#if defined (__x86_64__)
+/*
+ * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
+ * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
+ * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
+ * thread when writing the panic log.
+ *
+ * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
+ */
+void
+panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
+{
+ va_list panic_str_args;
+ __assert_only os_ref_count_t th_ref_count;
+
+ assert_thread_magic(thread);
+ th_ref_count = os_ref_get_count(&thread->ref_count);
+ assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);
+
+ /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
+ thread_reference(thread);
+
+ va_start(panic_str_args, str);
+ panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
+ thread, (unsigned long)(char *)__builtin_return_address(0));
+
+ va_end(panic_str_args);
+}
+#endif /* defined (__x86_64__) */
+
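+/*
+ * panic_trap_to_debugger() usually does not return, but it can when panic
+ * debugging allows the system to resume, so we suppress the compiler's
+ * suggestion to mark it noreturn.
+ */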
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-noreturn"
+void
+panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
+ uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller)
+{
+#pragma clang diagnostic pop
+
+#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
+ /* Turn off I/O tracing once we've panicked */
+ mmiotrace_enabled = 0;
+#endif
+
+ if (ml_wants_panic_trap_to_debugger()) {
+ ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller);
+ __builtin_trap();
+ }
+
+ CPUDEBUGGERCOUNT++;
+
+ if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
+ static boolean_t in_panic_kprintf = FALSE;
+
+ /* Notify any listeners that we've started a panic */
+ PEHaltRestart(kPEPanicBegin);
+
+ if (!in_panic_kprintf) {
+ in_panic_kprintf = TRUE;
+ kprintf("Detected nested debugger entry count exceeding %d\n",
+ max_debugger_entry_count);
+ in_panic_kprintf = FALSE;
+ }
+
+ if (!panicDebugging) {
+ kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
+ }
+
+ panic_spin_forever();
+ }
+
+#if DEVELOPMENT || DEBUG
+ DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));