+debug_log_init(void)
+{
+#if CONFIG_EMBEDDED
+ /*
+  * Embedded targets carve the debug buffer out of the panic region.
+  * gPanicBase must already have been set up by platform init; if it
+  * has not, we cannot place the buffer, so bail out.
+  */
+ if (!gPanicBase) {
+ printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
+ return;
+ }
+ /* Shift debug buf start location and size by the length of the panic header */
+ debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
+ debug_buf_ptr = debug_buf_base;
+ debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
+#else
+ /*
+  * Non-embedded: the buffer pointers are expected to be statically
+  * initialized elsewhere; just clear the panic info area and sanity-check.
+  */
+ bzero(panic_info, DEBUG_BUF_SIZE);
+
+ assert(debug_buf_base != NULL);
+ assert(debug_buf_ptr != NULL);
+ assert(debug_buf_size != 0);
+#endif
+}
+
+/*
+ * Acquire the global debugger lock by atomically installing this CPU's
+ * number in debugger_cpu. Re-entrant: if this CPU already holds the lock
+ * (nested trap into the debugger), returns immediately. Must be called
+ * with interrupts disabled.
+ */
+static void
+DebuggerLock(void)
+{
+ int my_cpu = cpu_number();
+ int debugger_exp_cpu = DEBUGGER_NO_CPU;
+ assert(ml_get_interrupts_enabled() == FALSE);
+
+ if (debugger_cpu == my_cpu) {
+ return;
+ }
+
+ /*
+ * Spin until we transition debugger_cpu from DEBUGGER_NO_CPU to
+ * ourselves; the CAS rewrites debugger_exp_cpu on failure, so reset
+ * the expected value each iteration.
+ */
+ while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
+ debugger_exp_cpu = DEBUGGER_NO_CPU;
+ }
+}
+
+/*
+ * Release the global debugger lock. Caller must be the CPU that
+ * currently holds it (asserted below).
+ */
+static void
+DebuggerUnlock(void)
+{
+ assert(debugger_cpu == cpu_number());
+
+ /*
+ * We don't do an atomic exchange here in case
+ * there's another CPU spinning to acquire the debugger_lock
+ * and we never get a chance to update it. We already have the
+ * lock so we can simply store DEBUGGER_NO_CPU and follow with
+ * a barrier.
+ */
+ debugger_cpu = DEBUGGER_NO_CPU;
+ OSMemoryBarrier();
+}
+
+/*
+ * Bring the other CPUs to a halt for debugger entry.
+ *
+ * proceed_on_failure: if TRUE, continue even when some cores cannot be
+ * synchronously halted (honored on both paths; it is passed through to
+ * the platform call on each side of the #if).
+ */
+static kern_return_t
+DebuggerHaltOtherCores(boolean_t proceed_on_failure)
+{
+#if CONFIG_EMBEDDED
+ return DebuggerXCallEnter(proceed_on_failure);
+#else /* CONFIG_EMBEDDED */
+ /*
+ * The parameter IS used here, so no #pragma unused: marking a used
+ * variable unused draws a clang diagnostic.
+ */
+ mp_kdp_enter(proceed_on_failure);
+ return KERN_SUCCESS;
+#endif
+}
+
+/*
+ * Resume the CPUs previously halted by DebuggerHaltOtherCores().
+ */
+static void
+DebuggerResumeOtherCores(void)
+{
+#if CONFIG_EMBEDDED
+ DebuggerXCallReturn();
+#else /* CONFIG_EMBEDDED */
+ mp_kdp_exit();
+#endif
+}
+
+/*
+ * Record the requested debugger operation and its panic context in the
+ * current processor's per-CPU debugger state, for consumption after
+ * TRAP_DEBUGGER. On a nested panic the original panic string/args are
+ * preserved and the nested message is only kprintf'd.
+ */
+static void
+DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ CPUDEBUGGEROP = db_op;
+
+ /* Preserve the original panic message */
+ if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
+ CPUDEBUGGERMSG = db_message;
+ CPUPANICSTR = db_panic_str;
+ CPUPANICARGS = db_panic_args;
+ CPUPANICDATAPTR = db_panic_data_ptr;
+ CPUPANICCALLER = db_panic_caller;
+ } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
+ /* db_panic_str is non-NULL here per the condition above; no re-check needed */
+ kprintf("Nested panic detected:");
+ _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
+ }
+
+ CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
+ CPUDEBUGGERRET = KERN_SUCCESS;
+
+ /* Reset these on any nested panics */
+ CPUPANICOPTS = db_panic_options;
+}
+
+/*
+ * Save the requested debugger state/action into the current processor's processor_data
+ * and trap to the debugger.
+ */
+kern_return_t
+DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+    va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+    boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ kern_return_t ret;
+
+ assert(ml_get_interrupts_enabled() == FALSE);
+ /* Stash the op and panic context in per-CPU state before trapping */
+ DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
+ db_panic_options, db_panic_data_ptr,
+ db_proceed_on_sync_failure, db_panic_caller);
+
+ /* Trap into the debugger; it consumes the state saved above */
+ TRAP_DEBUGGER;
+
+ /* Capture the result before clearing the per-CPU debugger state */
+ ret = CPUDEBUGGERRET;
+
+ /* Reset per-CPU state to DBOP_NONE now that the trap has returned */
+ DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);
+
+ return ret;
+}
+
+void __attribute__((noinline))