+ * Save the requested debugger state/action into the current processor's processor_data
+ * and trap to the debugger.
+ */
+kern_return_t
+DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+ va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+ boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ kern_return_t ret;
+
+ assert(ml_get_interrupts_enabled() == FALSE);
+ DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
+ db_panic_options, db_panic_data_ptr,
+ db_proceed_on_sync_failure, db_panic_caller);
+
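+	/* Trap into the debugger via a machine-dependent trap instruction. */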
+ TRAP_DEBUGGER;
+
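+	/* Collect the result the trap handler recorded in this CPU's debugger state. */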
+ ret = CPUDEBUGGERRET;
+
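+	/* Clear the saved per-CPU debugger state now that the trap has been serviced. */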
+ DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);
+
+ return ret;
+}
+
+void __attribute__((noinline))
+Assert(
+ const char *file,
+ int line,
+ const char *expression
+ )
+{
+#if CONFIG_NONFATAL_ASSERTS
+ if (!mach_assert) {
+ kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
+ return;
+ }
+#endif
+
+ panic_plain("%s:%d Assertion failed: %s", file, line, expression);
+}
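+
+/*
+ * A sketch of how Assert() is typically reached (illustrative; the exact
+ * macro lives in assert.h): the assert() macro stringifies the failed
+ * expression and forwards the call site, along these lines:
+ *
+ *	#define assert(ex) \
+ *		(__builtin_expect(!!(ex), 1L) ? (void)0 : Assert(__FILE__, __LINE__, #ex))
+ *
+ * so assert(ptr != NULL) reports "file.c:42 Assertion failed: ptr != NULL".
+ */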
+
+void
+Debugger(const char *message)
+{
+ DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE);
+}
+
+void
+DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
+ uint64_t debugger_options_mask)
+{
+ spl_t previous_interrupts_state;
+ boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
+
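+	/* Disable interrupts and preemption so we stay on this CPU while saving per-CPU debugger state. */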
+ previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
+ disable_preemption();
+
+ CPUDEBUGGERCOUNT++;
+
+ if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
+ static boolean_t in_panic_kprintf = FALSE;
+
+ /* Notify any listeners that we've started a panic */
+ PEHaltRestart(kPEPanicBegin);
+
+ if (!in_panic_kprintf) {
+ in_panic_kprintf = TRUE;
+ kprintf("Detected nested debugger entry count exceeding %d\n",
+ max_debugger_entry_count);
+ in_panic_kprintf = FALSE;
+ }
+
+ if (!panicDebugging) {
+ kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
+ }
+
+ panic_spin_forever();
+ }
+
+#if DEVELOPMENT || DEBUG
+ DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));
+#endif
+
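+	/* Show real pointer values in debugger output rather than obfuscated ones. */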
+ doprnt_hide_pointers = FALSE;
+
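+	/*
+	 * If the caller supplied a saved machine context, handle the debugger
+	 * trap directly with it; otherwise trap into the debugger from here.
+	 */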
+ if (ctx != NULL) {
+ DebuggerSaveState(DBOP_DEBUGGER, message,
+ NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
+ handle_debugger_trap(reason, 0, 0, ctx);
+ DebuggerSaveState(DBOP_NONE, NULL, NULL,
+ NULL, 0, NULL, FALSE, 0);
+ } else {
+ DebuggerTrapWithState(DBOP_DEBUGGER, message,
+ NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
+ }
+
+ CPUDEBUGGERCOUNT--;
+ doprnt_hide_pointers = old_doprnt_hide_pointers;
+ enable_preemption();
+ ml_set_interrupts_enabled(previous_interrupts_state);
+}
+
+static struct kdp_callout {
+ struct kdp_callout * callout_next;
+ kdp_callout_fn_t callout_fn;
+ boolean_t callout_in_progress;
+ void * callout_arg;
+} * kdp_callout_list = NULL;
+
+/*
+ * Called from kernel context to register a kdp event callout.
+ */
+void
+kdp_register_callout(kdp_callout_fn_t fn, void * arg)
+{
+ struct kdp_callout * kcp;
+ struct kdp_callout * list_head;
+
+ kcp = kalloc(sizeof(*kcp));
+ if (kcp == NULL) {
+ panic("kdp_register_callout() kalloc failed");
+ }
+
+ kcp->callout_fn = fn;
+ kcp->callout_arg = arg;
+ kcp->callout_in_progress = FALSE;
+
+ /* Lock-less list insertion using compare and exchange. */
+ do {
+ list_head = kdp_callout_list;
+ kcp->callout_next = list_head;
+ } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
+}
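+
+/*
+ * Usage sketch (illustrative only; my_kdp_handler and my_driver_ctx are
+ * hypothetical): a subsystem registers a function to be notified when the
+ * debugger is entered or exited, e.g.
+ *
+ *	static void
+ *	my_kdp_handler(void *arg, kdp_event_t event)
+ *	{
+ *		if (event == KDP_EVENT_ENTER) {
+ *			// quiesce hardware before the debugger takes over
+ *		}
+ *	}
+ *
+ *	kdp_register_callout(my_kdp_handler, my_driver_ctx);
+ */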
+
+static void
+kdp_callouts(kdp_event_t event)
+{
+ struct kdp_callout *kcp = kdp_callout_list;
+
+ while (kcp) {
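+		/* Skip callouts already in flight to avoid recursion on nested entry. */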
+ if (!kcp->callout_in_progress) {
+ kcp->callout_in_progress = TRUE;
+ kcp->callout_fn(kcp->callout_arg, event);
+ kcp->callout_in_progress = FALSE;
+ }
+ kcp = kcp->callout_next;
+ }
+}
+
+#if !defined (__x86_64__)
+/*
+ * Register an additional buffer with data to include in the panic log
+ *
+ * <rdar://problem/50137705> tracks supporting more than one buffer
+ *
+ * Note that producer_name and buf must never be deallocated, as we reference them during panic.
+ */
+void
+register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
+{
+ if (panic_data_buffers != NULL) {
+ panic("register_additional_panic_data_buffer called with buffer already registered");
+ }
+
+ if (producer_name == NULL || (strlen(producer_name) == 0)) {
+ panic("register_additional_panic_data_buffer called with invalid producer_name");
+ }
+
+ if (buf == NULL) {
+ panic("register_additional_panic_data_buffer called with invalid buffer pointer");
+ }
+
+ if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
+ panic("register_additional_panic_data_buffer called with invalid length");
+ }
+
+	struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
+	if (new_panic_data_buffer == NULL) {
+		panic("register_additional_panic_data_buffer() kalloc failed");
+	}
+
+ new_panic_data_buffer->producer_name = producer_name;
+ new_panic_data_buffer->buf = buf;
+ new_panic_data_buffer->len = len;
+
+ if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
+ panic("register_additional_panic_data_buffer called with buffer already registered");
+ }
+
+ return;
+}
+#endif /* !defined (__x86_64__) */
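+
+/*
+ * Usage sketch (illustrative only; the names are hypothetical): a subsystem
+ * registers a long-lived buffer once at init time and keeps its contents
+ * updated so they can be swept into the paniclog:
+ *
+ *	static char my_panic_buf[ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN];
+ *
+ *	register_additional_panic_data_buffer("my_subsystem", my_panic_buf,
+ *	    sizeof(my_panic_buf));
+ */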
+
+/*
+ * An overview of the xnu panic path:
+ *
+ * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
+ * panic_trap_to_debugger() sets the panic state in the current processor's processor_data_t prior
+ * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
+ * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
+ * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
+ * continue to debugger_collect_diagnostics(), where we write the paniclog and corefile (if appropriate)
+ * and proceed according to the device's boot-args.