+	doprnt_hide_pointers = FALSE;
+
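+	/*
+	 * If an exception context was supplied, save the debugger state and
+	 * call the trap handler directly; otherwise take a fresh trap into
+	 * the debugger.
+	 */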
+	if (ctx != NULL) {
+		DebuggerSaveState(DBOP_DEBUGGER, message,
+		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
+		handle_debugger_trap(reason, 0, 0, ctx);
+		DebuggerSaveState(DBOP_NONE, NULL, NULL,
+		    NULL, 0, NULL, FALSE, 0);
+	} else {
+		DebuggerTrapWithState(DBOP_DEBUGGER, message,
+		    NULL, NULL, debugger_options_mask, NULL, TRUE, 0);
+	}
+
+	CPUDEBUGGERCOUNT--;
+	doprnt_hide_pointers = old_doprnt_hide_pointers;
+	enable_preemption();
+	ml_set_interrupts_enabled(previous_interrupts_state);
+}
+
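+/*
+ * Singly-linked list of registered kdp event callouts.  The head is
+ * updated lock-free with compare-and-swap; entries are never removed.
+ */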
+static struct kdp_callout {
+	struct kdp_callout	*callout_next;
+	kdp_callout_fn_t	callout_fn;
+	boolean_t		callout_in_progress;
+	void			*callout_arg;
+} *kdp_callout_list = NULL;
+
+/*
+ * Called from kernel context to register a kdp event callout.
+ */
+void
+kdp_register_callout(kdp_callout_fn_t fn, void * arg)
+{
+	struct kdp_callout *kcp;
+	struct kdp_callout *list_head;
+
+	kcp = kalloc(sizeof(*kcp));
+	if (kcp == NULL) {
+		panic("kdp_register_callout() kalloc failed");
+	}
+
+	kcp->callout_fn = fn;
+	kcp->callout_arg = arg;
+	kcp->callout_in_progress = FALSE;
+
+	/*
+	 * Lock-less list insertion using compare and exchange:
+	 * retry if another CPU raced us and changed the list head.
+	 */
+	do {
+		list_head = kdp_callout_list;
+		kcp->callout_next = list_head;
+	} while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
+}
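+
+/*
+ * Example usage (a sketch with hypothetical names, not part of this file):
+ * a subsystem registers a callout to be notified on debugger entry and
+ * exit.  Since callouts are never unregistered, `arg` must remain valid
+ * for the lifetime of the system.
+ */
+#if 0 /* illustrative only */
+static void
+example_kdp_callout(void *arg, kdp_event_t event)
+{
+	(void)arg; /* hypothetical subsystem state */
+
+	switch (event) {
+	case KDP_EVENT_ENTER:
+		/* Quiesce hardware before the debugger takes over. */
+		break;
+	case KDP_EVENT_EXIT:
+		/* Resume normal operation. */
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+example_init(void *example_state)
+{
+	kdp_register_callout(example_kdp_callout, example_state);
+}
+#endif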
+
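+/*
+ * Dispatch registered callouts for a kdp event.  Callouts already marked
+ * in-progress are skipped, which keeps a callout that itself re-enters
+ * the debugger path from being invoked recursively.
+ */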
+static void
+kdp_callouts(kdp_event_t event)
+{
+	struct kdp_callout *kcp = kdp_callout_list;
+
+	while (kcp) {
+		if (!kcp->callout_in_progress) {
+			kcp->callout_in_progress = TRUE;
+			kcp->callout_fn(kcp->callout_arg, event);
+			kcp->callout_in_progress = FALSE;
+		}
+		kcp = kcp->callout_next;
+	}
+}
+
+#if !defined (__x86_64__)
+/*
+ * Register an additional buffer with data to include in the panic log
+ *
+ * <rdar://problem/50137705> tracks supporting more than one buffer
+ *
+ * Note that producer_name and buf must never be deallocated, as we reference them during panic.
+ */
+void
+register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
+{
+	if (panic_data_buffers != NULL) {
+		panic("register_additional_panic_data_buffer called with buffer already registered");
+	}
+
+	if (producer_name == NULL || (strlen(producer_name) == 0)) {
+		panic("register_additional_panic_data_buffer called with invalid producer_name");
+	}
+
+	if (buf == NULL) {
+		panic("register_additional_panic_data_buffer called with invalid buffer pointer");
+	}
+
+	if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
+		panic("register_additional_panic_data_buffer called with invalid length");
+	}
+
+	struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer));
+	if (new_panic_data_buffer == NULL) {
+		panic("register_additional_panic_data_buffer kalloc failed");
+	}
+
+	new_panic_data_buffer->producer_name = producer_name;
+	new_panic_data_buffer->buf = buf;
+	new_panic_data_buffer->len = len;
+
+	if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
+		panic("register_additional_panic_data_buffer called with buffer already registered");
+	}
+
+	return;
+}
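+
+/*
+ * Example usage (a sketch with hypothetical names, not part of this file):
+ * a subsystem registers a static buffer whose contents are appended to
+ * the panic log.  Both the producer name and the buffer are referenced
+ * at panic time, so neither may ever be deallocated.
+ */
+#if 0 /* illustrative only */
+static char example_panic_data[512];
+
+static void
+example_panic_data_init(void)
+{
+	register_additional_panic_data_buffer("example.subsystem",
+	    example_panic_data, (int)sizeof(example_panic_data));
+}
+#endif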
+#endif /* !defined (__x86_64__) */
+
+/*
+ * An overview of the xnu panic path:
+ *
+ * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
+ * panic_trap_to_debugger() sets the panic state in the current processor's processor_data_t prior
+ * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap(),
+ * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
+ * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
+ * continue to debugger_collect_diagnostics(), where we write the paniclog and corefile (if appropriate)
+ * and proceed according to the device's boot-args.
+ */
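+
+/*
+ * Illustrative sketch only (not the actual handle_debugger_trap()
+ * implementation): how the panic lock described above could be won by
+ * atomically swapping the current CPU number into debugger_cpu, assuming
+ * debugger_cpu is a 32-bit CPU identifier and DEBUGGER_NO_CPU marks the
+ * lock as free.
+ */
+#if 0 /* illustrative only */
+static void
+example_acquire_debugger_lock(unsigned int my_cpu)
+{
+	/* Spin until this CPU atomically swaps itself in as the debugger CPU. */
+	while (!OSCompareAndSwap(DEBUGGER_NO_CPU, my_cpu,
+	    (volatile UInt32 *)&debugger_cpu)) {
+		/* Another CPU won the race; spin until it releases the lock. */
+	}
+}
+#endif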
+#undef panic