+/*
+ * When a debugger is allowed on this system and the phys_carveout_mb
+ * boot-arg is set, reserve that many megabytes of physically contiguous
+ * memory for diagnostic use, recording its kernel virtual address,
+ * physical address and size in phys_carveout, phys_carveout_pa and
+ * phys_carveout_size.
+ */
+void
+phys_carveout_init(void)
+{
+ if (!PE_i_can_has_debugger(NULL)) {
+ return;
+ }
+
+ unsigned int phys_carveout_mb = 0;
+
+ if (!PE_parse_boot_argn("phys_carveout_mb", &phys_carveout_mb,
+ sizeof(phys_carveout_mb))) {
+ return;
+ }
+ if (phys_carveout_mb == 0) {
+ return;
+ }
+
+ size_t size = 0;
+ if (os_mul_overflow(phys_carveout_mb, 1024 * 1024, &size)) {
+ printf("phys_carveout_mb size overflowed (%uMB)\n",
+ phys_carveout_mb);
+ return;
+ }
+
+ kern_return_t kr = kmem_alloc_contig(kernel_map, &phys_carveout, size,
+ VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT,
+ VM_KERN_MEMORY_DIAG);
+ if (kr != KERN_SUCCESS) {
+ printf("failed to allocate %uMB for phys_carveout_mb: %u\n",
+ phys_carveout_mb, (unsigned int)kr);
+ return;
+ }
+
+ phys_carveout_pa = kvtophys(phys_carveout);
+ phys_carveout_size = size;
+}
+
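+/*
+ * Take ownership of the debugger lock for the current CPU, spinning on a
+ * compare-and-swap until any other owner releases it. Re-entry by the CPU
+ * that already holds the lock is a no-op. Interrupts must be disabled.
+ */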
+static void
+DebuggerLock()
+{
+ int my_cpu = cpu_number();
+ int debugger_exp_cpu = DEBUGGER_NO_CPU;
+ assert(ml_get_interrupts_enabled() == FALSE);
+
+ if (atomic_load(&debugger_cpu) == my_cpu) {
+ return;
+ }
+
+ while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
+ debugger_exp_cpu = DEBUGGER_NO_CPU;
+ }
+
+ return;
+}
+
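+/*
+ * Release the debugger lock held by the current CPU so that another CPU
+ * spinning in DebuggerLock() can take ownership.
+ */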
+static void
+DebuggerUnlock()
+{
+ assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());
+
+ /*
+ * We don't do an atomic exchange here in case
+ * there's another CPU spinning to acquire the debugger_lock
+ * and we never get a chance to update it. We already have the
+ * lock so we can simply store DEBUGGER_NO_CPU and follow with
+ * a barrier.
+ */
+ atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
+ OSMemoryBarrier();
+
+ return;
+}
+
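+/*
+ * Bring the other CPUs to a halt before entering the debugger. Embedded
+ * platforms use the debugger cross-call mechanism; other platforms fall
+ * back to mp_kdp_enter().
+ */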
+static kern_return_t
+DebuggerHaltOtherCores(boolean_t proceed_on_failure)
+{
+#if CONFIG_EMBEDDED
+ return DebuggerXCallEnter(proceed_on_failure);
+#else /* CONFIG_EMBEDDED */
+ mp_kdp_enter(proceed_on_failure);
+ return KERN_SUCCESS;
+#endif
+}
+
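+/*
+ * Resume the CPUs that were halted by DebuggerHaltOtherCores().
+ */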
+static void
+DebuggerResumeOtherCores()
+{
+#if CONFIG_EMBEDDED
+ DebuggerXCallReturn();
+#else /* CONFIG_EMBEDDED */
+ mp_kdp_exit();
+#endif
+}
+
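+/*
+ * Record the requested debugger operation and panic information in the
+ * current processor's debugger state. Only the first panic string and
+ * arguments are preserved; nested panics are logged via kprintf() but do
+ * not overwrite the original message.
+ */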
+static void
+DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+ va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+ boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ CPUDEBUGGEROP = db_op;
+
+ /* Preserve the original panic message */
+ if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
+ CPUDEBUGGERMSG = db_message;
+ CPUPANICSTR = db_panic_str;
+ CPUPANICARGS = db_panic_args;
+ CPUPANICDATAPTR = db_panic_data_ptr;
+ CPUPANICCALLER = db_panic_caller;
+ } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) {
+ kprintf("Nested panic detected:");
+ _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
+ }
+
+ CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
+ CPUDEBUGGERRET = KERN_SUCCESS;
+
+ /* Reset the panic options on every debugger entry, including nested panics */
+ CPUPANICOPTS = db_panic_options;
+
+ return;
+}
+
+/*
+ * Save the requested debugger state/action into the current processor's processor_data
+ * and trap to the debugger.
+ */
+kern_return_t
+DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
+ va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
+ boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller)
+{
+ kern_return_t ret;
+
+ assert(ml_get_interrupts_enabled() == FALSE);
+ DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
+ db_panic_options, db_panic_data_ptr,
+ db_proceed_on_sync_failure, db_panic_caller);
+
+ TRAP_DEBUGGER;
+
+ ret = CPUDEBUGGERRET;
+
+ DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0);
+
+ return ret;
+}
+
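+/*
+ * Assertion failure handler invoked by the assert() macro. When built with
+ * CONFIG_NONFATAL_ASSERTS and mach_assert is clear, the failure is only
+ * logged; otherwise it is fatal and panics with the file, line and
+ * expression text.
+ */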
+void __attribute__((noinline))
+Assert(
+ const char *file,
+ int line,
+ const char *expression
+ )
+{
+#if CONFIG_NONFATAL_ASSERTS
+ if (!mach_assert) {
+ kprintf("%s:%d non-fatal Assertion: %s\n", file, line, expression);
+ return;
+ }
+#endif
+
+ panic_plain("%s:%d Assertion failed: %s", file, line, expression);
+}
+
+
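+/*
+ * Enter the debugger with only a message: no exception reason, saved
+ * context or option flags.
+ */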
+void
+Debugger(const char *message)
+{
+ DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE);
+}
+
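+/*
+ * Enter the debugger with an exception reason, saved context, message and
+ * option flags. Interrupts and preemption are disabled on entry, and a
+ * nested-entry count above max_debugger_entry_count forces a reboot unless
+ * panic debugging is enabled, in which case the CPU spins forever.
+ */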
+void
+DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
+ uint64_t debugger_options_mask)
+{
+ spl_t previous_interrupts_state;
+ boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
+
+ previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
+ disable_preemption();
+
+ CPUDEBUGGERCOUNT++;
+
+ if (CPUDEBUGGERCOUNT > max_debugger_entry_count) {
+ static boolean_t in_panic_kprintf = FALSE;
+
+ /* Notify any listeners that we've started a panic */
+ PEHaltRestart(kPEPanicBegin);
+
+ if (!in_panic_kprintf) {
+ in_panic_kprintf = TRUE;
+ kprintf("Detected nested debugger entry count exceeding %d\n",
+ max_debugger_entry_count);
+ in_panic_kprintf = FALSE;
+ }
+
+ if (!panicDebugging) {
+ kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_options_mask);
+ }
+
+ panic_spin_forever();
+ }
+
+#if DEVELOPMENT || DEBUG
+ DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY));