+
+#if CONFIG_SERIAL_KDP
+
+/*
+ * TRUE when the console serial port must be (re)initialized before the next
+ * KDP packet is sent/received. Cleared by kdp_serial_setmode() once
+ * pal_serial_init() has run; set again on KDP_EVENT_EXIT by
+ * kdp_serial_callout().
+ */
+static boolean_t needs_serial_init = TRUE;
+
+static void
+kdp_serial_send(void *rpkt, unsigned int rpkt_len)
+{
+	/*
+	 * Frame the outgoing KDP packet and emit it byte-by-byte through the
+	 * console UART putc routine.
+	 */
+	unsigned char *bytes = (unsigned char *)rpkt;
+
+	kdp_serialize_packet(bytes, rpkt_len, pal_serial_putc_nocr);
+}
+
+static void
+kdp_serial_receive(void *rpkt, unsigned int *rpkt_len, unsigned int timeout)
+{
+	uint64_t now, deadline;
+
+	/* Convert the millisecond timeout into an absolute uptime deadline. */
+	clock_interval_to_deadline(timeout, 1000 * 1000 /* milliseconds */, &deadline);
+
+	clock_get_uptime(&now);
+	while (now < deadline) {
+		int c = pal_serial_getc();
+		if (c >= 0) {
+			/* Feed the byte to the deframer; non-NULL means a full packet. */
+			unsigned char *pkt = kdp_unserialize_packet(c, rpkt_len);
+			if (pkt != NULL) {
+				memcpy(rpkt, pkt, *rpkt_len);
+				return;
+			}
+		}
+		clock_get_uptime(&now);
+	}
+
+	/* Deadline passed without a complete packet: report zero length. */
+	*rpkt_len = 0;
+}
+
+static boolean_t
+kdp_serial_setmode(boolean_t active)
+{
+	/*
+	 * Link-mode callback: lazily (re)initialize the console serial port on
+	 * entry to KDP. Leaving KDP (active == FALSE) needs no teardown, and
+	 * repeated entries are cheap because needs_serial_init gates the init.
+	 * Always reports success.
+	 */
+	if (active && needs_serial_init) {
+		pal_serial_init();
+		needs_serial_init = FALSE;
+	}
+	return TRUE;
+}
+
+
+static void
+kdp_serial_callout(__unused void *arg, kdp_event_t event)
+{
+	/*
+	 * KDP event hook. Only KDP_EVENT_EXIT is interesting: when we stop KDP,
+	 * mark the console serial port for re-initialization the next time a KDP
+	 * packet is sent or received. We deliberately do NOT react to
+	 * KDP_EVENT_ENTER (or KDP_EVENT_PANICLOG): ENTER also fires when we trap
+	 * to KDP for non-external debugging (stackshot, core dumps) and is sent
+	 * multiple times, which would cause excess reinitialization.
+	 */
+	if (event == KDP_EVENT_EXIT) {
+		needs_serial_init = TRUE;
+	}
+}
+
+#endif /* CONFIG_SERIAL_KDP */
+
+void
+kdp_init(void)
+{
+ /*
+ * Build the kernel-version string reported to the debugger and configure
+ * the KDP transport. Serial KDP is set up only when explicitly requested
+ * via the kdp_match_name=serial boot-arg.
+ */
+ strlcpy(kdp_kernelversion_string, version, sizeof(kdp_kernelversion_string));
+
+ /* Relies on platform layer calling panic_init() before kdp_init() */
+ if (kernel_uuid_string[0] != '\0') {
+ /*
+ * Update kdp_kernelversion_string with our UUID
+ * generated at link time.
+ */
+
+ strlcat(kdp_kernelversion_string, "; UUID=", sizeof(kdp_kernelversion_string));
+ strlcat(kdp_kernelversion_string, kernel_uuid_string, sizeof(kdp_kernelversion_string));
+ }
+
+ debug_log_init();
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
+ if (vm_kernel_slide) {
+ /* 19 bytes: "0x" + up to 16 hex digits + NUL for a 64-bit "%p". */
+ char KASLR_stext[19];
+ strlcat(kdp_kernelversion_string, "; stext=", sizeof(kdp_kernelversion_string));
+ snprintf(KASLR_stext, sizeof(KASLR_stext), "%p", (void *) vm_kernel_stext);
+ strlcat(kdp_kernelversion_string, KASLR_stext, sizeof(kdp_kernelversion_string));
+ }
+#endif
+
+ if (debug_boot_arg & DB_REBOOT_POST_CORE)
+ kdp_flag |= REBOOT_POST_CORE;
+#if defined(__x86_64__)
+ kdp_machine_init();
+#endif
+
+ kdp_timer_callout_init();
+ /* The feature mask is exchanged on the wire: store it in network byte order. */
+ kdp_crashdump_feature_mask = htonl(kdp_crashdump_feature_mask);
+ kdp_core_init();
+
+#if CONFIG_SERIAL_KDP
+ char kdpname[80];
+ struct kdp_in_addr ipaddr;
+ struct kdp_ether_addr macaddr;
+
+ boolean_t kdp_match_name_found = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
+ /* TRUE when the boot-arg is present but names something other than "serial". */
+ boolean_t kdp_not_serial = kdp_match_name_found ? (strncmp(kdpname, "serial", sizeof(kdpname))) : TRUE;
+
+ // serial must be explicitly requested
+ if(!kdp_match_name_found || kdp_not_serial)
+ return;
+
+#if WITH_CONSISTENT_DBG
+ /*
+ * NOTE(review): kdp_not_serial is always FALSE at this point -- the early
+ * return above fires whenever it is TRUE -- so this branch is unreachable
+ * and current_debugger is never set to HW_SHM_CUR_DB here. Confirm intent;
+ * the consistent-debug check likely belongs before the early return.
+ */
+ if (kdp_not_serial && PE_consistent_debug_enabled() && debug_boot_arg) {
+ current_debugger = HW_SHM_CUR_DB;
+ return;
+ } else {
+ printf("Serial requested, consistent debug disabled or debug boot arg not present, configuring debugging over serial\n");
+ }
+#endif /* WITH_CONSISTENT_DBG */
+
+ kprintf("Initializing serial KDP\n");
+
+ /* Route KDP events, link mode, and packet I/O through the serial shims. */
+ kdp_register_callout(kdp_serial_callout, NULL);
+ kdp_register_link(NULL, kdp_serial_setmode);
+ kdp_register_send_receive(kdp_serial_send, kdp_serial_receive);
+
+ /* fake up an ip and mac for early serial debugging */
+ macaddr.ether_addr_octet[0] = 's';
+ macaddr.ether_addr_octet[1] = 'e';
+ macaddr.ether_addr_octet[2] = 'r';
+ macaddr.ether_addr_octet[3] = 'i';
+ macaddr.ether_addr_octet[4] = 'a';
+ macaddr.ether_addr_octet[5] = 'l';
+ ipaddr.s_addr = KDP_SERIAL_IPADDR;
+ kdp_set_ip_and_mac_addresses(&ipaddr, &macaddr);
+
+#endif /* CONFIG_SERIAL_KDP */
+}
+
+#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
+void
+kdp_init(void)
+{
+ /* Interactive (KDP) debugging is compiled out; nothing to initialize. */
+}
+#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
+
+#if !CONFIG_KDP_INTERACTIVE_DEBUGGING
+__attribute__((noreturn))
+static void
+panic_spin_forever()
+{
+	/* Point the user at the reporting site, then park this CPU forever. */
+	kdb_printf("\nPlease go to https://panic.apple.com to report this panic\n");
+
+	while (1) {
+	}
+}
+#endif
+
+#if WITH_CONSISTENT_DBG && CONFIG_KDP_INTERACTIVE_DEBUGGING
+__attribute__((noreturn))
+static void
+panic_spin_shmcon()
+{
+ /*
+ * Park the panicked CPU and service core-dump requests from an external
+ * hardware shared-memory ("consistent debug") agent. All handshake state
+ * lives in *hwsd_info; the region is flushed to the point of coherency
+ * (FlushPoC) around every access so both sides see consistent values.
+ * Never returns.
+ */
+ kdb_printf("\nPlease go to https://panic.apple.com to report this panic\n");
+ kdb_printf("Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
+ hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));
+
+ assert(hwsd_info != NULL);
+ /* Advertise readiness and reset the sequence number before polling. */
+ hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
+ hwsd_info->xhsdci_seq_no = 0;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+
+ for (;;) {
+ /* Refresh our view of the handshake block the remote may have written. */
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
+ kern_dump(KERN_DUMP_HW_SHMEM_DBG);
+ }
+
+ /* Remote finished (or errored): re-arm for another core-dump request. */
+ if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
+ (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
+ hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
+ hwsd_info->xhsdci_seq_no = 0;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ }
+ }
+}
+#endif /* WITH_CONSISTENT_DBG && CONFIG_KDP_INTERACTIVE_DEBUGGING */
+
+#if !CONFIG_KDP_INTERACTIVE_DEBUGGING
+/*
+ * Exception entry point from the trap/panic path. With interactive debugging
+ * compiled out this never returns (it spins in panic_spin_forever()).
+ */
+__attribute__((noreturn))
+void
+kdp_raise_exception(
+ __unused unsigned int exception,
+ __unused unsigned int code,
+ __unused unsigned int subcode,
+ __unused void *saved_state
+ )
+#else
+/*
+ * Exception entry point from the trap/panic path: write a local core dump
+ * when one is configured, then hand control to the active debugger loop.
+ */
+void
+kdp_raise_exception(
+ unsigned int exception,
+ unsigned int code,
+ unsigned int subcode,
+ void *saved_state
+ )
+#endif
+{
+
+#if CONFIG_KDP_INTERACTIVE_DEBUGGING
+
+ /* Remember the prior in-KDP state so nested entries restore it correctly. */
+ unsigned int initial_not_in_kdp = not_in_kdp;
+ not_in_kdp = 0;
+
+ disable_preemption();
+
+ if (current_debugger != KDP_CUR_DB) {
+ /* try a local disk dump */
+ if (kdp_has_polled_corefile()) {
+#if WITH_CONSISTENT_DBG
+ if (current_debugger == HW_SHM_CUR_DB) {
+ /* Tell the shared-memory agent we are busy writing the dump. */
+ hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
+ }
+#endif /* WITH_CONSISTENT_DBG */
+ flag_panic_dump_in_progress = TRUE;
+ kern_dump(KERN_DUMP_DISK);
+ /* presumably clears flag_panic_dump_in_progress -- confirm in its definition */
+ abort_panic_transfer();
+ }
+#if WITH_CONSISTENT_DBG
+ if (current_debugger == HW_SHM_CUR_DB) {
+ /* Hand off to the hardware shared-memory debugger; does not return. */
+ panic_spin_shmcon();
+ }
+#endif /* WITH_CONSISTENT_DBG */
+
+
+ /* No interactive debugger allowed: reboot rather than sit in KDP. */
+ if (!panicDebugging) {
+ kdp_machine_reboot();
+ }
+ }
+
+ kdp_debugger_loop(exception, code, subcode, saved_state);
+ not_in_kdp = initial_not_in_kdp;
+ enable_preemption();
+#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
+ assert(current_debugger != KDP_CUR_DB);
+
+ /*
+ * If kernel debugging is enabled via boot-args, but KDP debugging
+ * is not compiled into the kernel, spin here waiting for debugging
+ * via another method. Why here? Because we want to have watchdog
+ * disabled (via KDP callout) while sitting waiting to be debugged.
+ */
+ panic_spin_forever();
+#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
+}