+#if WITH_CONSISTENT_DBG
+/*
+ * Whenever we start a coredump, make sure the buffers
+ * are all on the free queue and the state is as expected.
+ * The buffers may have been left in a different state if
+ * a previous coredump attempt failed.
+ */
+static void
+kern_dump_hw_shmem_dbg_reset(void)
+{
+ struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;
+
+ STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
+ cur_elm->khsd_data_length = 0;
+ }
+
+ if (currently_filling_buf != NULL) {
+ currently_filling_buf->khsd_data_length = 0;
+
+ STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
+ currently_filling_buf = NULL;
+ }
+
+ if (currently_flushing_buf != NULL) {
+ currently_flushing_buf->khsd_data_length = 0;
+
+ STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
+ currently_flushing_buf = NULL;
+ }
+
+ STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
+ cur_elm->khsd_data_length = 0;
+
+ STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
+ STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
+ }
+
+ hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
+ kdp_hw_shmem_dbg_seq_no = 0;
+ hwsd_info->xhsdci_buf_phys_addr = 0;
+ hwsd_info->xhsdci_buf_data_length = 0;
+ hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
+ hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
+ hwsd_info->xhsdci_page_size = PAGE_SIZE;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+
+ kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
+}
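+
+/*
+ * For illustration only (not compiled here): the reset above is plain
+ * BSD <sys/queue.h> STAILQ manipulation. A minimal userspace sketch of
+ * the same drain-one-queue-into-the-other idiom, with hypothetical names
+ * (demo_buf, free_q, flush_q), assuming a BSD-flavored <sys/queue.h>
+ * that provides STAILQ_FOREACH_SAFE:
+ *
+ *	#include <stdio.h>
+ *	#include <sys/queue.h>
+ *
+ *	struct demo_buf {
+ *		size_t data_length;
+ *		STAILQ_ENTRY(demo_buf) elms;
+ *	};
+ *
+ *	static STAILQ_HEAD(, demo_buf) free_q = STAILQ_HEAD_INITIALIZER(free_q);
+ *	static STAILQ_HEAD(, demo_buf) flush_q = STAILQ_HEAD_INITIALIZER(flush_q);
+ *
+ *	int
+ *	main(void)
+ *	{
+ *		struct demo_buf bufs[2] = {
+ *			{ .data_length = 128 }, { .data_length = 256 }
+ *		};
+ *		struct demo_buf *cur, *tmp;
+ *
+ *		STAILQ_INSERT_TAIL(&flush_q, &bufs[0], elms);
+ *		STAILQ_INSERT_TAIL(&flush_q, &bufs[1], elms);
+ *
+ *		// Zero each element and move it back to the free queue,
+ *		// exactly as the reset does for the to-flush queue.
+ *		STAILQ_FOREACH_SAFE(cur, &flush_q, elms, tmp) {
+ *			cur->data_length = 0;
+ *			STAILQ_REMOVE(&flush_q, cur, demo_buf, elms);
+ *			STAILQ_INSERT_HEAD(&free_q, cur, elms);
+ *		}
+ *
+ *		STAILQ_FOREACH(cur, &free_q, elms) {
+ *			printf("reset buf, data_length=%zu\n", cur->data_length);
+ *		}
+ *		return 0;
+ *	}
+ */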
+
+/*
+ * Tries to move buffers forward through the pipeline. If the
+ * hardware debugger is done consuming the current buffer, we
+ * hand it the next one and return the consumed buffer to the
+ * free queue.
+ */
+static int
+kern_dump_hw_shmem_dbg_process_buffers(void)
+{
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
+ kdb_printf("Detected remote error, terminating...\n");
+ return -1;
+ } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
+ if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
+ kdb_printf("Detected stale/invalid seq num. Expected: %d, received %d\n",
+ (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
+ hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ return -1;
+ }
+
+ kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;
+
+ if (currently_flushing_buf != NULL) {
+ currently_flushing_buf->khsd_data_length = 0;
+ STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
+ }
+
+ currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
+ if (currently_flushing_buf != NULL) {
+ STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);
+
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
+ hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
+ hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
+ hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
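+ /*
+ * The data buffers are carved out of the same shared region as
+ * hwsd_info, so this larger flush pushes the buffer contents out
+ * to the point of coherency before the status update below
+ * publishes them to the debugger.
+ */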
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
+ hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
+ hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ }
+
+ kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
+ kdp_hw_shmem_dbg_contact_deadline_interval;
+
+ return 0;
+ } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
+ kdb_printf("Kernel timed out waiting for hardware debugger to update handshake structure.");
+ kdb_printf(" No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);
+
+ hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+ return -1;
+ }
+
+ return 0;
+}
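+
+/*
+ * For illustration only: the consumer half of this handshake, as implied
+ * by the checks above. Hypothetical debugger-side pseudocode; the helpers
+ * invalidate_cache(), consume_physical() and flush_cache() are
+ * placeholders, not real APIs:
+ *
+ *	for (;;) {
+ *		invalidate_cache(info, sizeof(*info));
+ *		if (info->xhsdci_status == XHSDCI_COREDUMP_BUF_READY) {
+ *			// Read the published buffer, then hand it back,
+ *			// bumping the sequence number by exactly one.
+ *			consume_physical(info->xhsdci_buf_phys_addr,
+ *			    info->xhsdci_buf_data_length);
+ *			info->xhsdci_seq_no += 1;
+ *			info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
+ *			flush_cache(info, sizeof(*info));
+ *		} else if (info->xhsdci_status == XHSDCI_COREDUMP_STATUS_DONE) {
+ *			break;
+ *		}
+ *	}
+ */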
+
+/*
+ * Populates currently_filling_buf with a new buffer once one
+ * becomes available. Returns 0 on success, or the non-zero
+ * (error) value returned by kern_dump_hw_shmem_dbg_process_buffers().
+ */
+static int
+kern_dump_hw_shmem_dbg_get_buffer(void)
+{
+ int ret = 0;
+
+ assert(currently_filling_buf == NULL);
+
+ while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
+ ret = kern_dump_hw_shmem_dbg_process_buffers();
+ if (ret) {
+ return ret;
+ }
+ }
+
+ currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
+ STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);
+
+ assert(currently_filling_buf->khsd_data_length == 0);
+ return ret;
+}
+
+/*
+ * Output procedure for hardware shared memory core dumps.
+ *
+ * Tries to fill a buffer completely before queueing it for flushing.
+ */
+static int
+kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
+ uint64_t length, void *data)
+{
+ int ret = 0;
+
+ assert(length < UINT32_MAX);
+ uint32_t bytes_remaining = (uint32_t) length;
+ uint32_t bytes_to_copy;
+
+ if (request == KDP_EOF) {
+ assert(currently_filling_buf == NULL);
+
+ /*
+ * Wait until we've flushed all the buffers
+ * before setting the connection status to done.
+ */
+ while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
+ currently_flushing_buf != NULL) {
+ ret = kern_dump_hw_shmem_dbg_process_buffers();
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /*
+ * If the last status we saw indicates that the buffer was
+ * empty and we haven't flushed any new data since then, we
+ * expect the sequence number to still match the last one we saw.
+ */
+ if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
+ kdb_printf("EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
+ kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
+ return -1;
+ }
+
+ kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;
+
+ kdb_printf("Setting coredump status as done!\n");
+ hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
+ hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
+ FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
+
+ return ret;
+ }
+
+ assert(request == KDP_DATA);
+
+ /*
+ * The output procedure is called with length == 0 and data == NULL
+ * to flush any remaining output at the end of the coredump before
+ * we call it a final time to mark the dump as done.
+ */
+ if (length == 0) {
+ assert(data == NULL);
+
+ if (currently_filling_buf != NULL) {
+ STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
+ currently_filling_buf = NULL;
+ }
+
+ /*
+ * Move the current buffer along if possible.
+ */
+ ret = kern_dump_hw_shmem_dbg_process_buffers();
+ return ret;
+ }
+
+ while (bytes_remaining != 0) {
+ /*
+ * Make sure we have a buffer to work with.
+ */
+ while (currently_filling_buf == NULL) {
+ ret = kern_dump_hw_shmem_dbg_get_buffer();
+ if (ret) {
+ return ret;
+ }
+ }
+
+ assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
+ bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
+ currently_filling_buf->khsd_data_length);
+ bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
+ bytes_to_copy);
+
+ currently_filling_buf->khsd_data_length += bytes_to_copy;
+
+ if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
+ STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
+ currently_filling_buf = NULL;
+
+ /*
+ * Move it along if possible.
+ */
+ ret = kern_dump_hw_shmem_dbg_process_buffers();
+ if (ret) {
+ return ret;
+ }
+ }
+
+ bytes_remaining -= bytes_to_copy;
+ data = (void *) ((uintptr_t)data + bytes_to_copy);
+ }
+
+ return ret;
+}
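+
+/*
+ * For illustration only: the calling convention this output procedure
+ * implements, as a hypothetical driver sequence (error handling elided;
+ * 'chunk' and 'chunk_len' are placeholders):
+ *
+ *	// Stream data; the proc batches it into shared-memory buffers
+ *	// and flushes each one as it fills.
+ *	kern_dump_hw_shmem_dbg_buffer_proc(KDP_DATA, NULL, chunk_len, chunk);
+ *
+ *	// length == 0 / data == NULL flushes any partially filled buffer.
+ *	kern_dump_hw_shmem_dbg_buffer_proc(KDP_DATA, NULL, 0, NULL);
+ *
+ *	// KDP_EOF waits for outstanding buffers and marks the dump done.
+ *	kern_dump_hw_shmem_dbg_buffer_proc(KDP_EOF, NULL, 0, NULL);
+ */
+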
+#endif /* WITH_CONSISTENT_DBG */
+