/* osfmk/kdp/kdp_core.c, from apple/xnu (xnu-7195.50.7.100.1) */

/*
 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
#endif /* defined(__arm__) || defined(__arm64__) */

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
    uint64_t length, void *panic_data);

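/*
 * Per-dump output state shared by the compression callbacks below.
 * (Field notes added editorially, based on how the code in this file uses
 * them: 'zipped' counts compressed bytes handed to 'outproc'; 'totalbytes'
 * is the expected uncompressed total, used only for progress reporting;
 * 'outbuf'/'outlen'/'outremain' describe the packet staging buffer used for
 * network dumps; 'error' latches the first failure so later writes become
 * no-ops.)
 */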
struct kdp_core_out_vars {
    kern_dump_output_proc outproc;
    z_output_func zoutput;
    size_t zipped;
    uint64_t totalbytes;
    uint64_t lastpercent;
    IOReturn error;
    unsigned outremain;
    unsigned outlen;
    unsigned writes;
    Bytef * outbuf;
};

extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t kdp_core_zsize;
static size_t kdp_core_zoffset;
static z_stream kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;
#if defined(__arm__) || defined(__arm64__)
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)

/*
 * Astris can read at most 4064 bytes at a time over the probe, so make the
 * buffer size a multiple of this to keep reads by Astris (the bottleneck)
 * as efficient as possible.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064
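
/*
 * Editorial note: kdp_core_init() below trims each buffer to a multiple of
 * ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cache-line size). For example, with a
 * 64-byte cache line, ROUNDDOWN(4064, 64) = 4032, so each of the two
 * buffers ends up a multiple of 4032 bytes.
 */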

struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
    STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
    STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* defined(__arm__) || defined(__arm64__) */

static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

boolean_t
kdp_has_polled_corefile(void)
{
    return NULL != gIOPolledCoreFileVars;
}

kern_return_t
kdp_polled_corefile_error(void)
{
    return gIOPolledCoreFileOpenRet;
}

#if defined(__arm__) || defined(__arm64__)
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset(void)
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}

/*
 * Tries to move buffers forward through the pipeline: if the hardware
 * debugger is done consuming the current buffer, hand it the next one and
 * return the consumed buffer to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers(void)
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kern_coredump_log(NULL, "Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
                (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
            kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.\n");
        kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}
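
/*
 * Editorial sketch of the handshake, as implemented above and in
 * panic_spin_shmcon(): the kernel bumps xhsdci_seq_no and publishes a
 * physical buffer address with status XHSDCI_COREDUMP_BUF_READY; the
 * hardware debugger drains the buffer and acknowledges by writing
 * seq_no + 1 with status XHSDCI_COREDUMP_BUF_EMPTY. Any other sequence
 * number, or silence past the contact deadline, aborts the dump with
 * XHSDCI_COREDUMP_ERROR.
 */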

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer(void)
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);
    return ret;
}

/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
    uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;

    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
            currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kern_coredump_log(NULL, "Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);

    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }

    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
            currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
            bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}
#endif /* defined(__arm__) || defined(__arm64__) */

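/*
 * Disk output procedure for local corefiles, driven through the polled-mode
 * I/O stack. Request codes (editorial summary of the cases below): KDP_WRQ
 * opens the file, KDP_SEEK repositions it, KDP_DATA writes bytes, KDP_FLUSH
 * forces buffered data out (embedded only), and KDP_EOF flushes and closes.
 */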
static IOReturn
kern_dump_disk_proc(unsigned int request, __unused char *corename,
    uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request) {
    case KDP_WRQ:
        err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
            break;
        }
        err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
        break;

    case KDP_SEEK:
        noffset = *((uint64_t *) data);
        err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
            break;
        }
        err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
        }
        break;

    case KDP_DATA:
        err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n",
                data, length, err);
            break;
        }
        break;

#if defined(__arm__) || defined(__arm64__)
    /* Only supported on embedded by the underlying polled mode driver */
    case KDP_FLUSH:
        err = IOPolledFileFlush(gIOPolledCoreFileVars);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
            break;
        }
        break;
#endif /* defined(__arm__) || defined(__arm64__) */

    case KDP_EOF:
        err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
            break;
        }
        err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
            break;
        }
        break;
    }

    return err;
}

/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque;
    IOReturn ret;

    vars->zipped += len;

    if (vars->error >= 0) {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                len, buf, ret);
            vars->error = ret;
        }
        if (!buf && !len) {
            kern_coredump_log(NULL, "100..");
        }
    }
    return len;
}

/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque;
    unsigned remain;
    IOReturn ret;
    unsigned chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush)) {
        chunk = vars->outremain;
        if (chunk > remain) {
            chunk = remain;
        }
        if (!inbuf) {
            bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        } else {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain -= chunk;

        if (vars->outremain && !flush) {
            break;
        }
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
            vars->outlen - vars->outremain,
            vars->outbuf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                (vars->outlen - vars->outremain), vars->outbuf, ret);
            vars->error = ret;
        }
        if (flush) {
            kern_coredump_log(NULL, "100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return inlen;
}

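/*
 * zlib input callback (editorial note: wired up via deflateResetWithIO in
 * kdp_reset_output_vars below). Copies the staged input into zlib's buffer,
 * treating next_in == (Bytef *) strm as a marker for zero-filled input,
 * updates the running CRC, and logs a progress percentage roughly every
 * 512 reads.
 */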
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque;
    uint64_t percent, total_in = 0;
    unsigned len;

    len = strm->avail_in;
    if (len > size) {
        len = size;
    }
    if (len == 0) {
        return 0;
    }

    if (strm->next_in != (Bytef *) strm) {
        memcpy(buf, strm->next_in, len);
    } else {
        bzero(buf, len);
    }
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++)) {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10) {
            vars->lastpercent = percent;
            kern_coredump_log(NULL, "%lld..\n", percent);
        }
    }

    return (int)len;
}

static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int zr;
    boolean_t flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled) {
        (*vars->zoutput)(zs, data, length);
    } else {
        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0) {
            if (!zs->avail_in && !flush) {
                if (!length) {
                    break;
                }
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out) {
                zs->next_out = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) {
                break;
            }
            if (zr != Z_OK) {
                kern_coredump_log(NULL, "ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) {
            (*vars->zoutput)(zs, NULL, 0);
        }
    }

    return vars->error;
}

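/*
 * Entry point used by the coredump machinery to push data into the
 * compression stream. (Editorial note: lengths are split into chunks of at
 * most 1 GB because zlib's avail_in is a 32-bit quantity; a NULL 'data'
 * with nonzero length compresses zero-filled bytes, and length == 0 with
 * data == NULL finishes the stream.)
 */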
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
    IOReturn err;
    unsigned int chunk;
    enum { kMaxZLibChunk = 1024 * 1024 * 1024 };
    struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    do {
        if (length <= kMaxZLibChunk) {
            chunk = (typeof(chunk))length;
        } else {
            chunk = kMaxZLibChunk;
        }
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) {
            data = (void *) (((uintptr_t) data) + chunk);
        }
    } while (length && (kIOReturnSuccess == err));

    return err;
}

#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t c_buffers_size;

ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers) {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    } else if (vaddr == kdp_core_zmem) {
        /* zlib working memory */
        ppn = 0;
        vincr = kdp_core_zsize;
    } else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    } else
#if defined(__arm64__) && defined(CONFIG_XNUPOST)
    if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS) {
        /* not readable */
        ppn = 0;
        vincr = _COMM_PAGE_AREA_LENGTH;
    } else
#endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
#if defined(__arm__) || defined(__arm64__)
    if (vaddr == phystokv(avail_start)) {
        /* physical memory map */
        ppn = 0;
        vincr = (avail_end - avail_start);
    } else
#endif /* defined(__arm__) || defined(__arm64__) */
    {
        ppn = (pvphysaddr != NULL ?
            pmap_find_phys(kernel_pmap, vaddr) :
            pmap_find_phys_nofault(kernel_pmap, vaddr));
    }

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr) {
        uint64_t phys = ptoa_64(ppn);
        if (physmap_enclosed(phys)) {
            *pvphysaddr = phystokv(phys);
        } else {
            ppn = 0;
        }
    }

    return ppn;
}
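
/*
 * Editorial note on the contract above: the function returns the physical
 * page number backing 'vaddr' (0 if the page is absent or excluded), and
 * always sets *pvincr to the page-rounded number of bytes the caller should
 * advance by, which lets an excluded range be skipped in one step.
 */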

int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
    IOReturn ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t vincr = 0;
    vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
    vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
    vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
    vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

    boolean_t lastvavalid;
#if defined(__arm__) || defined(__arm64__)
    vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) {
        return KERN_INVALID_ARGUMENT;
    }

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
        ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
        /* We're at the start of the physmap, so pull out the pagetable
         * pages that are accessed through that region. */
        if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
            m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
        }

        if (m != VM_PAGE_NULL) {
            vm_map_offset_t vprev = vcur;
            ppn = (ppnum_t)atop(avail_end);
            while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
                /* Ignore pages that come from the static region and have already been dumped. */
                if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
                    ppn = VM_PAGE_GET_PHYS_PAGE(m);
                    break;
                }
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
            }
            vincr = PAGE_SIZE_64;
            if (ppn == atop(avail_end)) {
                vm_object_unlock(&pmap_object_store);
                m = VM_PAGE_NULL;
                // avail_end is not a valid physical address,
                // so phystokv(avail_end) may not produce the expected result.
                vcur = phystokv(avail_start) + (avail_end - avail_start);
            } else {
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
                vcur = phystokv(ptoa(ppn));
            }
            if (vcur != vprev) {
                ret = callback(vcurstart, vprev, context);
                lastvavalid = FALSE;
            }
        }
        if (m == VM_PAGE_NULL) {
            ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
        }
#else /* defined(__arm__) || defined(__arm64__) */
        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
        if (ppn != 0) {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
                // include the macOS panic region if it's mapped
                && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
                ) {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if ((vcur & PDMASK) == 0) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }

#if KASAN
    if (ret == KERN_SUCCESS) {
        ret = kasan_traverse_mappings(callback, context);
    }
#endif

    return ret;
}
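
/*
 * Editorial summary: the traversal coalesces contiguous present pages and
 * invokes the callback once per maximal run with a half-open [start, end)
 * virtual range, so callers see one callback per dumpable segment rather
 * than one per page.
 */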

struct kern_dump_preflight_context {
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return ret;
}


struct kern_dump_send_seg_desc_context {
    core_save_segment_descriptions_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
    struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
    uint64_t seg_start = (uint64_t) start;
    uint64_t seg_end = (uint64_t) end;

    return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context {
    core_save_segment_data_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
    struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

    return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    uint64_t thread_state_size = 0, thread_count = 0;
    vm_map_offset_t vstart = kdp_core_start_addr();
    kern_return_t ret;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        vstart,
        VM_MAX_KERNEL_ADDRESS,
        kern_dump_pmap_traverse_preflight_callback,
        &kdc_preflight);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
        return ret;
    }

    kern_collectth_state_size(&thread_count, &thread_state_size);

    ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
        thread_count, thread_state_size, 0, context);
    return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
    vm_map_offset_t vstart = kdp_core_start_addr();
    kern_return_t ret;
    struct kern_dump_send_seg_desc_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        vstart,
        VM_MAX_KERNEL_ADDRESS,
        kern_dump_pmap_traverse_send_segdesc_callback,
        &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
    kern_return_t ret;
    uint64_t thread_state_size = 0, thread_count = 0;

    kern_collectth_state_size(&thread_count, &thread_state_size);

    if (thread_state_size > 0) {
        void * iter = NULL;
        do {
            kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

            ret = callback(buf, context);
            if (ret != KERN_SUCCESS) {
                return ret;
            }
        } while (iter);
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
    return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
    vm_map_offset_t vstart = kdp_core_start_addr();
    kern_return_t ret;
    struct kern_dump_send_segdata_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        vstart,
        VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    /* Re-initialize kdp_outvars */
    outvars->zipped = 0;
    outvars->totalbytes = totalbytes;
    outvars->lastpercent = 0;
    outvars->error = kIOReturnSuccess;
    outvars->outremain = 0;
    outvars->outlen = 0;
    outvars->writes = 0;
    outvars->outbuf = NULL;

    if (outvars->outproc == &kdp_send_crashdump_data) {
        /* KERN_DUMP_NET */
        outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
    }

    kdp_core_total_size = totalbytes;

    /* Re-initialize zstream variables */
    kdp_core_zs.avail_in = 0;
    kdp_core_zs.next_in = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out = NULL;
    kdp_core_zs.opaque = outvars;

    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

    return KERN_SUCCESS;
}
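
/*
 * Editorial note: deflateResetWithIO() is, as far as we can tell, an
 * xnu-local extension to the in-kernel zlib that resets the deflate state
 * and installs the input and output callbacks, so the panic-time dump can
 * drive compression without any further allocation.
 */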

static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
    uint64_t foffset;
    int ret;

    /* Write the file header -- first seek to the beginning of the file */
    foffset = 0;
    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
            sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
            sizeof(kdp_core_header), &kdp_core_header, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if defined(__arm__) || defined(__arm64__)
    if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

    return KERN_SUCCESS;
}

int
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
    int ret = 0;
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
    assert(out_file_length != NULL);
    *out_file_length = 0;

    kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
    kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
    strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
        MACH_CORE_FILEHEADER_NAMELEN);
    kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
    kdp_core_header.num_files++;
    kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

    ret = kern_dump_update_header(outvars);
    if (ret == KERN_SUCCESS) {
        *out_file_length = outvars->zipped;
    }

    return ret;
}
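
/*
 * Editorial note: each recorded file becomes an entry in the
 * mach_core_fileheader at the front of the corefile, carrying the gzip
 * stream's offset and length plus a NUL-terminated name. The signature is
 * only stamped once a file has been recorded (kdp_core_init bzeroes the
 * header), which appears to be what marks the header as valid.
 */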

int
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
    int ret;

    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
            sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
    }

    return ret;
}

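/*
 * Editorial sketch of the on-disk layout produced below: the corefile
 * begins with the mach_core_fileheader, followed by reserved space for the
 * panic and coredump logs, then the gzip-compressed dumps themselves, each
 * starting at a KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN-aligned offset.
 */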
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kdp_core_out_vars outvars = { };

    char *coredump_log_start = NULL, *buf = NULL;
    size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
    uint64_t foffset = 0;
    int ret = 0;
    boolean_t output_opened = FALSE, dump_succeeded = TRUE;

    /*
     * Record the initial panic log buffer length so we can dump the coredump log
     * and panic log to disk
     */
    coredump_log_start = debug_buf_ptr;
#if defined(__arm__) || defined(__arm64__)
    assert(panic_info->eph_other_log_offset != 0);
    assert(panic_info->eph_panic_log_len != 0);
    /* Include any data from before the panic log as well */
    prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
        panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm__) || defined(__arm64__) */
    if (panic_info->mph_panic_log_offset != 0) {
        prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
            panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

    assert(prior_debug_logsize <= debug_buf_size);

    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        /* Open the file for output */
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }
    output_opened = true;

    /* Initialize gzip, output context */
    bzero(&outvars, sizeof(outvars));
    outvars.outproc = outproc;

    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        outvars.zoutput = kdp_core_zoutput;
        reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;
        /* Space for file header, panic log, core log */
        foffset = ((KERN_COREDUMP_HEADERSIZE + reserved_debug_logsize + (KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1)) \
            & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1));
        kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

        /* Seek to the calculated offset (we'll seek back later to flush the logs and header) */
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.zoutput = kdp_core_zoutputbuf;
#if defined(__arm__) || defined(__arm64__)
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.zoutput = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif
    }

#if defined(__arm__) || defined(__arm64__)
    flush_mmu_tlb();
#endif

    kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
        "Transmitting kernel state, please wait:\n");

#if defined(__x86_64__)
    if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
        uint64_t compressed_stackshot_len = 0;

        if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
            dump_succeeded = FALSE;
        } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, %p) returned 0x%x\n",
                panic_stackshot_len, (void *) panic_stackshot_buf, ret);
            dump_succeeded = FALSE;
        } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outvars, ret);
            dump_succeeded = FALSE;
        } else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
            dump_succeeded = FALSE;
        } else {
            kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
            foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
            if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
                kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
                dump_succeeded = FALSE;
            }
        }
    }
#endif

    if (kd_variant == KERN_DUMP_DISK) {
        /*
         * Dump co-processors as well; foffset will be overwritten with the
         * offset of the next location in the file to be written to.
         */
        if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    } else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
        /* Only the kernel */
        if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    }

    if (kd_variant == KERN_DUMP_DISK) {
        assert(reserved_debug_logsize != 0);
        size_t remaining_debug_logspace = reserved_debug_logsize;

        /* Write the debug log -- first seek to the end of the corefile header */
        foffset = KERN_COREDUMP_HEADERSIZE;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        /* First flush the data from just the paniclog */
        size_t initial_log_length = 0;
#if defined(__arm__) || defined(__arm64__)
        initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
            panic_info->eph_panic_log_len;
#else
        if (panic_info->mph_panic_log_offset != 0) {
            initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
                panic_info->mph_panic_log_len;
        }
#endif

        buf = debug_buf_base;
        if ((ret = (*outproc)(KDP_DATA, NULL, initial_log_length, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                initial_log_length, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        remaining_debug_logspace -= initial_log_length;

        /* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm__) || defined(__arm64__)
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
        /*
         * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86
         * when debugger_is_panic was configured to FALSE via the boot-args. In that case just
         * start from where the debug buffer was when we began taking a coredump.
         */
        if (panic_info->mph_other_log_offset != 0) {
            buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
        } else {
            buf = coredump_log_start;
        }
#endif
        assert(debug_buf_ptr >= buf);

        size_t other_log_length = debug_buf_ptr - buf;
        if (other_log_length > remaining_debug_logspace) {
            other_log_length = remaining_debug_logspace;
        }

        /* Write the coredump log */
        if ((ret = (*outproc)(KDP_DATA, NULL, other_log_length, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                other_log_length, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        kdp_core_header.log_length = initial_log_length + other_log_length;
        kern_dump_update_header(&outvars);
    }

exit:
    /* close / last packet */
    if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
        dump_succeeded = FALSE;
    }

    /* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm__) || defined(__arm64__)
    panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
        EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
    paniclog_flush();
#else
    if (panic_info->mph_panic_log_offset != 0) {
        panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
            MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
        paniclog_flush();
    }
#endif

    return dump_succeeded ? 0 : -1;
}

boolean_t
dumped_kernel_core(void)
{
    return kern_dump_successful;
}

int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
    int ret = -1;
#if KASAN
    kasan_disable();
#endif
    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        if (dumped_local) {
            return 0;
        }
        if (local_dump_in_progress) {
            return -1;
        }
        local_dump_in_progress = TRUE;
#if defined(__arm__) || defined(__arm64__)
        if (hwsd_info != NULL) {
            hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
        }
#endif
        ret = do_kern_dump(&kern_dump_disk_proc, kd_variant);
        if (ret == 0) {
            dumped_local = TRUE;
            kern_dump_successful = TRUE;
            local_dump_in_progress = FALSE;
        }

        return ret;
#if defined(__arm__) || defined(__arm64__)
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
#endif
    } else {
        ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
    }
}
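
/*
 * Editorial summary of the dump variants handled above: KERN_DUMP_DISK and
 * KERN_DUMP_STACKSHOT_DISK write a local corefile (at most once per boot,
 * guarded by the static flags), KERN_DUMP_HW_SHMEM_DBG streams to an
 * attached hardware debugger over shared memory, and anything else falls
 * through to a network dump via kdp_send_crashdump_data.
 */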

#if defined(__arm__) || defined(__arm64__)
void
panic_spin_shmcon(void)
{
    if (!PE_i_can_has_debugger(NULL)) {
        return;
    }

    if (hwsd_info == NULL) {
        kern_coredump_log(NULL, "handshake structure not initialized\n");
        return;
    }

    kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
    kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
        hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

    hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
    hwsd_info->xhsdci_seq_no = 0;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    for (;;) {
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
            kern_dump(KERN_DUMP_HW_SHMEM_DBG);
        }

        if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
            (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
            hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
            hwsd_info->xhsdci_seq_no = 0;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }
    }
}
#endif /* defined(__arm__) || defined(__arm64__) */

static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size)); /* 32-byte align for vector crc */
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return result;
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr)
{
}
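
/*
 * Editorial note: zalloc/zfree above implement a simple bump allocator over
 * the preallocated kdp_core_zmem region; zfree is deliberately a no-op
 * because the zlib state is allocated once at kdp_core_init() time and must
 * remain valid for the life of the system.
 */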

#if defined(__arm__) || defined(__arm64__)
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif
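
/*
 * Editorial note: NETBUF reserves room in the zlib arena, past the deflate
 * working set, for the network packet staging buffer used by
 * kdp_core_zoutputbuf(); 1440 bytes is presumably sized so a staged chunk
 * fits in a single KDP crashdump packet. It is 0 on arm targets, where
 * dumps go to disk or to the shared-memory debugger instead.
 */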

void
kdp_core_init(void)
{
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;
#if defined(__arm__) || defined(__arm64__)
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
    cache_info_t *cpuid_cache_info = NULL;
#endif /* defined(__arm__) || defined(__arm64__) */
    kern_coredump_callback_config core_config = { };

    if (kdp_core_zs.zalloc) {
        return;
    }
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree = kdp_core_zfree;

    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
        wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }

    bzero(&kdp_core_header, sizeof(kdp_core_header));

    core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
    core_config.kcc_coredump_get_summary = kern_dump_save_summary;
    core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
    core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
    core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
    core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
    core_config.kcc_coredump_save_misc_data = NULL;

    kr = kern_register_xnu_coredump_helper(&core_config);
    assert(KERN_SUCCESS == kr);

#if defined(__arm__) || defined(__arm64__)
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    if (!PE_i_can_has_debugger(NULL)) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since Astris isn't
     * capable of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf,
        kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
        0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and adjust
     * the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    cpuid_cache_info = cache_info();
    assert(cpuid_cache_info != NULL);

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    /* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
    kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
    kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
    kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
    /* Trim to a multiple of the optimal Astris read size, itself rounded down to a cache-line multiple */
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = zalloc_permanent_type(typeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
        &kdp_hw_shmem_dbg_contact_deadline_interval);

    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* defined(__arm__) || defined(__arm64__) */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */