/*
 * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>

#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

#if CONFIG_EMBEDDED
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

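/* Round 'a' up or down to a multiple of 'b'; both macros assume 'b' is a power of two. */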
#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
#endif /* CONFIG_EMBEDDED */

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
                                          vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          pmap_traverse_callback callback,
                                          void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);

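/*
 * Per-dump output state: the registered output procedure, the zlib output
 * callback it feeds, and compression/progress/error accounting.
 */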
struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;
    z_output_func         zoutput;
    size_t                zipped;
    uint64_t              totalbytes;
    uint64_t              lastpercent;
    IOReturn              error;
    unsigned              outremain;
    unsigned              outlen;
    unsigned              writes;
    Bytef *               outbuf;
};

extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t      kdp_core_zsize;
static size_t      kdp_core_zoffset;
static z_stream    kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;

#if CONFIG_EMBEDDED
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS    2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t    khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
        STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
        STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* CONFIG_EMBEDDED */

static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

boolean_t kdp_has_polled_corefile(void)
{
    return (NULL != gIOPolledCoreFileVars);
}

#if CONFIG_EMBEDDED
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset()
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}

/*
 * Tries to move buffers forward through the pipeline. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers()
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kern_coredump_log(NULL, "Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
                    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
                kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
        kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer()
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);
    return ret;
}

/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
                                   uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;

    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
                currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kern_coredump_log(NULL, "Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);

    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }

    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
                currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
                bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}
#endif /* CONFIG_EMBEDDED */

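/*
 * Output procedure for local disk coredumps, backed by the IOPolledFile
 * interface: KDP_WRQ opens the file, KDP_SEEK/KDP_DATA position and write,
 * and KDP_EOF flushes any pending output and closes the pollers.
 */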
static IOReturn
kern_dump_disk_proc(unsigned int request, __unused char *corename,
                    uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request)
    {
        case KDP_WRQ:
            err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
            break;

        case KDP_SEEK:
            noffset = *((uint64_t *) data);
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
            }
            break;

        case KDP_DATA:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, 0x%p, 0x%llx, NULL) returned 0x%x\n",
                        data, length, err);
                break;
            }
            break;

#if CONFIG_EMBEDDED
        /* Only supported on embedded by the underlying polled mode driver */
        case KDP_FLUSH:
            err = IOPolledFileFlush(gIOPolledCoreFileVars);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
                break;
            }
            break;
#endif

        case KDP_EOF:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
                break;
            }
            break;
    }

    return (err);
}

/*
 * Flushes any data to the output proc immediately.
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    IOReturn ret;

    vars->zipped += len;

    if (vars->error >= 0)
    {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, 0x%p) returned 0x%x\n",
                    len, buf, ret);
            vars->error = ret;
        }
        if (!buf && !len) kern_coredump_log(NULL, "100..");
    }
    return (len);
}

/*
 * Tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned remain;
    IOReturn ret;
    unsigned chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        else
        {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain -= chunk;

        if (vars->outremain && !flush) break;
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                                    vars->outlen - vars->outremain,
                                    vars->outbuf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, 0x%p) returned 0x%x\n",
                    (vars->outlen - vars->outremain), vars->outbuf, ret);
            vars->error = ret;
        }
        if (flush)
        {
            kern_coredump_log(NULL, "100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}

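/*
 * zlib read callback: feeds pending input into the compressor, folding it
 * into the running CRC and logging rough progress every 512 reads. A
 * next_in pointing at the stream itself is the zero-fill marker set up
 * by kdp_core_stream_output_chunk.
 */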
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    uint64_t percent, total_in = 0;
    unsigned len;

    len = strm->avail_in;
    if (len > size) len = size;
    if (len == 0) return 0;

    if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
    else bzero(buf, len);
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in  += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++))
    {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10)
        {
            vars->lastpercent = percent;
            kern_coredump_log(NULL, "%lld..\n", percent);
        }
    }

    return (int)len;
}

static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int        zr;
    boolean_t  flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled)
    {
        (*vars->zoutput)(zs, data, length);
    }
    else
    {
        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0)
        {
            if (!zs->avail_in && !flush)
            {
                if (!length) break;
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out)
            {
                zs->next_out = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) break;
            if (zr != Z_OK)
            {
                kern_coredump_log(NULL, "ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) (*vars->zoutput)(zs, NULL, 0);
    }

    return (vars->error);
}

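/*
 * Entry point used by the coredump layer to stream data into the
 * compressor. Lengths are 64-bit while zlib's counts are 32-bit, so the
 * request is carved into chunks of at most kMaxZLibChunk (1GB).
 */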
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
    IOReturn     err;
    unsigned int chunk;
    enum { kMaxZLibChunk = 1024*1024*1024 };
    struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    do
    {
        if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
        else                         chunk = kMaxZLibChunk;
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) data = (void *) (((uintptr_t) data) + chunk);
    }
    while (length && (kIOReturnSuccess == err));

    return (err);
}

#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t   c_buffers_size;

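/*
 * Returns the physical page backing 'vaddr' in the kernel pmap, or 0 for
 * regions deliberately excluded from the dump (compressor buffers, zlib
 * scratch memory, the RAMDisk, the arm64 comm page, and the ARM physical
 * memory map). *pvincr is set to the amount to skip ahead by.
 */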
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t  ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers)
    {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    }
    else if (vaddr == kdp_core_zmem)
    {
        /* zlib working memory */
        ppn = 0;
        vincr = kdp_core_zsize;
    }
    else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
    {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    }
    else
#if defined(__arm64__)
    if (vaddr == _COMM_PAGE64_BASE_ADDRESS)
    {
        /* not readable */
        ppn = 0;
        vincr = _COMM_PAGE_AREA_LENGTH;
    }
    else
#endif /* defined(__arm64__) */
#if defined(__arm__) || defined(__arm64__)
    if (vaddr == phystokv(avail_start))
    {
        /* physical memory map */
        ppn = 0;
        vincr = (avail_end - avail_start);
    }
    else
#endif /* defined(__arm__) || defined(__arm64__) */
    ppn = pmap_find_phys(kernel_pmap, vaddr);

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr)
    {
        uint64_t phys = ptoa_64(ppn);
#if defined(__arm__) || defined(__arm64__)
        if (isphysmem(phys)) *pvphysaddr = phystokv(phys);
#else
        if (physmap_enclosed(phys)) *pvphysaddr = (uintptr_t)PHYSMAP_PTOV(phys);
#endif
        else ppn = 0;
    }

    return (ppn);
}

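/*
 * Walk [start, end) in the kernel pmap and invoke 'callback' once per
 * maximal run of present (and dumpable) mappings.
 */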
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn        ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t        vincr = 0;
    vm_map_offset_t debug_start;
    vm_map_offset_t debug_end;
    boolean_t       lastvavalid;
#if defined(__arm__) || defined(__arm64__)
    vm_page_t       m = VM_PAGE_NULL;
#endif

    debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
    debug_end   = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));

#if defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
        /* We're at the start of the physmap, so pull out the pagetable
         * pages that are accessed through that region. */
        if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store))
            m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);

        if (m != VM_PAGE_NULL)
        {
            vm_map_offset_t vprev = vcur;
            ppn = (ppnum_t)atop(avail_end);
            while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m))
            {
                /* Ignore pages that come from the static region and have already been dumped. */
                if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start))
                {
                    ppn = VM_PAGE_GET_PHYS_PAGE(m);
                    break;
                }
                m = (vm_page_t)vm_page_queue_next(&m->listq);
            }
            vcur = phystokv(ptoa(ppn));
            if (vcur != vprev)
            {
                ret = callback(vcurstart, vprev, context);
                lastvavalid = FALSE;
            }
            vincr = PAGE_SIZE_64;
            if (ppn == atop(avail_end))
            {
                vm_object_unlock(&pmap_object_store);
                m = VM_PAGE_NULL;
            }
            else
                m = (vm_page_t)vm_page_queue_next(&m->listq);
        }
        if (m == VM_PAGE_NULL)
            ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#else /* defined(__arm__) || defined(__arm64__) */
        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
        if (ppn != 0)
        {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !(EFI_VALID_PAGE(ppn) ||
                     pmap_valid_page(ppn)))
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}

struct kern_dump_preflight_context
{
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}

struct kern_dump_send_seg_desc_context
{
    core_save_segment_descriptions_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
    uint64_t seg_start = (uint64_t) start;
    uint64_t seg_end = (uint64_t) end;

    return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context
{
    core_save_segment_data_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

    return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    uint64_t thread_state_size = 0, thread_count = 0;
    kern_return_t ret;

    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS,
                                         kern_dump_pmap_traverse_preflight_callback,
                                         &kdc_preflight);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
        return ret;
    }

    kern_collectth_state_size(&thread_count, &thread_state_size);

    ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
                   thread_count, thread_state_size, 0, context);
    return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
    kern_return_t ret;
    struct kern_dump_send_seg_desc_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS,
                                         kern_dump_pmap_traverse_send_segdesc_callback,
                                         &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
    kern_return_t ret;
    uint64_t thread_state_size = 0, thread_count = 0;

    kern_collectth_state_size(&thread_count, &thread_state_size);

    if (thread_state_size > 0) {
        void * iter = NULL;
        do {
            kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

            ret = callback(buf, context);
            if (ret != KERN_SUCCESS) {
                return ret;
            }
        } while (iter);
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
    return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
    kern_return_t ret;
    struct kern_dump_send_segdata_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

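/*
 * Resets the per-dump output state and the gzip stream before each core
 * is written; for network dumps the staging buffer is sized to the KDP
 * crashdump packet size.
 */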
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    /* Re-initialize kdp_outvars */
    outvars->zipped = 0;
    outvars->totalbytes = totalbytes;
    outvars->lastpercent = 0;
    outvars->error = kIOReturnSuccess;
    outvars->outremain = 0;
    outvars->outlen = 0;
    outvars->writes = 0;
    outvars->outbuf = NULL;

    if (outvars->outproc == &kdp_send_crashdump_data) {
        /* KERN_DUMP_NET */
        outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
    }

    kdp_core_total_size = totalbytes;

    /* Re-initialize zstream variables */
    kdp_core_zs.avail_in  = 0;
    kdp_core_zs.next_in   = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out  = NULL;
    kdp_core_zs.opaque    = outvars;

    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

    return KERN_SUCCESS;
}

static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
    uint64_t foffset;
    int ret;

    /* Write the file header -- first seek to the beginning of the file */
    foffset = 0;
    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
                sizeof(kdp_core_header), &kdp_core_header, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if CONFIG_EMBEDDED
    if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif

    return KERN_SUCCESS;
}

int
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
    int ret = 0;
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
    assert(out_file_length != NULL);
    *out_file_length = 0;

    kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
    kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
    strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
            MACH_CORE_FILEHEADER_NAMELEN);
    kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
    kdp_core_header.num_files++;
    kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

    ret = kern_dump_update_header(outvars);
    if (ret == KERN_SUCCESS) {
        *out_file_length = outvars->zipped;
    }

    return ret;
}

int
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
    int ret;

    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
                sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
    }

    return ret;
}

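/*
 * Core of the dump path: opens the output (for disk dumps), reserves space
 * for the corefile header and logs, streams the core(s), then writes the
 * accumulated panic/coredump logs and the header before closing.
 */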
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kdp_core_out_vars outvars = { };

    char *log_start = NULL, *buf = NULL;
    size_t existing_log_size = 0, new_log_len = 0;
    uint64_t foffset = 0;
    int ret = 0;
    boolean_t output_opened = FALSE, dump_succeeded = TRUE;

    /*
     * Record the initial panic log buffer length so we can dump the coredump log
     * and panic log to disk
     */
    log_start = debug_buf_ptr;
#if CONFIG_EMBEDDED
    assert(panic_info->eph_other_log_offset != 0);
    assert(panic_info->eph_panic_log_len != 0);
    /* Include any data from before the panic log as well */
    existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
            panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
    existing_log_size = log_start - debug_buf_base;
#endif /* CONFIG_EMBEDDED */

    assert(existing_log_size <= debug_buf_size);

    if (kd_variant == KERN_DUMP_DISK) {
        /* Open the file for output */
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }
    output_opened = true;

    /* Initialize gzip, output context */
    bzero(&outvars, sizeof(outvars));
    outvars.outproc = outproc;

    if (kd_variant == KERN_DUMP_DISK) {
        outvars.zoutput = kdp_core_zoutput;
        /* Space for file header, panic log, core log */
        foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE +
                KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1);
        kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

        /* Seek to the calculated offset (we'll scroll back later to flush the logs and header) */
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.zoutput = kdp_core_zoutputbuf;
#if CONFIG_EMBEDDED
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.zoutput = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif
    }

#if defined(__arm__) || defined(__arm64__)
    flush_mmu_tlb();
#endif

    kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
            "Transmitting kernel state, please wait:\n");

    if (kd_variant == KERN_DUMP_DISK) {
        /*
         * Dump co-processors as well, foffset will be overwritten with the
         * offset of the next location in the file to be written to.
         */
        if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    } else {
        /* Only the kernel */
        if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    }

    if (kd_variant == KERN_DUMP_DISK) {
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
        /* Write the macOS panic stackshot on its own to a separate 'corefile' */
        if (panic_stackshot_buf && panic_stackshot_len) {
            uint64_t compressed_stackshot_len = 0;

            /* Seek to the offset of the next 'file' (foffset provided/updated from kern_do_coredump) */
            if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
                kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_core_output(outvars, %lu, 0x%p) returned 0x%x\n",
                        panic_stackshot_len, (void *) panic_stackshot_buf, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to flush stackshot data: kdp_core_output(0x%p, 0, NULL) returned 0x%x\n", &outvars, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
                dump_succeeded = FALSE;
            } else {
                kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
            }
        }
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */

        /* Write the debug log -- first seek to the end of the corefile header */
        foffset = KERN_COREDUMP_HEADERSIZE;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        new_log_len = debug_buf_ptr - log_start;
        if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) {
            new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE;
        }

#if CONFIG_EMBEDDED
        /* This data is after the panic stackshot, we need to write it separately */
        existing_log_size -= panic_info->eph_other_log_len;
#endif

        /*
         * Write out the paniclog (from the beginning of the debug
         * buffer until the start of the stackshot)
         */
        buf = debug_buf_base;
        if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
                    existing_log_size, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

#if CONFIG_EMBEDDED
        /* The next part of the log we're interested in is the beginning of the 'other' log */
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
        /* Include any data after the panic stackshot but before we started the coredump log (see above) */
        new_log_len += panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
        buf += existing_log_size;
#endif /* CONFIG_EMBEDDED */

        /* Write the coredump log */
        if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
                    new_log_len, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        kdp_core_header.log_length = existing_log_size + new_log_len;
        kern_dump_update_header(&outvars);
    }

exit:
    /* close / last packet */
    if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
        dump_succeeded = FALSE;
    }

    return (dump_succeeded ? 0 : -1);
}

boolean_t
dumped_kernel_core()
{
    return kern_dump_successful;
}

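/*
 * Dispatches to the appropriate output procedure for the requested dump
 * variant; local disk dumps are attempted at most once per boot.
 */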
int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
    int ret = -1;
#if KASAN
    kasan_disable();
#endif
    if (kd_variant == KERN_DUMP_DISK) {
        if (dumped_local) return (0);
        if (local_dump_in_progress) return (-1);
        local_dump_in_progress = TRUE;
#if CONFIG_EMBEDDED
        hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
#endif
        ret = do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK);
        if (ret == 0) {
            dumped_local = TRUE;
            kern_dump_successful = TRUE;
            local_dump_in_progress = FALSE;
        }

        return ret;
#if CONFIG_EMBEDDED
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
#endif
    } else {
        ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
    }
}

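/*
 * Spin waiting for a hardware shared memory debugger to attach, kicking
 * off a coredump whenever the handshake structure requests one.
 */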
#if CONFIG_EMBEDDED
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_spin_shmcon()
{
#pragma clang diagnostic pop
    kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
    kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
            hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

    assert(hwsd_info != NULL);
    hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
    hwsd_info->xhsdci_seq_no = 0;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    for (;;) {
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
            kern_dump(KERN_DUMP_HW_SHMEM_DBG);
        }

        if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
                (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
            hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
            hwsd_info->xhsdci_seq_no = 0;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }
    }
}
#endif /* CONFIG_EMBEDDED */

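/*
 * Trivial bump allocator handed to zlib: carves 32-byte-aligned
 * allocations out of the preallocated kdp_core_zmem region, since we
 * cannot call into the VM at panic time. zlib's state is allocated once,
 * at kdp_core_init() time, and frees are no-ops.
 */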
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size)); // 32-byte align for vector crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}


#if CONFIG_EMBEDDED
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif

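/*
 * One-time setup: preallocates the zlib scratch region (plus a packet
 * buffer for network dumps), initializes the gzip-mode deflate stream,
 * registers the xnu coredump callbacks and, on embedded, carves up the
 * physically contiguous shared memory buffers used by Astris.
 */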
void
kdp_core_init(void)
{
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;
#if CONFIG_EMBEDDED
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
    cache_info_t *cpuid_cache_info = NULL;
#endif
    kern_coredump_callback_config core_config = { };

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree  = kdp_core_zfree;

    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
                     wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }

    bzero(&kdp_core_header, sizeof(kdp_core_header));

    core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
    core_config.kcc_coredump_get_summary = kern_dump_save_summary;
    core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
    core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
    core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
    core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
    core_config.kcc_coredump_save_misc_data = NULL;

    kr = kern_register_xnu_coredump_helper(&core_config);
    assert(KERN_SUCCESS == kr);

#if CONFIG_EMBEDDED
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since astris isn't capable
     * of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
                           0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and adjust
     * the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    cpuid_cache_info = cache_info();
    assert(cpuid_cache_info != NULL);

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    /* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
    kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
    kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
    kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
    /* The buffer size should be a cache-line length multiple */
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = kalloc(sizeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
                                &kdp_hw_shmem_dbg_contact_deadline_interval);

    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* CONFIG_EMBEDDED */

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
    /* Allocate space in the kernel map for the panic stackshot */
    kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */