1 /*
2 * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
30
31 #include <mach/mach_types.h>
32 #include <mach/vm_attributes.h>
33 #include <mach/vm_param.h>
34 #include <mach/vm_map.h>
35 #include <vm/vm_protos.h>
36 #include <vm/vm_kern.h>
37 #include <vm/vm_map.h>
38 #include <machine/cpu_capabilities.h>
39 #include <libsa/types.h>
40 #include <libkern/kernel_mach_header.h>
41 #include <libkern/zlib.h>
42 #include <kdp/kdp_internal.h>
43 #include <kdp/kdp_core.h>
44 #include <kdp/processor_core.h>
45 #include <IOKit/IOPolledInterface.h>
46 #include <IOKit/IOBSD.h>
47 #include <sys/errno.h>
48 #include <sys/msgbuf.h>
49 #include <san/kasan.h>
50
51 #if defined(__x86_64__)
52 #include <i386/pmap_internal.h>
53 #include <kdp/ml/i386/kdp_x86_common.h>
54 #include <kern/debug.h>
55 #endif /* defined(__x86_64__) */
56
57 #if CONFIG_EMBEDDED
58 #include <arm/cpuid.h>
59 #include <arm/caches_internal.h>
60 #include <pexpert/arm/consistent_debug.h>
61
62 #if !defined(ROUNDUP)
63 #define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
64 #endif
65
66 #if !defined(ROUNDDOWN)
67 #define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
68 #endif
69 #endif /* CONFIG_EMBEDDED */
70
71 typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
72 vm_map_offset_t end,
73 void *context);
74
75 extern int pmap_traverse_present_mappings(pmap_t pmap,
76 vm_map_offset_t start,
77 vm_map_offset_t end,
78 pmap_traverse_callback callback,
79 void *context);
80
81 static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
82 static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
83 static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
84 static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
85 static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);
86
87 static int
88 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
89 vm_map_offset_t end,
90 void *context);
91 static int
92 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
93 vm_map_offset_t end,
94 void *context);
95
96 static int
97 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
98 vm_map_offset_t end,
99 void *context);
100
101 struct kdp_core_out_vars;
102 typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
103 uint64_t length, void *panic_data);
104
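/*
 * Per-dump output state shared between the zlib layer and the raw output
 * procedure.  'outproc' is the underlying writer (disk, network or shared
 * memory), 'zoutput' is the zlib output hook layered on top of it, 'zipped'
 * counts compressed bytes handed to the writer, and 'outbuf'/'outlen'/
 * 'outremain' are used only by the network path, which batches output into
 * kdp_crashdump_pkt_size-sized packets.
 */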
105 struct kdp_core_out_vars
106 {
107 kern_dump_output_proc outproc;
108 z_output_func zoutput;
109 size_t zipped;
110 uint64_t totalbytes;
111 uint64_t lastpercent;
112 IOReturn error;
113 unsigned outremain;
114 unsigned outlen;
115 unsigned writes;
116 Bytef * outbuf;
117 };
118
119 extern uint32_t kdp_crashdump_pkt_size;
120
121 static vm_offset_t kdp_core_zmem;
122 static size_t kdp_core_zsize;
123 static size_t kdp_core_zoffset;
124 static z_stream kdp_core_zs;
125
126 static uint64_t kdp_core_total_size;
127 static uint64_t kdp_core_total_size_sent_uncomp;
128 #if CONFIG_EMBEDDED
129 struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;
130
131 #define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
132 #define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE 64 * 1024
133
134 /*
135 * Astris can read up to 4064 bytes at a time over
136 * the probe, so we should try to make our buffer
137 * size a multiple of this to make reads by astris
138 * (the bottleneck) most efficient.
139 */
140 #define OPTIMAL_ASTRIS_READSIZE 4064
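/*
 * Rough sketch of the sizing done in kdp_core_init(), assuming a 64-byte
 * cache line: the 64KB region loses sizeof(*hwsd_info), rounded up to a
 * cache line, for the handshake structure; the remainder is split across the
 * two buffers; and each buffer is then trimmed down to a multiple of
 * ROUNDDOWN(4064, 64) = 4032 bytes so it holds a whole number of
 * optimally-sized Astris reads.
 */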
141
142 struct kdp_hw_shmem_dbg_buf_elm {
143 vm_offset_t khsd_buf;
144 uint32_t khsd_data_length;
145 STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
146 };
147
148 static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
149 STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
150 static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
151 STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);
152
153 static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
154 static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;
155
156 static uint32_t kdp_hw_shmem_dbg_bufsize = 0;
157
158 static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
159 static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
160 static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;
161
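/*
 * Shape of the shared-memory handshake as implemented below: on panic,
 * panic_spin_shmcon() advertises XHSDCI_STATUS_KERNEL_READY and waits for
 * the debugger to request XHSDCI_COREDUMP_BEGIN.  While dumping, the kernel
 * publishes each full buffer with XHSDCI_COREDUMP_BUF_READY and a new
 * sequence number; the debugger acknowledges consumption by writing
 * XHSDCI_COREDUMP_BUF_EMPTY with the next sequence number.  A stale sequence
 * number, or no contact within the timeout below, aborts the dump with
 * XHSDCI_COREDUMP_ERROR, and XHSDCI_COREDUMP_STATUS_DONE marks completion.
 */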
162 #define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
163 #endif /* CONFIG_EMBEDDED */
164
165 static boolean_t kern_dump_successful = FALSE;
166
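/*
 * In-memory copy of the corefile header written at offset 0 of the on-disk
 * corefile.  do_kern_dump() reserves space for it (plus the debug log) at
 * the front of the file, kern_dump_record_file() adds one entry per embedded
 * gzip stream (name, offset, compressed length), and kern_dump_update_header()
 * seeks back to the beginning and rewrites it after each file is recorded.
 */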
167 struct mach_core_fileheader kdp_core_header = { };
168
169 /*
170 * These variables will be modified by the BSD layer if the root device is
171 * a RAMDisk.
172 */
173 uint64_t kdp_core_ramdisk_addr = 0;
174 uint64_t kdp_core_ramdisk_size = 0;
175
176 boolean_t kdp_has_polled_corefile(void)
177 {
178 return (NULL != gIOPolledCoreFileVars);
179 }
180
181 #if CONFIG_EMBEDDED
182 /*
183 * Whenever we start a coredump, make sure the buffers
184 * are all on the free queue and the state is as expected.
185 * The buffers may have been left in a different state if
186 * a previous coredump attempt failed.
187 */
188 static void
189 kern_dump_hw_shmem_dbg_reset()
190 {
191 struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;
192
193 STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
194 cur_elm->khsd_data_length = 0;
195 }
196
197 if (currently_filling_buf != NULL) {
198 currently_filling_buf->khsd_data_length = 0;
199
200 STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
201 currently_filling_buf = NULL;
202 }
203
204 if (currently_flushing_buf != NULL) {
205 currently_flushing_buf->khsd_data_length = 0;
206
207 STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
208 currently_flushing_buf = NULL;
209 }
210
211 STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
212 cur_elm->khsd_data_length = 0;
213
214 STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
215 STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
216 }
217
218 hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
219 kdp_hw_shmem_dbg_seq_no = 0;
220 hwsd_info->xhsdci_buf_phys_addr = 0;
221 hwsd_info->xhsdci_buf_data_length = 0;
222 hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
223 hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
224 hwsd_info->xhsdci_page_size = PAGE_SIZE;
225 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
226
227 kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
228 }
229
 230 /*
 231  * Tries to move buffers forward through the pipeline: if
 232  * the hardware debugger is done consuming the current buffer, we
 233  * hand it the next one to flush and move the consumed
 234  * buffer back to the free queue.
 235  */
236 static int
237 kern_dump_hw_shmem_dbg_process_buffers()
238 {
239 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
240 if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
241 kern_coredump_log(NULL, "Detected remote error, terminating...\n");
242 return -1;
243 } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
244 if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
245 kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
246 (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
247 hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
248 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
249 return -1;
250 }
251
252 kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;
253
254 if (currently_flushing_buf != NULL) {
255 currently_flushing_buf->khsd_data_length = 0;
256 STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
257 }
258
259 currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
260 if (currently_flushing_buf != NULL) {
261 STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);
262
263 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
264 hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
265 hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
266 hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
267 hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
268 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
269 hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
270 hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
271 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
272 }
273
274 kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
275 kdp_hw_shmem_dbg_contact_deadline_interval;
276
277 return 0;
278 } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
279 kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
280 kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);
281
282 hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
283 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
284 return -1;
285 }
286
287 return 0;
288 }
289
290 /*
291 * Populates currently_filling_buf with a new buffer
292 * once one becomes available. Returns 0 on success
293 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
294 * if it is non-zero (an error).
295 */
296 static int
297 kern_dump_hw_shmem_dbg_get_buffer()
298 {
299 int ret = 0;
300
301 assert(currently_filling_buf == NULL);
302
303 while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
304 ret = kern_dump_hw_shmem_dbg_process_buffers();
305 if (ret) {
306 return ret;
307 }
308 }
309
310 currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
311 STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);
312
313 assert(currently_filling_buf->khsd_data_length == 0);
314 return ret;
315 }
316
317 /*
318 * Output procedure for hardware shared memory core dumps
319 *
320 * Tries to fill up the buffer completely before flushing
321 */
322 static int
323 kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
324 uint64_t length, void * data)
325 {
326 int ret = 0;
327
328 assert(length < UINT32_MAX);
329 uint32_t bytes_remaining = (uint32_t) length;
330 uint32_t bytes_to_copy;
331
332 if (request == KDP_EOF) {
333 assert(currently_filling_buf == NULL);
334
335 /*
336 * Wait until we've flushed all the buffers
337 * before setting the connection status to done.
338 */
339 while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
340 currently_flushing_buf != NULL) {
341 ret = kern_dump_hw_shmem_dbg_process_buffers();
342 if (ret) {
343 return ret;
344 }
345 }
346
347 /*
348 * If the last status we saw indicates that the buffer was
349 * empty and we didn't flush any new data since then, we expect
350 * the sequence number to still match the last we saw.
351 */
352 if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
353 kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
354 kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
355 return -1;
356 }
357
358 kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;
359
360 kern_coredump_log(NULL, "Setting coredump status as done!\n");
361 hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
362 hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
363 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
364
365 return ret;
366 }
367
368 assert(request == KDP_DATA);
369
370 /*
371 * The output procedure is called with length == 0 and data == NULL
372 * to flush any remaining output at the end of the coredump before
373 * we call it a final time to mark the dump as done.
374 */
375 if (length == 0) {
376 assert(data == NULL);
377
378 if (currently_filling_buf != NULL) {
379 STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
380 currently_filling_buf = NULL;
381 }
382
383 /*
384 * Move the current buffer along if possible.
385 */
386 ret = kern_dump_hw_shmem_dbg_process_buffers();
387 return ret;
388 }
389
390 while (bytes_remaining != 0) {
391 /*
392 * Make sure we have a buffer to work with.
393 */
394 while (currently_filling_buf == NULL) {
395 ret = kern_dump_hw_shmem_dbg_get_buffer();
396 if (ret) {
397 return ret;
398 }
399 }
400
401 assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
402 bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
403 currently_filling_buf->khsd_data_length);
404 bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
405 bytes_to_copy);
406
407 currently_filling_buf->khsd_data_length += bytes_to_copy;
408
409 if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
410 STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
411 currently_filling_buf = NULL;
412
413 /*
414 * Move it along if possible.
415 */
416 ret = kern_dump_hw_shmem_dbg_process_buffers();
417 if (ret) {
418 return ret;
419 }
420 }
421
422 bytes_remaining -= bytes_to_copy;
423 data = (void *) ((uintptr_t)data + bytes_to_copy);
424 }
425
426 return ret;
427 }
428 #endif /* CONFIG_EMBEDDED */
429
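/*
 * kern_dump_output_proc used for local corefiles: translates KDP_WRQ,
 * KDP_SEEK, KDP_DATA, KDP_FLUSH and KDP_EOF requests into operations on the
 * preallocated polled-mode corefile (gIOPolledCoreFileVars).
 */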
430 static IOReturn
431 kern_dump_disk_proc(unsigned int request, __unused char *corename,
432 uint64_t length, void * data)
433 {
434 uint64_t noffset;
435 uint32_t err = kIOReturnSuccess;
436
437 switch (request)
438 {
439 case KDP_WRQ:
440 err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
441 if (kIOReturnSuccess != err) {
442 kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
443 break;
444 }
445 err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
446 break;
447
448 case KDP_SEEK:
449 noffset = *((uint64_t *) data);
450 err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
451 if (kIOReturnSuccess != err) {
452 kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
453 break;
454 }
455 err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
456 if (kIOReturnSuccess != err) {
457 kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
458 }
459 break;
460
461 case KDP_DATA:
462 err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
463 if (kIOReturnSuccess != err) {
464 kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, 0x%p, 0x%llx, NULL) returned 0x%x\n",
465 data, length, err);
466 break;
467 }
468 break;
469
470 #if CONFIG_EMBEDDED
471 /* Only supported on embedded by the underlying polled mode driver */
472 case KDP_FLUSH:
473 err = IOPolledFileFlush(gIOPolledCoreFileVars);
474 if (kIOReturnSuccess != err) {
475 kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
476 break;
477 }
478 break;
479 #endif
480
481 case KDP_EOF:
482 err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
483 if (kIOReturnSuccess != err) {
484 kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
485 break;
486 }
487 err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
488 if (kIOReturnSuccess != err) {
489 kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
490 break;
491 }
492 break;
493 }
494
495 return (err);
496 }
497
498 /*
499 * flushes any data to the output proc immediately
500 */
501 static int
502 kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
503 {
504 struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
505 IOReturn ret;
506
507 vars->zipped += len;
508
509 if (vars->error >= 0)
510 {
511 if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
512 {
513 kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, 0x%p) returned 0x%x\n",
514 len, buf, ret);
515 vars->error = ret;
516 }
517 if (!buf && !len) kern_coredump_log(NULL, "100..");
518 }
519 return (len);
520 }
521
522 /*
523 * tries to fill the buffer with data before flushing it via the output proc.
524 */
525 static int
526 kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
527 {
528 struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
529 unsigned remain;
530 IOReturn ret;
531 unsigned chunk;
532 boolean_t flush;
533
534 remain = inlen;
535 vars->zipped += inlen;
536 flush = (!inbuf && !inlen);
537
538 while ((vars->error >= 0) && (remain || flush))
539 {
540 chunk = vars->outremain;
541 if (chunk > remain) chunk = remain;
542 if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
543 else
544 {
545 bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
546 inbuf += chunk;
547 }
548 vars->outremain -= chunk;
549 remain -= chunk;
550
551 if (vars->outremain && !flush) break;
552 if ((ret = (*vars->outproc)(KDP_DATA, NULL,
553 vars->outlen - vars->outremain,
554 vars->outbuf)) != kIOReturnSuccess)
555 {
556 kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, 0x%p) returned 0x%x\n",
557 (vars->outlen - vars->outremain), vars->outbuf, ret);
558 vars->error = ret;
559 }
560 if (flush)
561 {
562 kern_coredump_log(NULL, "100..");
563 flush = false;
564 }
565 vars->outremain = vars->outlen;
566 }
567 return (inlen);
568 }
569
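/*
 * zlib input callback: copies up to 'size' bytes of pending input into the
 * compressor's buffer (zero-filling when next_in carries the "zero marker",
 * i.e. points at the stream itself), maintains a CRC over the uncompressed
 * data, and periodically logs percentage progress of the dump.
 */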
570 static int
571 kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
572 {
573 struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
574 uint64_t percent, total_in = 0;
575 unsigned len;
576
577 len = strm->avail_in;
578 if (len > size) len = size;
579 if (len == 0) return 0;
580
581 if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
582 else bzero(buf, len);
583 strm->adler = z_crc32(strm->adler, buf, len);
584
585 strm->avail_in -= len;
586 strm->next_in += len;
587 strm->total_in += len;
588
589 if (0 == (511 & vars->writes++))
590 {
591 total_in = strm->total_in;
592 kdp_core_total_size_sent_uncomp = strm->total_in;
593
594 percent = (total_in * 100) / vars->totalbytes;
595 if ((percent - vars->lastpercent) >= 10)
596 {
597 vars->lastpercent = percent;
598 kern_coredump_log(NULL, "%lld..\n", percent);
599 }
600 }
601
602 return (int)len;
603 }
604
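/*
 * Pushes one chunk of uncompressed data through the deflate stream, or hands
 * it straight to the output routine when compression is disabled.  The
 * stream's own address doubles as the sentinel for zero-filled input and as
 * a dummy next_out pointer; actual output is routed through the zoutput hook
 * installed by deflateResetWithIO().  A NULL/zero-length chunk finishes the
 * gzip stream.
 */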
605 static IOReturn
606 kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
607 {
608 z_stream * zs;
609 int zr;
610 boolean_t flush;
611
612 zs = &kdp_core_zs;
613
614 if (kdp_corezip_disabled)
615 {
616 (*vars->zoutput)(zs, data, length);
617 }
618 else
619 {
620
621 flush = (!length && !data);
622 zr = Z_OK;
623
624 assert(!zs->avail_in);
625
626 while (vars->error >= 0)
627 {
628 if (!zs->avail_in && !flush)
629 {
630 if (!length) break;
631 zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
632 zs->avail_in = length;
633 length = 0;
634 }
635 if (!zs->avail_out)
636 {
637 zs->next_out = (Bytef *) zs;
638 zs->avail_out = UINT32_MAX;
639 }
640 zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
641 if (Z_STREAM_END == zr) break;
642 if (zr != Z_OK)
643 {
644 kern_coredump_log(NULL, "ZERR %d\n", zr);
645 vars->error = zr;
646 }
647 }
648
649 if (flush) (*vars->zoutput)(zs, NULL, 0);
650 }
651
652 return (vars->error);
653 }
654
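/*
 * Output entry point used by the coredump machinery: splits arbitrarily
 * large writes into chunks of at most 1GB before passing them to
 * kdp_core_stream_output_chunk(), since the zlib stream tracks lengths in
 * 32-bit counters.
 */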
655 kern_return_t
656 kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
657 {
658 IOReturn err;
659 unsigned int chunk;
660 enum { kMaxZLibChunk = 1024*1024*1024 };
661 struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;
662
663 do
664 {
665 if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
666 else chunk = kMaxZLibChunk;
667 err = kdp_core_stream_output_chunk(vars, chunk, data);
668
669 length -= chunk;
670 if (data) data = (void *) (((uintptr_t) data) + chunk);
671 }
672 while (length && (kIOReturnSuccess == err));
673
674 return (err);
675 }
676
677 #if defined(__arm__) || defined(__arm64__)
678 extern pmap_paddr_t avail_start, avail_end;
679 extern struct vm_object pmap_object_store;
680 #endif
681 extern vm_offset_t c_buffers;
682 extern vm_size_t c_buffers_size;
683
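/*
 * Returns the physical page backing 'vaddr' in the kernel pmap, or 0 if the
 * page should not appear in the dump.  For regions that are excluded
 * wholesale (compressor buffers, zlib working memory, the RAM disk, the
 * commpage and the physical aperture) *pvincr is set to the size of the
 * entire region so the caller can skip it in one step; otherwise the
 * increment is a single page.  When pvphysaddr is non-NULL, the kernel
 * virtual address of the page is also returned if it is directly mappable.
 */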
684 ppnum_t
685 kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
686 {
687 ppnum_t ppn = 0;
688 uint64_t vincr = PAGE_SIZE_64;
689
690 assert(!(vaddr & PAGE_MASK_64));
691
692 /* VA ranges to exclude */
693 if (vaddr == c_buffers)
694 {
695 /* compressor data */
696 ppn = 0;
697 vincr = c_buffers_size;
698 }
699 else if (vaddr == kdp_core_zmem)
700 {
701 /* zlib working memory */
702 ppn = 0;
703 vincr = kdp_core_zsize;
704 }
705 else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
706 {
707 ppn = 0;
708 vincr = kdp_core_ramdisk_size;
709 }
710 else
711 #if defined(__arm64__)
712 if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS)
713 {
714 /* not readable */
715 ppn = 0;
716 vincr = _COMM_PAGE_AREA_LENGTH;
717 }
718 else
719 #endif /* defined(__arm64__) */
720 #if defined(__arm__) || defined(__arm64__)
721 if (vaddr == phystokv(avail_start))
722 {
723 /* physical memory map */
724 ppn = 0;
725 vincr = (avail_end - avail_start);
726 }
727 else
728 #endif /* defined(__arm__) || defined(__arm64__) */
729 ppn = pmap_find_phys(kernel_pmap, vaddr);
730
731 *pvincr = round_page_64(vincr);
732
733 if (ppn && pvphysaddr)
734 {
735 uint64_t phys = ptoa_64(ppn);
736 #if defined(__arm__) || defined(__arm64__)
737 if (isphysmem(phys)) *pvphysaddr = phystokv(phys);
738 #else
739 if (physmap_enclosed(phys)) *pvphysaddr = (uintptr_t)PHYSMAP_PTOV(phys);
740 #endif
741 else ppn = 0;
742 }
743
744 return (ppn);
745 }
746
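/*
 * Walks the kernel virtual range [start, end), coalescing runs of present
 * pages and invoking 'callback' once per contiguous run.  On ARM the page
 * table pages held in pmap_object_store are folded into the walk when the
 * physical aperture is reached; on x86_64, empty 2MB regions are skipped a
 * page directory entry at a time.
 */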
747 int
748 pmap_traverse_present_mappings(pmap_t __unused pmap,
749 vm_map_offset_t start,
750 vm_map_offset_t end,
751 pmap_traverse_callback callback,
752 void *context)
753 {
754 IOReturn ret;
755 vm_map_offset_t vcurstart, vcur;
756 uint64_t vincr = 0;
757 vm_map_offset_t debug_start;
758 vm_map_offset_t debug_end;
759 boolean_t lastvavalid;
760 #if defined(__arm__) || defined(__arm64__)
761 vm_page_t m = VM_PAGE_NULL;
762 #endif
763
764 debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
765 debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
766
767 #if defined(__x86_64__)
768 assert(!is_ept_pmap(pmap));
769 #endif
770
771 /* Assumes pmap is locked, or being called from the kernel debugger */
772
773 if (start > end) return (KERN_INVALID_ARGUMENT);
774
775 ret = KERN_SUCCESS;
776 lastvavalid = FALSE;
777 for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
778 ppnum_t ppn = 0;
779
780 #if defined(__arm__) || defined(__arm64__)
781 /* We're at the start of the physmap, so pull out the pagetable pages that
782 * are accessed through that region.*/
783 if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store))
784 m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
785
786 if (m != VM_PAGE_NULL)
787 {
788 vm_map_offset_t vprev = vcur;
789 ppn = (ppnum_t)atop(avail_end);
790 while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m))
791 {
792 /* Ignore pages that come from the static region and have already been dumped.*/
793 if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start))
794 {
795 ppn = VM_PAGE_GET_PHYS_PAGE(m);
796 break;
797 }
798 m = (vm_page_t)vm_page_queue_next(&m->listq);
799 }
800 vcur = phystokv(ptoa(ppn));
801 if (vcur != vprev)
802 {
803 ret = callback(vcurstart, vprev, context);
804 lastvavalid = FALSE;
805 }
806 vincr = PAGE_SIZE_64;
807 if (ppn == atop(avail_end))
808 {
809 vm_object_unlock(&pmap_object_store);
810 m = VM_PAGE_NULL;
811 }
812 else
813 m = (vm_page_t)vm_page_queue_next(&m->listq);
814 }
815 if (m == VM_PAGE_NULL)
816 ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
817 #else /* defined(__arm__) || defined(__arm64__) */
818 ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
819 #endif
820 if (ppn != 0)
821 {
822 if (((vcur < debug_start) || (vcur >= debug_end))
823 && !(EFI_VALID_PAGE(ppn) ||
824 pmap_valid_page(ppn)))
825 {
826 /* not something we want */
827 ppn = 0;
828 }
829 }
830
831 if (ppn != 0) {
832 if (!lastvavalid) {
833 /* Start of a new virtual region */
834 vcurstart = vcur;
835 lastvavalid = TRUE;
836 }
837 } else {
838 if (lastvavalid) {
839 /* end of a virtual region */
840 ret = callback(vcurstart, vcur, context);
841 lastvavalid = FALSE;
842 }
843
844 #if defined(__x86_64__)
845 /* Try to skip by 2MB if possible */
846 if (((vcur & PDMASK) == 0) && cpu_64bit) {
847 pd_entry_t *pde;
848 pde = pmap_pde(pmap, vcur);
849 if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
850 /* Make sure we wouldn't overflow */
851 if (vcur < (end - NBPD)) {
852 vincr = NBPD;
853 }
854 }
855 }
856 #endif /* defined(__x86_64__) */
857 }
858 vcur += vincr;
859 }
860
861 if ((ret == KERN_SUCCESS) && lastvavalid) {
862 /* send previous run */
863 ret = callback(vcurstart, vcur, context);
864 }
865 return (ret);
866 }
867
868 struct kern_dump_preflight_context
869 {
870 uint32_t region_count;
871 uint64_t dumpable_bytes;
872 };
873
874 int
875 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
876 vm_map_offset_t end,
877 void *context)
878 {
879 struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
880 IOReturn ret = KERN_SUCCESS;
881
882 kdc->region_count++;
883 kdc->dumpable_bytes += (end - start);
884
885 return (ret);
886 }
887
888
889 struct kern_dump_send_seg_desc_context
890 {
891 core_save_segment_descriptions_cb callback;
892 void *context;
893 };
894
895 int
896 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
897 vm_map_offset_t end,
898 void *context)
899 {
900 struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
901 uint64_t seg_start = (uint64_t) start;
902 uint64_t seg_end = (uint64_t) end;
903
904 return kds_context->callback(seg_start, seg_end, kds_context->context);
905 }
906
907 struct kern_dump_send_segdata_context
908 {
909 core_save_segment_data_cb callback;
910 void *context;
911 };
912
913 int
914 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
915 vm_map_offset_t end,
916 void *context)
917 {
918 struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;
919
920 return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
921 }
922
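/*
 * The kern_dump_save_* routines below implement the xnu coredump helper
 * registered in kdp_core_init(): they report the summary (region count,
 * dumpable bytes, thread count/state size), emit the segment descriptions,
 * thread state and kernel version string, and finally stream the segment
 * data itself through the kern_coredump layer.
 */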
923 static int
924 kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
925 {
926 struct kern_dump_preflight_context kdc_preflight = { };
927 uint64_t thread_state_size = 0, thread_count = 0;
928 kern_return_t ret;
929
930 ret = pmap_traverse_present_mappings(kernel_pmap,
931 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
932 VM_MAX_KERNEL_ADDRESS,
933 kern_dump_pmap_traverse_preflight_callback,
934 &kdc_preflight);
935 if (ret != KERN_SUCCESS) {
936 kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
937 return ret;
938 }
939
940 kern_collectth_state_size(&thread_count, &thread_state_size);
941
942 ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
943 thread_count, thread_state_size, 0, context);
944 return ret;
945 }
946
947 static int
948 kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
949 {
950 kern_return_t ret;
951 struct kern_dump_send_seg_desc_context kds_context;
952
953 kds_context.callback = callback;
954 kds_context.context = context;
955
956 ret = pmap_traverse_present_mappings(kernel_pmap,
957 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
958 VM_MAX_KERNEL_ADDRESS,
959 kern_dump_pmap_traverse_send_segdesc_callback,
960 &kds_context);
961 if (ret != KERN_SUCCESS) {
962 kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
963 return ret;
964 }
965
966 return KERN_SUCCESS;
967 }
968
969 static int
970 kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
971 {
972 kern_return_t ret;
973 uint64_t thread_state_size = 0, thread_count = 0;
974
975 kern_collectth_state_size(&thread_count, &thread_state_size);
976
977 if (thread_state_size > 0) {
978 void * iter = NULL;
979 do {
980 kern_collectth_state (current_thread(), buf, thread_state_size, &iter);
981
982 ret = callback(buf, context);
983 if (ret != KERN_SUCCESS) {
984 return ret;
985 }
986 } while (iter);
987 }
988
989 return KERN_SUCCESS;
990 }
991
992 static int
993 kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
994 {
995 return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
996 }
997
998 static int
999 kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
1000 {
1001 kern_return_t ret;
1002 struct kern_dump_send_segdata_context kds_context;
1003
1004 kds_context.callback = callback;
1005 kds_context.context = context;
1006
1007 ret = pmap_traverse_present_mappings(kernel_pmap,
1008 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
1009 VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
1010 if (ret != KERN_SUCCESS) {
1011 kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
1012 return ret;
1013 }
1014
1015 return KERN_SUCCESS;
1016 }
1017
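/*
 * Called before each file written into the corefile: resets the per-file
 * output accounting, points the network path at its packet staging buffer,
 * and restarts the compression state via deflateResetWithIO() so that each
 * embedded file is an independent gzip stream.
 */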
1018 kern_return_t
1019 kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
1020 {
1021 struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
1022
1023 /* Re-initialize kdp_outvars */
1024 outvars->zipped = 0;
1025 outvars->totalbytes = totalbytes;
1026 outvars->lastpercent = 0;
1027 outvars->error = kIOReturnSuccess;
1028 outvars->outremain = 0;
1029 outvars->outlen = 0;
1030 outvars->writes = 0;
1031 outvars->outbuf = NULL;
1032
1033 if (outvars->outproc == &kdp_send_crashdump_data) {
1034 /* KERN_DUMP_NET */
1035 outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
1036 outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
1037 }
1038
1039 kdp_core_total_size = totalbytes;
1040
1041 /* Re-initialize zstream variables */
1042 kdp_core_zs.avail_in = 0;
1043 kdp_core_zs.next_in = NULL;
1044 kdp_core_zs.avail_out = 0;
1045 kdp_core_zs.next_out = NULL;
1046 kdp_core_zs.opaque = outvars;
1047
1048 deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);
1049
1050 return KERN_SUCCESS;
1051 }
1052
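/*
 * Seeks back to the beginning of the corefile and rewrites the
 * mach_core_fileheader, then flushes, so the on-disk header reflects every
 * file recorded so far.
 */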
1053 static int
1054 kern_dump_update_header(struct kdp_core_out_vars *outvars)
1055 {
1056 uint64_t foffset;
1057 int ret;
1058
1059 /* Write the file header -- first seek to the beginning of the file */
1060 foffset = 0;
1061 if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
1062 kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
1063 sizeof(foffset), &foffset, foffset, ret);
1064 return ret;
1065 }
1066
1067 if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
1068 kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
1069 sizeof(kdp_core_header), &kdp_core_header, ret);
1070 return ret;
1071 }
1072
1073 if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
1074 kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
1075 return ret;
1076 }
1077
1078 #if CONFIG_EMBEDDED
1079 if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
1080 kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
1081 return ret;
1082 }
1083 #endif
1084
1085 return KERN_SUCCESS;
1086 }
1087
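/*
 * Records a completed file in kdp_core_header (name, starting offset, and
 * compressed length taken from outvars->zipped), rewrites the header on
 * disk, and returns the compressed length through out_file_length.
 */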
1088 int
1089 kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
1090 {
1091 int ret = 0;
1092 struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
1093
1094 assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
1095 assert(out_file_length != NULL);
1096 *out_file_length = 0;
1097
1098 kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
1099 kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
1100 strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
1101 MACH_CORE_FILEHEADER_NAMELEN);
1102 kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
1103 kdp_core_header.num_files++;
1104 kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;
1105
1106 ret = kern_dump_update_header(outvars);
1107 if (ret == KERN_SUCCESS) {
1108 *out_file_length = outvars->zipped;
1109 }
1110
1111 return ret;
1112 }
1113
1114 int
1115 kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
1116 {
1117 struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
1118 int ret;
1119
1120 if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
1121 kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
1122 sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
1123 }
1124
1125 return ret;
1126 }
1127
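/*
 * Common implementation behind kern_dump(): opens the output, sets up the
 * compression/output state, and runs kern_do_coredump() to write the kernel
 * core (plus co-processor cores for disk dumps).  For disk dumps it then
 * appends the panic and coredump logs into the space reserved behind the
 * corefile header, updates the header, closes the output, and records
 * success or failure in the panic header flags.
 */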
1128 static int
1129 do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
1130 {
1131 struct kdp_core_out_vars outvars = { };
1132
1133 char *log_start = NULL, *buf = NULL;
1134 size_t existing_log_size = 0, new_log_len = 0;
1135 uint64_t foffset = 0;
1136 int ret = 0;
1137 boolean_t output_opened = FALSE, dump_succeeded = TRUE;
1138
1139 /*
1140  * Record where the debug buffer currently ends so we can later compute how
1141  * much coredump log we generated and dump it, along with the panic log, to disk.
1142  */
1143 log_start = debug_buf_ptr;
1144 #if CONFIG_EMBEDDED
1145 assert(panic_info->eph_other_log_offset != 0);
1146 assert(panic_info->eph_panic_log_len != 0);
1147 /* Include any data from before the panic log as well */
1148 existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
1149 panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
1150 #else /* CONFIG_EMBEDDED */
1151 if (panic_info->mph_panic_log_offset != 0) {
1152 existing_log_size = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
1153 panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
1154 }
1155 #endif /* CONFIG_EMBEDDED */
1156
1157 assert (existing_log_size <= debug_buf_size);
1158
1159 if (kd_variant == KERN_DUMP_DISK) {
1160 /* Open the file for output */
1161 if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
1162 kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
1163 dump_succeeded = FALSE;
1164 goto exit;
1165 }
1166 }
1167 output_opened = true;
1168
1169 /* Initialize gzip, output context */
1170 bzero(&outvars, sizeof(outvars));
1171 outvars.outproc = outproc;
1172
1173 if (kd_variant == KERN_DUMP_DISK) {
1174 outvars.zoutput = kdp_core_zoutput;
1175 /* Space for file header, panic log, core log */
1176 foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE +
1177 KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1);
1178 kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;
1179
1180 /* Seek to the calculated offset (we'll seek back later to flush the logs and header) */
1181 if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
1182 kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
1183 sizeof(foffset), &foffset, foffset, ret);
1184 dump_succeeded = FALSE;
1185 goto exit;
1186 }
1187 } else if (kd_variant == KERN_DUMP_NET) {
1188 assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
1189 outvars.zoutput = kdp_core_zoutputbuf;
1190 #if CONFIG_EMBEDDED
1191 } else { /* KERN_DUMP_HW_SHMEM_DBG */
1192 outvars.zoutput = kdp_core_zoutput;
1193 kern_dump_hw_shmem_dbg_reset();
1194 #endif
1195 }
1196
1197 #if defined(__arm__) || defined(__arm64__)
1198 flush_mmu_tlb();
1199 #endif
1200
1201 kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
1202 "Transmitting kernel state, please wait:\n");
1203
1204 if (kd_variant == KERN_DUMP_DISK) {
1205 /*
1206  * Dump co-processors as well; foffset will be overwritten with the
1207  * offset of the next location in the file to be written to.
1208  */
1209 if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
1210 dump_succeeded = FALSE;
1211 }
1212 } else {
1213 /* Only the kernel */
1214 if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
1215 dump_succeeded = FALSE;
1216 }
1217 }
1218
1219 if (kd_variant == KERN_DUMP_DISK) {
1220 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
1221 /* Write the macOS panic stackshot on its own to a separate 'corefile' */
1222 if (panic_stackshot_buf && panic_stackshot_len) {
1223 uint64_t compressed_stackshot_len = 0;
1224
1225 /* Seek to the offset of the next 'file' (foffset provided/updated from kern_do_coredump) */
1226 if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
1227 kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
1228 dump_succeeded = FALSE;
1229 } else if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
1230 kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
1231 dump_succeeded = FALSE;
1232 } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
1233 kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, 0x%p) returned 0x%x\n",
1234 panic_stackshot_len, (void *) panic_stackshot_buf, ret);
1235 dump_succeeded = FALSE;
1236 } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
1237 kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(0x%p, 0, NULL) returned 0x%x\n", &outvars, ret);
1238 dump_succeeded = FALSE;
1239 } else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
1240 kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
1241 dump_succeeded = FALSE;
1242 } else {
1243 kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
1244 }
1245 }
1246 #endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
1247
1248 /* Write the debug log -- first seek to the end of the corefile header */
1249 foffset = KERN_COREDUMP_HEADERSIZE;
1250 if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
1251 kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
1252 sizeof(foffset), &foffset, foffset, ret);
1253 dump_succeeded = FALSE;
1254 goto exit;
1255 }
1256
1257 new_log_len = debug_buf_ptr - log_start;
1258 if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) {
1259 new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE;
1260 }
1261
1262 /* This data comes after the panic stackshot, so we need to write it separately */
1263 #if CONFIG_EMBEDDED
1264 existing_log_size -= panic_info->eph_other_log_len;
1265 #else
1266 if (existing_log_size) {
1267 existing_log_size -= panic_info->mph_other_log_len;
1268 }
1269 #endif
1270
1271 /*
1272 * Write out the paniclog (from the beginning of the debug
1273 * buffer until the start of the stackshot)
1274 */
1275 buf = debug_buf_base;
1276 if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) {
1277 kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
1278 existing_log_size, buf, ret);
1279 dump_succeeded = FALSE;
1280 goto exit;
1281 }
1282
1283 /*
1284 * The next part of the log we're interested in is the beginning of the 'other' log.
1285 * Include any data after the panic stackshot but before we started the coredump log
1286 * (see above)
1287 */
1288 #if CONFIG_EMBEDDED
1289 buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
1290 new_log_len += panic_info->eph_other_log_len;
1291 #else /* CONFIG_EMBEDDED */
1292 buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
1293 new_log_len += panic_info->mph_other_log_len;
1294 #endif /* CONFIG_EMBEDDED */
1295
1296 /* Write the coredump log */
1297 if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) {
1298 kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
1299 new_log_len, buf, ret);
1300 dump_succeeded = FALSE;
1301 goto exit;
1302 }
1303
1304 kdp_core_header.log_length = existing_log_size + new_log_len;
1305 kern_dump_update_header(&outvars);
1306 }
1307
1308 exit:
1309 /* close / last packet */
1310 if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
1311 kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
1312 dump_succeeded = FALSE;
1313 }
1314
1315 /* If applicable, update the panic header and flush it so we update the CRC */
1316 #if CONFIG_EMBEDDED
1317 panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
1318 EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
1319 paniclog_flush();
1320 #else
1321 if (panic_info->mph_panic_log_offset != 0) {
1322 panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
1323 MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
1324 paniclog_flush();
1325 }
1326 #endif
1327
1328 return (dump_succeeded ? 0 : -1);
1329 }
1330
1331 boolean_t
1332 dumped_kernel_core()
1333 {
1334 return kern_dump_successful;
1335 }
1336
1337 int
1338 kern_dump(enum kern_dump_type kd_variant)
1339 {
1340 static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
1341 int ret = -1;
1342 #if KASAN
1343 kasan_disable();
1344 #endif
1345 if (kd_variant == KERN_DUMP_DISK) {
1346 if (dumped_local) return (0);
1347 if (local_dump_in_progress) return (-1);
1348 local_dump_in_progress = TRUE;
1349 #if CONFIG_EMBEDDED
1350 hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
1351 #endif
1352 ret = do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK);
1353 if (ret == 0) {
1354 dumped_local = TRUE;
1355 kern_dump_successful = TRUE;
1356 local_dump_in_progress = FALSE;
1357 }
1358
1359 return ret;
1360 #if CONFIG_EMBEDDED
1361 } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
1362 ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
1363 if (ret == 0) {
1364 kern_dump_successful = TRUE;
1365 }
1366 return ret;
1367 #endif
1368 } else {
1369 ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
1370 if (ret == 0) {
1371 kern_dump_successful = TRUE;
1372 }
1373 return ret;
1374 }
1375 }
1376
1377 #if CONFIG_EMBEDDED
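/*
 * Spin loop entered at panic time when a hardware shared-memory debugger may
 * be attached: advertises the handshake structure, runs
 * kern_dump(KERN_DUMP_HW_SHMEM_DBG) whenever the debugger requests a dump,
 * and re-arms for another attempt afterwards.  Does not return.
 */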
1378 #pragma clang diagnostic push
1379 #pragma clang diagnostic ignored "-Wmissing-noreturn"
1380 void
1381 panic_spin_shmcon()
1382 {
1383 #pragma clang diagnostic pop
1384 kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
1385 kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
1386 hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));
1387
1388 assert(hwsd_info != NULL);
1389 hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
1390 hwsd_info->xhsdci_seq_no = 0;
1391 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
1392
1393 for (;;) {
1394 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
1395 if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
1396 kern_dump(KERN_DUMP_HW_SHMEM_DBG);
1397 }
1398
1399 if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
1400 (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
1401 hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
1402 hwsd_info->xhsdci_seq_no = 0;
1403 FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
1404 }
1405 }
1406 }
1407 #endif /* CONFIG_EMBEDDED */
1408
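/*
 * Minimal zlib allocator: hands out 32-byte-aligned carve-outs from the
 * kdp_core_zmem region preallocated in kdp_core_init().  Nothing is ever
 * freed; the region is sized up front via zlib_deflate_memory_size().
 */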
1409 static void *
1410 kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
1411 {
1412 void * result;
1413
1414 result = (void *) (kdp_core_zmem + kdp_core_zoffset);
1415 kdp_core_zoffset += ~31L & (31 + (items * size)); // round up to 32-byte alignment for vector crc
1416 assert(kdp_core_zoffset <= kdp_core_zsize);
1417
1418 return (result);
1419 }
1420
1421 static void
1422 kdp_core_zfree(void * __unused ref, void * __unused ptr) {}
1423
1424
1425 #if CONFIG_EMBEDDED
1426 #define LEVEL Z_BEST_SPEED
1427 #define NETBUF 0
1428 #else
1429 #define LEVEL Z_BEST_SPEED
1430 #define NETBUF 1440
1431 #endif
1432
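/*
 * One-time initialization: allocates the zlib working memory and sets up the
 * gzip deflate state, registers the kernel's coredump callbacks, and, on
 * embedded platforms with consistent debug enabled, allocates the physically
 * contiguous handshake structure and transfer buffers used by the hardware
 * shared-memory debugger (plus the panic stackshot buffer on x86_64
 * DEVELOPMENT/DEBUG kernels).
 */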
1433 void
1434 kdp_core_init(void)
1435 {
1436 int wbits = 12;
1437 int memlevel = 3;
1438 kern_return_t kr;
1439 #if CONFIG_EMBEDDED
1440 int i = 0;
1441 vm_offset_t kdp_core_hw_shmem_buf = 0;
1442 struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
1443 cache_info_t *cpuid_cache_info = NULL;
1444 #endif
1445 kern_coredump_callback_config core_config = { };
1446
1447 if (kdp_core_zs.zalloc) return;
1448 kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
1449 printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
1450 kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
1451 assert (KERN_SUCCESS == kr);
1452
1453 kdp_core_zoffset = 0;
1454 kdp_core_zs.zalloc = kdp_core_zalloc;
1455 kdp_core_zs.zfree = kdp_core_zfree;
1456
1457 if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
1458 wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
1459 /* Allocation failed */
1460 bzero(&kdp_core_zs, sizeof(kdp_core_zs));
1461 kdp_core_zoffset = 0;
1462 }
1463
1464 bzero(&kdp_core_header, sizeof(kdp_core_header));
1465
1466 core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
1467 core_config.kcc_coredump_get_summary = kern_dump_save_summary;
1468 core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
1469 core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
1470 core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
1471 core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
1472 core_config.kcc_coredump_save_misc_data = NULL;
1473
1474 kr = kern_register_xnu_coredump_helper(&core_config);
1475 assert(KERN_SUCCESS == kr);
1476
1477 #if CONFIG_EMBEDDED
1478 if (!PE_consistent_debug_enabled()) {
1479 return;
1480 }
1481
1482 /*
1483 * We need to allocate physically contiguous memory since astris isn't capable
1484 * of doing address translations while the CPUs are running.
1485 */
1486 kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
1487 kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
1488 0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
1489 assert(KERN_SUCCESS == kr);
1490
1491 /*
1492 * Put the connection info structure at the beginning of this buffer and adjust
1493 * the buffer size accordingly.
1494 */
1495 hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
1496 hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
1497 hwsd_info->xhsdci_seq_no = 0;
1498 hwsd_info->xhsdci_buf_phys_addr = 0;
1499 hwsd_info->xhsdci_buf_data_length = 0;
1500 hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
1501 hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
1502 hwsd_info->xhsdci_page_size = PAGE_SIZE;
1503
1504 cpuid_cache_info = cache_info();
1505 assert(cpuid_cache_info != NULL);
1506
1507 kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
1508 /* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
1509 kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
1510 kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
1511 kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
1512 /* The buffer size should be a cache-line length multiple */
1513 kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));
1514
1515 STAILQ_INIT(&free_hw_shmem_dbg_bufs);
1516 STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);
1517
1518 for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
1519 cur_elm = kalloc(sizeof(*cur_elm));
1520 assert(cur_elm != NULL);
1521
1522 cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
1523 cur_elm->khsd_data_length = 0;
1524
1525 kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;
1526
1527 STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
1528 }
1529
1530 nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
1531 &kdp_hw_shmem_dbg_contact_deadline_interval);
1532
1533 PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
1534 PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
1535 #endif /* CONFIG_EMBEDDED */
1536
1537 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
1538 /* Allocate space in the kernel map for the panic stackshot */
1539 kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
1540 assert (KERN_SUCCESS == kr);
1541 #endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
1542 }
1543
1544 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */