/*
 * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */
#if CONFIG_EMBEDDED
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
#endif /* CONFIG_EMBEDDED */
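/*
 * For reference (power-of-two 'b' only):
 *   ROUNDUP(4065, 64)   == 4096
 *   ROUNDDOWN(4064, 64) == 4032
 * kdp_core_init() below uses these to carve cache-line-aligned shared-memory
 * buffers out of the contiguous debug allocation.
 */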
typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
                                           vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           pmap_traverse_callback callback,
                                           void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);
static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);
struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);

struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;
    z_output_func         zoutput;
    size_t                zipped;
    uint64_t              totalbytes;
    uint64_t              lastpercent;
    IOReturn              error;
    unsigned              outremain;
    unsigned              outlen;
    unsigned              writes;
    Bytef *               outbuf;
};
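/*
 * Request codes handed to a kern_dump_output_proc (see do_kern_dump and the
 * concrete procs below): KDP_WRQ opens the output, KDP_SEEK is passed a
 * uint64_t file offset in 'panic_data', KDP_DATA writes 'length' bytes (a
 * zero-length/NULL write flushes pending output), KDP_FLUSH asks the
 * polled-mode driver to flush, and KDP_EOF closes the output.
 */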
extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t      kdp_core_zsize;
static size_t      kdp_core_zoffset;
static z_stream    kdp_core_zs;

static uint64_t    kdp_core_total_size;
static uint64_t    kdp_core_total_size_sent_uncomp;
#if CONFIG_EMBEDDED
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE 64 * 1024

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t    khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
        STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
        STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* CONFIG_EMBEDDED */
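/*
 * Buffer sizing sketch (see kdp_core_init below): the handshake structure
 * sits at the start of the contiguous allocation, the remainder is split into
 * KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS buffers, and each buffer is trimmed to a
 * multiple of ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cache line size) so Astris
 * reads stay aligned and efficient.
 */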
static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;
boolean_t
kdp_has_polled_corefile(void)
{
    return (NULL != gIOPolledCoreFileVars);
}

kern_return_t
kdp_polled_corefile_error(void)
{
    return gIOPolledCoreFileOpenRet;
}
#if CONFIG_EMBEDDED
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset()
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}
/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers()
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kern_coredump_log(NULL, "Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
                    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
            kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
        kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}
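/*
 * Handshake sketch, as implemented above: the kernel fills a buffer,
 * publishes its physical address and length, bumps xhsdci_seq_no and sets
 * XHSDCI_COREDUMP_BUF_READY; the hardware debugger consumes it and writes
 * back XHSDCI_COREDUMP_BUF_EMPTY with the incremented sequence number.
 * Either side can signal failure with XHSDCI_COREDUMP_ERROR, and the kernel
 * gives up if the debugger makes no progress before
 * kdp_hw_shmem_dbg_contact_deadline.
 */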
/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer()
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);
    return ret;
}
/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
        uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;

    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
                currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kern_coredump_log(NULL, "Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);

    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }

    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
                currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
                bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}
#endif /* CONFIG_EMBEDDED */
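/*
 * kern_dump_disk_proc maps the generic output-proc requests onto the polled
 * IOPolledFile* interfaces: KDP_WRQ seeks to offset 0 and opens the pollers,
 * KDP_SEEK flushes pending writes and then repositions, KDP_DATA writes
 * payload bytes, KDP_FLUSH (supported by the embedded polled-mode driver)
 * flushes the file, and KDP_EOF performs a final zero-length write and closes
 * the pollers.
 */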
static int
kern_dump_disk_proc(unsigned int request, __unused char *corename,
            uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request)
    {
    case KDP_WRQ:
        err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
            break;
        }
        err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
        break;

    case KDP_SEEK:
        noffset = *((uint64_t *) data);
        err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
            break;
        }
        err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
        }
        break;

    case KDP_DATA:
        err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n",
                    data, length, err);
        }
        break;

#if CONFIG_EMBEDDED
    /* Only supported on embedded by the underlying polled mode driver */
    case KDP_FLUSH:
        err = IOPolledFileFlush(gIOPolledCoreFileVars);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
        }
        break;
#endif /* CONFIG_EMBEDDED */

    case KDP_EOF:
        err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
            break;
        }
        err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
        if (kIOReturnSuccess != err) {
            kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
        }
        break;
    }

    return (err);
}
/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    IOReturn ret;

    vars->zipped += len;

    if (vars->error >= 0)
    {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                    len, buf, ret);
            vars->error = ret;
        }
        if (!buf && !len) kern_coredump_log(NULL, "100..");
    }
    return (len);
}
/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned  remain;
    IOReturn  ret;
    unsigned  chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        else
        {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain          -= chunk;

        if (vars->outremain && !flush) break;
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                        vars->outlen - vars->outremain,
                        vars->outbuf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                    (vars->outlen - vars->outremain), vars->outbuf, ret);
            vars->error = ret;
        }
        if (flush)
        {
            kern_coredump_log(NULL, "100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    uint64_t percent, total_in = 0;
    unsigned len;

    len = strm->avail_in;
    if (len > size) len = size;
    if (len == 0) return 0;

    if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
    else                                 bzero(buf, len);
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in  += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++))
    {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10)
        {
            vars->lastpercent = percent;
            kern_coredump_log(NULL, "%lld..\n", percent);
        }
    }

    return (int) len;
}
static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int        zr;
    boolean_t  flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled)
    {
        (*vars->zoutput)(zs, data, length);
    }
    else
    {
        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0)
        {
            if (!zs->avail_in && !flush)
            {
                if (!length) break;
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out)
            {
                zs->next_out  = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) break;
            if (zr < 0)
            {
                kern_coredump_log(NULL, "ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) (*vars->zoutput)(zs, NULL, 0);
    }

    return (vars->error);
}
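/*
 * Note on the sentinel trick above: zs->next_in and zs->next_out are pointed
 * at the z_stream itself as markers. kdp_core_zinput recognizes the input
 * marker and emits zeroes instead of copying, and the output callback
 * installed via deflateResetWithIO (kdp_core_zoutput or kdp_core_zoutputbuf)
 * receives the compressed bytes directly, so no separate staging buffers are
 * needed here.
 */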
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
    IOReturn     err;
    unsigned int chunk;
    enum       { kMaxZLibChunk = 1024*1024*1024 };
    struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    do
    {
        if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
        else                         chunk = kMaxZLibChunk;
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) data = (void *) (((uintptr_t) data) + chunk);
    }
    while (length && (kIOReturnSuccess == err));

    return (err);
}
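/*
 * Example: a 2.5 GB segment passed to kdp_core_output() in one call is fed to
 * the compressor as two 1 GB chunks followed by a ~0.5 GB chunk, since zlib's
 * buffer lengths are 32-bit and kMaxZLibChunk caps each pass at 1 GB.
 */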
#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t   c_buffers_size;

ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers)
    {
        /* compressor data */
        vincr = c_buffers_size;
    }
    else if (vaddr == kdp_core_zmem)
    {
        /* zlib working memory */
        vincr = kdp_core_zsize;
    }
    else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
    {
        vincr = kdp_core_ramdisk_size;
    }
    else
#if defined(__arm64__) && defined(CONFIG_XNUPOST)
    if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS)
    {
        /* not readable */
        vincr = _COMM_PAGE_AREA_LENGTH;
    }
    else
#endif /* defined(__arm64__) */
#if defined(__arm__) || defined(__arm64__)
    if (vaddr == phystokv(avail_start))
    {
        /* physical memory map */
        vincr = (avail_end - avail_start);
    }
    else
#endif /* defined(__arm__) || defined(__arm64__) */
    ppn = pmap_find_phys(kernel_pmap, vaddr);

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr)
    {
        uint64_t phys = ptoa_64(ppn);
#if defined(__arm__) || defined(__arm64__)
        if (isphysmem(phys)) *pvphysaddr = phystokv(phys);
#else
        if (physmap_enclosed(phys)) *pvphysaddr = (uintptr_t)PHYSMAP_PTOV(phys);
#endif
        else ppn = 0;
    }

    return (ppn);
}
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn        ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t        vincr = 0;
    vm_map_offset_t debug_start;
    vm_map_offset_t debug_end;
    boolean_t       lastvavalid;
#if defined(__arm__) || defined(__arm64__)
    vm_page_t m = VM_PAGE_NULL;
#endif

    debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
    debug_end   = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));

#if defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
        /* We're at the start of the physmap, so pull out the pagetable pages that
         * are accessed through that region.*/
        if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store))
            m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);

        if (m != VM_PAGE_NULL)
        {
            vm_map_offset_t vprev = vcur;
            ppn = (ppnum_t)atop(avail_end);
            while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m))
            {
                /* Ignore pages that come from the static region and have already been dumped.*/
                if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start))
                {
                    ppn = VM_PAGE_GET_PHYS_PAGE(m);
                    break;
                }
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
            }
            vincr = PAGE_SIZE_64;
            if (ppn == atop(avail_end))
            {
                vm_object_unlock(&pmap_object_store);
                m = VM_PAGE_NULL;
                // avail_end is not a valid physical address,
                // so phystokv(avail_end) may not produce the expected result.
                vcur = phystokv(avail_start) + (avail_end - avail_start);
            } else {
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
                vcur = phystokv(ptoa(ppn));
            }

            if (vcur != vprev)
            {
                ret = callback(vcurstart, vprev, context);
                lastvavalid = FALSE;
            }
        }
        if (m == VM_PAGE_NULL)
            ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#else /* defined(__arm__) || defined(__arm64__) */
        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif

        if (ppn != 0)
        {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !(EFI_VALID_PAGE(ppn) ||
                     pmap_valid_page(ppn)))
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }

#if KASAN
    if (ret == KERN_SUCCESS) {
        ret = kasan_traverse_mappings(callback, context);
    }
#endif

    return (ret);
}
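/*
 * pmap_traverse_present_mappings is run three times per dump with the
 * callbacks below: once to count regions and dumpable bytes (preflight), once
 * to describe each segment, and once to stream the segment data itself
 * through the compressor.
 */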
struct kern_dump_preflight_context
{
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}

struct kern_dump_send_seg_desc_context
{
    core_save_segment_descriptions_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
    uint64_t seg_start = (uint64_t) start;
    uint64_t seg_end = (uint64_t) end;

    return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context
{
    core_save_segment_data_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

    return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}
static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    uint64_t thread_state_size = 0, thread_count = 0;
    int ret;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_preflight_callback,
            &kdc_preflight);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
        return ret;
    }

    kern_collectth_state_size(&thread_count, &thread_state_size);

    ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
            thread_count, thread_state_size, 0, context);
    return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
    int ret;
    struct kern_dump_send_seg_desc_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_send_segdesc_callback,
            &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
    int ret;
    uint64_t thread_state_size = 0, thread_count = 0;

    kern_collectth_state_size(&thread_count, &thread_state_size);

    if (thread_state_size > 0) {
        void * iter = NULL;
        do {
            kern_collectth_state (current_thread(), buf, thread_state_size, &iter);

            ret = callback(buf, context);
            if (ret != KERN_SUCCESS) {
                return ret;
            }
        } while (iter != NULL);
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
    return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
    int ret;
    struct kern_dump_send_segdata_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    /* Re-initialize kdp_outvars */
    outvars->zipped = 0;
    outvars->totalbytes = totalbytes;
    outvars->lastpercent = 0;
    outvars->error = kIOReturnSuccess;
    outvars->outremain = 0;
    outvars->outlen = 0;
    outvars->writes = 0;
    outvars->outbuf = NULL;

    if (outvars->outproc == &kdp_send_crashdump_data) {
        /* KERN_DUMP_NET */
        outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
    }

    kdp_core_total_size = totalbytes;

    /* Re-initialize zstream variables */
    kdp_core_zs.avail_in  = 0;
    kdp_core_zs.next_in   = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out  = NULL;
    kdp_core_zs.opaque    = outvars;

    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

    return KERN_SUCCESS;
}
static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
    uint64_t foffset = 0;
    int ret;

    /* Write the file header -- first seek to the beginning of the file */
    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                sizeof(kdp_core_header), &kdp_core_header, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if CONFIG_EMBEDDED
    if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif /* CONFIG_EMBEDDED */

    return KERN_SUCCESS;
}
kern_return_t
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
    int ret = 0;
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
    assert(out_file_length != NULL);
    *out_file_length = 0;

    kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
    kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
    strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
            MACH_CORE_FILEHEADER_NAMELEN);
    kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
    kdp_core_header.num_files++;
    kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

    ret = kern_dump_update_header(outvars);
    if (ret == KERN_SUCCESS) {
        *out_file_length = outvars->zipped;
    }

    return ret;
}
kern_return_t
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
    int ret;

    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
    }

    return ret;
}
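/*
 * On-disk layout produced by the KERN_DUMP_DISK path below: a
 * mach_core_fileheader at offset 0, the panic/coredump log region (sized for
 * the existing log plus up to KERN_COREDUMP_MAXDEBUGLOGSIZE of new log), and
 * then the gzip-compressed core files, each recorded in the header by
 * kern_dump_record_file with its gzip_offset and gzip_length.
 */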
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kdp_core_out_vars outvars = { };

    char *log_start = NULL, *buf = NULL;
    size_t existing_log_size = 0, new_log_len = 0;
    uint64_t foffset = 0;
    int ret = 0;
    boolean_t output_opened = FALSE, dump_succeeded = TRUE;

    /*
     * Record the initial panic log buffer length so we can dump the coredump log
     * and panic log to disk
     */
    log_start = debug_buf_ptr;
#if CONFIG_EMBEDDED
    assert(panic_info->eph_other_log_offset != 0);
    assert(panic_info->eph_panic_log_len != 0);
    /* Include any data from before the panic log as well */
    existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
            panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
    if (panic_info->mph_panic_log_offset != 0) {
        existing_log_size = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
                panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
    }
#endif /* CONFIG_EMBEDDED */

    assert (existing_log_size <= debug_buf_size);

    if (kd_variant == KERN_DUMP_DISK) {
        /* Open the file for output */
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }
    output_opened = true;

    /* Initialize gzip, output context */
    bzero(&outvars, sizeof(outvars));
    outvars.outproc = outproc;

    if (kd_variant == KERN_DUMP_DISK) {
        outvars.zoutput = kdp_core_zoutput;
        /* Space for file header, panic log, core log */
        foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE +
                KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1);
        kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

        /* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.zoutput = kdp_core_zoutputbuf;
#if CONFIG_EMBEDDED
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.zoutput = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif
    }

#if defined(__arm__) || defined(__arm64__)
    flush_mmu_tlb();
#endif

    kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
            "Transmitting kernel state, please wait:\n");

    if (kd_variant == KERN_DUMP_DISK) {
        /*
         * Dump co-processors as well, foffset will be overwritten with the
         * offset of the next location in the file to be written to.
         */
        if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    } else {
        /* Only the kernel */
        if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    }

    if (kd_variant == KERN_DUMP_DISK) {
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
        /* Write the macOS panic stackshot on its own to a separate 'corefile' */
        if (panic_stackshot_buf && panic_stackshot_len) {
            uint64_t compressed_stackshot_len = 0;

            /* Seek to the offset of the next 'file' (foffset provided/updated from kern_do_coredump) */
            if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
                kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, %p) returned 0x%x\n",
                        panic_stackshot_len, (void *) panic_stackshot_buf, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outvars, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
                dump_succeeded = FALSE;
            } else {
                kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
            }
        }
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */

        /* Write the debug log -- first seek to the end of the corefile header */
        foffset = KERN_COREDUMP_HEADERSIZE;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        new_log_len = debug_buf_ptr - log_start;
        if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) {
            new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE;
        }

#if CONFIG_EMBEDDED
        /* This data is after the panic stackshot, we need to write it separately */
        existing_log_size -= panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
        if (existing_log_size) {
            existing_log_size -= panic_info->mph_other_log_len;
        }
#endif /* CONFIG_EMBEDDED */

        /*
         * Write out the paniclog (from the beginning of the debug
         * buffer until the start of the stackshot)
         */
        buf = debug_buf_base;
        if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                    existing_log_size, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        /*
         * The next part of the log we're interested in is the beginning of the 'other' log.
         * Include any data after the panic stackshot but before we started the coredump log
         */
#if CONFIG_EMBEDDED
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
        new_log_len += panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
        new_log_len += panic_info->mph_other_log_len;
#endif /* CONFIG_EMBEDDED */

        /* Write the coredump log */
        if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                    new_log_len, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        kdp_core_header.log_length = existing_log_size + new_log_len;
        kern_dump_update_header(&outvars);
    }

exit:
    /* close / last packet */
    if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
        dump_succeeded = FALSE;
    }

    /* If applicable, update the panic header and flush it so we update the CRC */
#if CONFIG_EMBEDDED
    panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
            EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
    paniclog_flush();
#else
    if (panic_info->mph_panic_log_offset != 0) {
        panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
                MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
        paniclog_flush();
    }
#endif

    return (dump_succeeded ? 0 : -1);
}
boolean_t
dumped_kernel_core()
{
    return kern_dump_successful;
}
int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
    int ret = -1;

    if (kd_variant == KERN_DUMP_DISK) {
        if (dumped_local) return (0);
        if (local_dump_in_progress) return (-1);
        local_dump_in_progress = TRUE;
#if CONFIG_EMBEDDED
        hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
#endif
        ret = do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK);
        if (ret == 0) {
            dumped_local = TRUE;
            kern_dump_successful = TRUE;
            local_dump_in_progress = FALSE;
        }

        return ret;
#if CONFIG_EMBEDDED
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
#endif
    } else {
        ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
    }
}
#if CONFIG_EMBEDDED
void
panic_spin_shmcon()
{
    if (hwsd_info == NULL) {
        kern_coredump_log(NULL, "handshake structure not initialized\n");
        return;
    }

    kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
    kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
            hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

    hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
    hwsd_info->xhsdci_seq_no = 0;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    for (;;) {
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
            kern_dump(KERN_DUMP_HW_SHMEM_DBG);
        }

        if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
                (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
            hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
            hwsd_info->xhsdci_seq_no = 0;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }
    }
}
#endif /* CONFIG_EMBEDDED */
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size));    // 32b align for vector crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}
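/*
 * The bump allocator above rounds each zlib allocation up to a 32-byte
 * boundary: e.g. a 100-byte request advances kdp_core_zoffset by
 * (~31L & (31 + 100)) == 128. Nothing is ever freed (kdp_core_zfree is a
 * no-op); the arena is sized up front in kdp_core_init via
 * zlib_deflate_memory_size().
 */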
#if CONFIG_EMBEDDED
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif

void
kdp_core_init(void)
{
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;
#if CONFIG_EMBEDDED
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
    cache_info_t   *cpuid_cache_info = NULL;
#endif
    kern_coredump_callback_config core_config = { };

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert (KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree  = kdp_core_zfree;

    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
                wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }

    bzero(&kdp_core_header, sizeof(kdp_core_header));

    core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
    core_config.kcc_coredump_get_summary = kern_dump_save_summary;
    core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
    core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
    core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
    core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
    core_config.kcc_coredump_save_misc_data = NULL;

    kr = kern_register_xnu_coredump_helper(&core_config);
    assert(KERN_SUCCESS == kr);

#if CONFIG_EMBEDDED
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since astris isn't capable
     * of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
            0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and adjust
     * the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    cpuid_cache_info = cache_info();
    assert(cpuid_cache_info != NULL);

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    /* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
    kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
    kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
    kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
    /* The buffer size should be a cache-line length multiple */
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = kalloc(sizeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
            &kdp_hw_shmem_dbg_contact_deadline_interval);

    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* CONFIG_EMBEDDED */

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
    /* Allocate space in the kernel map for the panic stackshot */
    kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
    assert (KERN_SUCCESS == kr);
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */