/*
 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
#endif /* defined(__arm__) || defined(__arm64__) */
typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
    uint64_t length, void *panic_data);

struct kdp_core_out_vars {
	kern_dump_output_proc outproc;
	z_output_func         zoutput;
	size_t                zipped;
	uint64_t              totalbytes;
	uint64_t              lastpercent;
	IOReturn              error;
	unsigned              outremain;
	unsigned              outlen;
	unsigned              writes;
	Bytef *               outbuf;
};

extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t kdp_core_zsize;
static size_t kdp_core_zoffset;
static z_stream kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;
#if defined(__arm__) || defined(__arm64__)
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE 64 * 1024

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

struct kdp_hw_shmem_dbg_buf_elm {
	vm_offset_t khsd_buf;
	uint32_t    khsd_data_length;
	STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
    STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
    STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* defined(__arm__) || defined(__arm64__) */
static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

boolean_t
kdp_has_polled_corefile(void)
{
	return NULL != gIOPolledCoreFileVars;
}

kern_return_t
kdp_polled_corefile_error(void)
{
	return gIOPolledCoreFileOpenRet;
}
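
/*
 * Hardware shared memory ("Astris") coredump support: on embedded targets a
 * hardware debugger polls the hwsd_info handshake structure and drains a small
 * set of physically contiguous buffers that the kernel fills with compressed
 * core data. The routines below reset and manage those buffers.
 */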
#if defined(__arm__) || defined(__arm64__)
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset(void)
{
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

	STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
		cur_elm->khsd_data_length = 0;
	}

	if (currently_filling_buf != NULL) {
		currently_filling_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
		currently_filling_buf = NULL;
	}

	if (currently_flushing_buf != NULL) {
		currently_flushing_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
		currently_flushing_buf = NULL;
	}

	STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
		cur_elm->khsd_data_length = 0;

		STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
	kdp_hw_shmem_dbg_seq_no = 0;
	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}
/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers(void)
{
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
	if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
		kern_coredump_log(NULL, "Detected remote error, terminating...\n");
		return -1;
	} else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
		if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
			kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
			    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			return -1;
		}

		kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

		if (currently_flushing_buf != NULL) {
			currently_flushing_buf->khsd_data_length = 0;
			STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
		}

		currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
		if (currently_flushing_buf != NULL) {
			STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
			hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
			hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
			hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
			hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}

		kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
		    kdp_hw_shmem_dbg_contact_deadline_interval;

		return 0;
	} else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
		kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
		kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		return -1;
	}

	return 0;
}
/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer(void)
{
	int ret = 0;

	assert(currently_filling_buf == NULL);

	while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
		ret = kern_dump_hw_shmem_dbg_process_buffers();
		if (ret) {
			return ret;
		}
	}

	currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
	STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

	assert(currently_filling_buf->khsd_data_length == 0);

	return ret;
}
/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
    uint64_t length, void * data)
{
	int ret = 0;

	assert(length < UINT32_MAX);
	uint32_t bytes_remaining = (uint32_t) length;
	uint32_t bytes_to_copy;

	if (request == KDP_EOF) {
		assert(currently_filling_buf == NULL);

		/*
		 * Wait until we've flushed all the buffers
		 * before setting the connection status to done.
		 */
		while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
		    currently_flushing_buf != NULL) {
			ret = kern_dump_hw_shmem_dbg_process_buffers();
			if (ret) {
				return ret;
			}
		}

		/*
		 * If the last status we saw indicates that the buffer was
		 * empty and we didn't flush any new data since then, we expect
		 * the sequence number to still match the last we saw.
		 */
		if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
			kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
			    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
			return -1;
		}

		kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

		kern_coredump_log(NULL, "Setting coredump status as done!\n");
		hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

		return ret;
	}

	assert(request == KDP_DATA);

	/*
	 * The output procedure is called with length == 0 and data == NULL
	 * to flush any remaining output at the end of the coredump before
	 * we call it a final time to mark the dump as done.
	 */
	if (length == 0) {
		assert(data == NULL);

		if (currently_filling_buf != NULL) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
			currently_filling_buf = NULL;
		}

		/*
		 * Move the current buffer along if possible.
		 */
		ret = kern_dump_hw_shmem_dbg_process_buffers();
		return ret;
	}

	while (bytes_remaining != 0) {
		/*
		 * Make sure we have a buffer to work with.
		 */
		while (currently_filling_buf == NULL) {
			ret = kern_dump_hw_shmem_dbg_get_buffer();
			if (ret) {
				return ret;
			}
		}

		assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
		bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
		    currently_filling_buf->khsd_data_length);
		bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
		    bytes_to_copy);

		currently_filling_buf->khsd_data_length += bytes_to_copy;

		if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
			currently_filling_buf = NULL;

			/*
			 * Move it along if possible.
			 */
			ret = kern_dump_hw_shmem_dbg_process_buffers();
			if (ret) {
				return ret;
			}
		}

		bytes_remaining -= bytes_to_copy;
		data = (void *) ((uintptr_t)data + bytes_to_copy);
	}

	return ret;
}
#endif /* defined(__arm__) || defined(__arm64__) */
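
/*
 * Output procedure for disk-based coredumps: handles KDP_WRQ (open), KDP_SEEK,
 * KDP_DATA, KDP_FLUSH and KDP_EOF requests by driving the polled-mode corefile
 * (IOPolledFile*) interfaces.
 */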
static int
kern_dump_disk_proc(unsigned int request, __unused char *corename,
    uint64_t length, void * data)
{
	uint64_t noffset;
	uint32_t err = kIOReturnSuccess;

	switch (request) {
	case KDP_WRQ:
		err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
			break;
		}
		err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
		break;

	case KDP_SEEK:
		noffset = *((uint64_t *) data);
		err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
			break;
		}
		err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
		}
		break;

	case KDP_DATA:
		err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n",
			    data, length, err);
		}
		break;

#if defined(__arm__) || defined(__arm64__)
	/* Only supported on embedded by the underlying polled mode driver */
	case KDP_FLUSH:
		err = IOPolledFileFlush(gIOPolledCoreFileVars);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
		}
		break;
#endif /* defined(__arm__) || defined(__arm64__) */

	case KDP_EOF:
		err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
			break;
		}
		err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
		}
		break;
	}

	return err;
}
/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
	struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque;
	IOReturn                   ret;

	vars->zipped += len;

	if (vars->error >= 0) {
		if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
			    len, buf, ret);
			vars->error = ret;
		}
		if (!buf && !len) {
			kern_coredump_log(NULL, "100..");
		}
	}
	return len;
}
/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
	struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque;
	unsigned  remain;
	IOReturn  ret;
	unsigned  chunk;
	boolean_t flush;

	remain = inlen;
	vars->zipped += inlen;
	flush = (!inbuf && !inlen);

	while ((vars->error >= 0) && (remain || flush)) {
		chunk = vars->outremain;
		if (chunk > remain) {
			chunk = remain;
		}
		if (!inbuf) {
			bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
		} else {
			bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
			inbuf += chunk;
		}
		vars->outremain -= chunk;
		remain -= chunk;

		if (vars->outremain && !flush) {
			break;
		}
		if ((ret = (*vars->outproc)(KDP_DATA, NULL,
		    vars->outlen - vars->outremain,
		    vars->outbuf)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
			    (vars->outlen - vars->outremain), vars->outbuf, ret);
			vars->error = ret;
		}
		if (flush) {
			kern_coredump_log(NULL, "100..");
			flush = FALSE;
		}
		vars->outremain = vars->outlen;
	}
	return inlen;
}
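
/*
 * zlib input callback: copies the next chunk of pending input into the deflate
 * buffer (zero-filling when fed the zero marker), updates the running CRC, and
 * periodically logs compression progress in 10% increments.
 */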
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
	struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque;
	uint64_t                   percent, total_in = 0;
	unsigned                   len;

	len = strm->avail_in;
	if (len > size) {
		len = size;
	}
	if (len == 0) {
		return 0;
	}

	if (strm->next_in != (Bytef *) strm) {
		memcpy(buf, strm->next_in, len);
	} else {
		bzero(buf, len);
	}
	strm->adler = z_crc32(strm->adler, buf, len);

	strm->avail_in -= len;
	strm->next_in  += len;
	strm->total_in += len;

	if (0 == (511 & vars->writes++)) {
		total_in = strm->total_in;
		kdp_core_total_size_sent_uncomp = strm->total_in;

		percent = (total_in * 100) / vars->totalbytes;
		if ((percent - vars->lastpercent) >= 10) {
			vars->lastpercent = percent;
			kern_coredump_log(NULL, "%lld..\n", percent);
		}
	}

	return (int)len;
}
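
/*
 * Pushes a single chunk of core data through the deflate stream (or directly to
 * the output callback when kdp_corezip_disabled is set), finishing the stream
 * when called with length == 0 and data == NULL.
 */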
static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
	z_stream * zs;
	int        zr;
	boolean_t  flush;

	zs = &kdp_core_zs;

	if (kdp_corezip_disabled) {
		(*vars->zoutput)(zs, data, length);
	} else {
		flush = (!length && !data);
		zr = Z_OK;

		assert(!zs->avail_in);

		while (vars->error >= 0) {
			if (!zs->avail_in && !flush) {
				if (!length) {
					break;
				}
				zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
				zs->avail_in = length;
				length = 0;
			}
			if (!zs->avail_out) {
				zs->next_out  = (Bytef *) zs;
				zs->avail_out = UINT32_MAX;
			}
			zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
			if (Z_STREAM_END == zr) {
				break;
			}
			if (zr < 0) {
				kern_coredump_log(NULL, "ZERR %d\n", zr);
				vars->error = zr;
			}
		}

		if (flush) {
			(*vars->zoutput)(zs, NULL, 0);
		}
	}

	return vars->error;
}
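
/*
 * Entry point used by the coredump callbacks: splits arbitrarily large writes
 * into chunks of at most kMaxZLibChunk bytes and streams each one through
 * kdp_core_stream_output_chunk().
 */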
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
	IOReturn     err;
	unsigned int chunk;
	enum       { kMaxZLibChunk = 1024 * 1024 * 1024 };
	struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

	do {
		if (length <= kMaxZLibChunk) {
			chunk = (typeof(chunk))length;
		} else {
			chunk = kMaxZLibChunk;
		}
		err = kdp_core_stream_output_chunk(vars, chunk, data);

		length -= chunk;
		if (data) {
			data = (void *) (((uintptr_t) data) + chunk);
		}
	} while (length && (kIOReturnSuccess == err));

	return err;
}
#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t c_buffers_size;
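
/*
 * Returns the physical page number backing a kernel virtual page if it is
 * present and worth dumping (0 otherwise), sets *pvincr to the amount the scan
 * should advance, and optionally returns a kernel virtual alias of the page via
 * *pvphysaddr. Regions such as the compressor buffers, the zlib working memory,
 * and the ramdisk are skipped in their entirety.
 */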
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t  ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers) {
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	} else if (vaddr == kdp_core_zmem) {
		/* zlib working memory */
		ppn = 0;
		vincr = kdp_core_zsize;
	} else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	} else
#if defined(__arm64__) && defined(CONFIG_XNUPOST)
	if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS) {
		ppn = 0;
		vincr = _COMM_PAGE_AREA_LENGTH;
	} else
#endif /* defined(__arm64__) */
#if defined(__arm__) || defined(__arm64__)
	if (vaddr == phystokv(avail_start)) {
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	} else
#endif /* defined(__arm__) || defined(__arm64__) */
	{
		ppn = (pvphysaddr != NULL ?
		    pmap_find_phys(kernel_pmap, vaddr) :
		    pmap_find_phys_nofault(kernel_pmap, vaddr));
	}

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr) {
		uint64_t phys = ptoa_64(ppn);
		if (physmap_enclosed(phys)) {
			*pvphysaddr = phystokv(phys);
		} else {
			ppn = 0;
		}
	}

	return ppn;
}
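
/*
 * Walks the kernel's virtual address space from 'start' to 'end' and invokes
 * 'callback' once per maximal run of present, dumpable pages. On ARM the
 * physmap window is replaced by the individual page-table pages reachable
 * through it; on x86 invalid 2MB regions are skipped a PDE at a time.
 */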
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
	IOReturn        ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t        vincr = 0;
	vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
	vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
	vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

	boolean_t       lastvavalid;
#if defined(__arm__) || defined(__arm64__)
	vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
		ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
		}

		if (m != VM_PAGE_NULL) {
			vm_map_offset_t vprev = vcur;
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end)) {
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
				// avail_end is not a valid physical address,
				// so phystokv(avail_end) may not produce the expected result.
				vcur = phystokv(avail_start) + (avail_end - avail_start);
			} else {
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
				vcur = phystokv(ptoa(ppn));
			}
			if (vcur != vprev) {
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
		}
		if (m == VM_PAGE_NULL) {
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
		}
#else /* defined(__arm__) || defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif /* defined(__arm__) || defined(__arm64__) */

		if (ppn != 0) {
			if (((vcur < debug_start) || (vcur >= debug_end))
			    && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
			    // include the macOS panic region if it's mapped
			    && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
			    ) {
				/* not something we want */
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if ((vcur & PDMASK) == 0) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}

#if KASAN
	if (ret == KERN_SUCCESS) {
		ret = kasan_traverse_mappings(callback, context);
	}
#endif

	return ret;
}
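
/*
 * Preflight pass: counts the dumpable regions and the total bytes they cover so
 * the corefile can be sized before any segment data is written.
 */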
struct kern_dump_preflight_context {
	uint32_t region_count;
	uint64_t dumpable_bytes;
};

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
	IOReturn ret = KERN_SUCCESS;

	kdc->region_count++;
	kdc->dumpable_bytes += (end - start);

	return ret;
}
struct kern_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};

static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
	uint64_t seg_start = (uint64_t) start;
	uint64_t seg_end = (uint64_t) end;

	return kds_context->callback(seg_start, seg_end, kds_context->context);
}
struct kern_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

	return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}
static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
	struct kern_dump_preflight_context kdc_preflight = { };
	uint64_t thread_state_size = 0, thread_count = 0;
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS,
	    kern_dump_pmap_traverse_preflight_callback,
	    &kdc_preflight);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
		return ret;
	}

	kern_collectth_state_size(&thread_count, &thread_state_size);

	ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
	    thread_count, thread_state_size, 0, context);
	return ret;
}
static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;
	struct kern_dump_send_seg_desc_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS,
	    kern_dump_pmap_traverse_send_segdesc_callback,
	    &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
		return ret;
	}

	return KERN_SUCCESS;
}
static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
	kern_return_t ret;
	uint64_t thread_state_size = 0, thread_count = 0;

	kern_collectth_state_size(&thread_count, &thread_state_size);

	if (thread_state_size > 0) {
		void * iter = NULL;
		do {
			kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

			ret = callback(buf, context);
			if (ret != KERN_SUCCESS) {
				return ret;
			}
		} while (iter != NULL);
	}

	return KERN_SUCCESS;
}
static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
	return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}
static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;
	struct kern_dump_send_segdata_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
		return ret;
	}

	return KERN_SUCCESS;
}
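
/*
 * Resets the per-file output state (byte counters, error status, staging
 * buffer) and re-initializes the gzip stream before each file written into the
 * corefile.
 */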
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
	struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

	/* Re-initialize kdp_outvars */
	outvars->zipped = 0;
	outvars->totalbytes = totalbytes;
	outvars->lastpercent = 0;
	outvars->error = kIOReturnSuccess;
	outvars->outremain = 0;
	outvars->outlen = 0;
	outvars->writes = 0;
	outvars->outbuf = NULL;

	if (outvars->outproc == &kdp_send_crashdump_data) {
		/* KERN_DUMP_NET */
		outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
		outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
	}

	kdp_core_total_size = totalbytes;

	/* Re-initialize zstream variables */
	kdp_core_zs.avail_in  = 0;
	kdp_core_zs.next_in   = NULL;
	kdp_core_zs.avail_out = 0;
	kdp_core_zs.next_out  = NULL;
	kdp_core_zs.opaque    = outvars;

	deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

	return KERN_SUCCESS;
}
static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
	uint64_t foffset;
	int ret;

	/* Write the file header -- first seek to the beginning of the file */
	foffset = 0;
	if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
		    sizeof(kdp_core_header), &kdp_core_header, ret);
		return ret;
	}

	if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm__) || defined(__arm64__)
	if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	return KERN_SUCCESS;
}
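
/*
 * Records a newly written gzip-compressed file in the corefile header's file
 * table and rewrites the header so the entry survives even if a later stage of
 * the dump fails.
 */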
int
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
	int ret = 0;
	struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

	assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
	assert(out_file_length != NULL);
	*out_file_length = 0;

	kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
	kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
	strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
	    MACH_CORE_FILEHEADER_NAMELEN);
	kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
	kdp_core_header.num_files++;
	kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

	ret = kern_dump_update_header(outvars);
	if (ret == KERN_SUCCESS) {
		*out_file_length = outvars->zipped;
	}

	return ret;
}
int
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
	struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
	int ret;

	if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
	}

	return ret;
}
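
/*
 * Core of the dump path: opens the output (disk, network, or hardware shared
 * memory), optionally writes the panic stackshot, dumps the kernel (and, for
 * local disk dumps, co-processor cores), then appends the panic and coredump
 * logs and finalizes the corefile header.
 */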
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
	struct kdp_core_out_vars outvars = { };

	char *coredump_log_start = NULL, *buf = NULL;
	size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
	uint64_t foffset = 0;
	int ret = 0;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	coredump_log_start = debug_buf_ptr;
#if defined(__arm__) || defined(__arm64__)
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
	    panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm__) || defined(__arm64__) */
	if (panic_info->mph_panic_log_offset != 0) {
		prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
		    panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	assert(prior_debug_logsize <= debug_buf_size);

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		/* Open the file for output */
		if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	output_opened = true;

	/* Initialize gzip, output context */
	bzero(&outvars, sizeof(outvars));
	outvars.outproc = outproc;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		outvars.zoutput = kdp_core_zoutput;
		reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;
		/* Space for file header, panic log, core log */
		foffset = ((KERN_COREDUMP_HEADERSIZE + reserved_debug_logsize + (KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1)) \
		    & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1));
		kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

		/* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
		if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	} else if (kd_variant == KERN_DUMP_NET) {
		assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
		outvars.zoutput = kdp_core_zoutputbuf;
#if defined(__arm__) || defined(__arm64__)
	} else { /* KERN_DUMP_HW_SHMEM_DBG */
		outvars.zoutput = kdp_core_zoutput;
		kern_dump_hw_shmem_dbg_reset();
#endif
	}

#if defined(__arm__) || defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
	    "Transmitting kernel state, please wait:\n");

#if defined(__x86_64__)
	if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
		uint64_t compressed_stackshot_len = 0;

		if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
			dump_succeeded = FALSE;
		} else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, %p) returned 0x%x\n",
			    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
			dump_succeeded = FALSE;
		} else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outvars, ret);
			dump_succeeded = FALSE;
		} else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
			dump_succeeded = FALSE;
		} else {
			kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
			foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
			if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
				kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
				dump_succeeded = FALSE;
			}
		}
	}
#endif /* defined(__x86_64__) */

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
			dump_succeeded = FALSE;
		}
	} else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
		/* Only the kernel */
		if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
			dump_succeeded = FALSE;
		}
	}

	if (kd_variant == KERN_DUMP_DISK) {
		assert(reserved_debug_logsize != 0);
		size_t remaining_debug_logspace = reserved_debug_logsize;

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = KERN_COREDUMP_HEADERSIZE;
		if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* First flush the data from just the paniclog */
		size_t initial_log_length = 0;
#if defined(__arm__) || defined(__arm64__)
		initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
		    panic_info->eph_panic_log_len;
#else
		if (panic_info->mph_panic_log_offset != 0) {
			initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
			    panic_info->mph_panic_log_len;
		}
#endif

		buf = debug_buf_base;
		if ((ret = (*outproc)(KDP_DATA, NULL, initial_log_length, buf)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    initial_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		remaining_debug_logspace -= initial_log_length;

		/* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm__) || defined(__arm64__)
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
		/*
		 * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 if debugger_is_panic was
		 * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when
		 * we began taking a coredump.
		 */
		if (panic_info->mph_other_log_offset != 0) {
			buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		} else {
			buf = coredump_log_start;
		}
#endif
		assert(debug_buf_ptr >= buf);

		size_t other_log_length = debug_buf_ptr - buf;
		if (other_log_length > remaining_debug_logspace) {
			other_log_length = remaining_debug_logspace;
		}

		/* Write the coredump log */
		if ((ret = (*outproc)(KDP_DATA, NULL, other_log_length, buf)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    other_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		kdp_core_header.log_length = initial_log_length + other_log_length;
		kern_dump_update_header(&outvars);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm__) || defined(__arm64__)
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
	    EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
		    MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return dump_succeeded ? 0 : -1;
}
boolean_t
dumped_kernel_core(void)
{
	return kern_dump_successful;
}
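
/*
 * External entry point for the panic/debugger path: picks the output procedure
 * for the requested dump variant and guards against re-entering a local disk
 * dump that is already in progress.
 */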
int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		if (dumped_local) {
			return 0;
		}
		if (local_dump_in_progress) {
			return -1;
		}
		local_dump_in_progress = TRUE;
#if defined(__arm__) || defined(__arm64__)
		if (hwsd_info != NULL) {
			hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
		}
#endif
		ret = do_kern_dump(&kern_dump_disk_proc, kd_variant);
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if defined(__arm__) || defined(__arm64__)
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}
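
/*
 * Panic-time spin loop for the hardware shared memory debugger: advertises the
 * handshake structure, waits for the remote side to request a coredump, and
 * re-arms after each completed or failed attempt.
 */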
#if defined(__arm__) || defined(__arm64__)
void
panic_spin_shmcon(void)
{
	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	if (hwsd_info == NULL) {
		kern_coredump_log(NULL, "handshake structure not initialized\n");
		return;
	}

	kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
	kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
	    hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

	hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
	hwsd_info->xhsdci_seq_no = 0;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	for (;;) {
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
			kern_dump(KERN_DUMP_HW_SHMEM_DBG);
		}

		if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
		    (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
			hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
			hwsd_info->xhsdci_seq_no = 0;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}
	}
}
#endif /* defined(__arm__) || defined(__arm64__) */
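
/*
 * zlib allocation hooks: deflate scratch memory is carved sequentially out of
 * the preallocated kdp_core_zmem region (32-byte aligned for the vectorized
 * CRC) and is never freed.
 */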
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
	void * result;

	result = (void *) (kdp_core_zmem + kdp_core_zoffset);
	kdp_core_zoffset += ~31L & (31 + (items * size));    // 32b align for vector crc
	assert(kdp_core_zoffset <= kdp_core_zsize);

	return result;
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr)
{
}

#if defined(__arm__) || defined(__arm64__)
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif
void
kdp_core_init(void)
{
	int wbits = 12;
	int memlevel = 3;
	kern_return_t kr;
#if defined(__arm__) || defined(__arm64__)
	int i = 0;
	vm_offset_t kdp_core_hw_shmem_buf = 0;
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
	cache_info_t *cpuid_cache_info = NULL;
#endif /* defined(__arm__) || defined(__arm64__) */
	kern_coredump_callback_config core_config = { };

	if (kdp_core_zs.zalloc) {
		return;
	}
	kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
	printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
	kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
	assert(KERN_SUCCESS == kr);

	kdp_core_zoffset = 0;
	kdp_core_zs.zalloc = kdp_core_zalloc;
	kdp_core_zs.zfree  = kdp_core_zfree;

	if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
	    wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
		/* Allocation failed */
		bzero(&kdp_core_zs, sizeof(kdp_core_zs));
		kdp_core_zoffset = 0;
	}

	bzero(&kdp_core_header, sizeof(kdp_core_header));

	core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
	core_config.kcc_coredump_get_summary = kern_dump_save_summary;
	core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
	core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
	core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
	core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
	core_config.kcc_coredump_save_misc_data = NULL;

	kr = kern_register_xnu_coredump_helper(&core_config);
	assert(KERN_SUCCESS == kr);

#if defined(__arm__) || defined(__arm64__)
	if (!PE_consistent_debug_enabled()) {
		return;
	}

	if (!PE_i_can_has_debugger(NULL)) {
		return;
	}

	/*
	 * We need to allocate physically contiguous memory since astris isn't capable
	 * of doing address translations while the CPUs are running.
	 */
	kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
	kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf,
	    kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
	    0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	assert(KERN_SUCCESS == kr);

	/*
	 * Put the connection info structure at the beginning of this buffer and adjust
	 * the buffer size accordingly.
	 */
	hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
	hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
	hwsd_info->xhsdci_seq_no = 0;
	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;

	cpuid_cache_info = cache_info();
	assert(cpuid_cache_info != NULL);

	kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
	/* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
	kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
	kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
	kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
	/* The buffer size should be a cache-line length multiple */
	kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

	STAILQ_INIT(&free_hw_shmem_dbg_bufs);
	STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

	for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
		cur_elm = zalloc_permanent_type(typeof(*cur_elm));
		assert(cur_elm != NULL);

		cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
		cur_elm->khsd_data_length = 0;

		kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
	    &kdp_hw_shmem_dbg_contact_deadline_interval);

	PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
	PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* defined(__arm__) || defined(__arm64__) */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */