/*
 * Copyright (c) 2015-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */
#if CONFIG_EMBEDDED
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
#endif /* CONFIG_EMBEDDED */
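
/*
 * Illustrative note (not in the original source): both macros assume the
 * alignment 'b' is a power of two, since they round by masking rather than
 * dividing. For example, with a 64-byte cache line:
 *
 *	ROUNDUP(4064, 64)   == 4096
 *	ROUNDDOWN(4064, 64) == 4032
 */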
typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
				      vm_map_offset_t end,
				      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
					   vm_map_offset_t start,
					   vm_map_offset_t end,
					   pmap_traverse_callback callback,
					   void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);
static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
					   vm_map_offset_t end,
					   void *context);

static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
					      vm_map_offset_t end,
					      void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
					      vm_map_offset_t end,
					      void *context);

struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
				     uint64_t length, void *panic_data);
struct kdp_core_out_vars
{
	kern_dump_output_proc outproc;
	z_output_func         zoutput;
	size_t                zipped;
	uint64_t              totalbytes;
	uint64_t              lastpercent;
	IOReturn              error;
	unsigned              outremain;
	unsigned              outlen;
	unsigned              writes;
	Bytef *               outbuf;
};

extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t      kdp_core_zsize;
static size_t      kdp_core_zoffset;
static z_stream    kdp_core_zs;

static uint64_t    kdp_core_total_size;
static uint64_t    kdp_core_total_size_sent_uncomp;
#if CONFIG_EMBEDDED
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064
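
/*
 * Worked example (illustrative, not in the original source): kdp_core_init()
 * below trims each per-buffer size to a multiple of
 * ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, c_linesz). Assuming 64-byte cache lines,
 * that is ROUNDDOWN(4064, 64) == 4032, so each of the two ~32KB halves of the
 * shared buffer ends up at 32256 == 8 * 4032 bytes, i.e. exactly eight
 * maximal Astris reads.
 */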
struct kdp_hw_shmem_dbg_buf_elm {
	vm_offset_t khsd_buf;
	uint32_t    khsd_data_length;
	STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
		STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
		STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* CONFIG_EMBEDDED */
static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

boolean_t
kdp_has_polled_corefile(void)
{
	return (NULL != gIOPolledCoreFileVars);
}
#if CONFIG_EMBEDDED
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset(void)
{
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

	STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
		cur_elm->khsd_data_length = 0;
	}

	if (currently_filling_buf != NULL) {
		currently_filling_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
		currently_filling_buf = NULL;
	}

	if (currently_flushing_buf != NULL) {
		currently_flushing_buf->khsd_data_length = 0;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
		currently_flushing_buf = NULL;
	}

	STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
		cur_elm->khsd_data_length = 0;

		STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
	kdp_hw_shmem_dbg_seq_no = 0;
	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}
/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers(void)
{
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
	if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
		kern_coredump_log(NULL, "Detected remote error, terminating...\n");
		return -1;
	} else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
		if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
			kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
					(kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			return -1;
		}

		kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

		if (currently_flushing_buf != NULL) {
			currently_flushing_buf->khsd_data_length = 0;
			STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
		}

		currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
		if (currently_flushing_buf != NULL) {
			STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
			hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
			hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
			hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
			hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
			hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
			hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}

		kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
			kdp_hw_shmem_dbg_contact_deadline_interval;

		return 0;
	} else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
		kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
		kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		return -1;
	}

	return 0;
}
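
/*
 * Illustrative handshake trace (not in the original source): the kernel and
 * the hardware debugger pass buffer ownership through the status and
 * sequence-number fields, e.g.:
 *
 *	kernel:   publishes buffer, xhsdci_seq_no = 1, XHSDCI_COREDUMP_BUF_READY
 *	debugger: drains it,        xhsdci_seq_no = 2, XHSDCI_COREDUMP_BUF_EMPTY
 *	kernel:   checks seq_no == local seq_no + 1, then publishes the next
 *	          buffer with xhsdci_seq_no = 3, XHSDCI_COREDUMP_BUF_READY
 *
 * Any other sequence number is treated as stale/invalid above.
 */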
/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer(void)
{
	int ret = 0;

	assert(currently_filling_buf == NULL);

	while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
		ret = kern_dump_hw_shmem_dbg_process_buffers();
		if (ret) {
			return ret;
		}
	}

	currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
	STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

	assert(currently_filling_buf->khsd_data_length == 0);

	return ret;
}
/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
		uint64_t length, void * data)
{
	int ret = 0;

	assert(length < UINT32_MAX);
	uint32_t bytes_remaining = (uint32_t) length;
	uint32_t bytes_to_copy;

	if (request == KDP_EOF) {
		assert(currently_filling_buf == NULL);

		/*
		 * Wait until we've flushed all the buffers
		 * before setting the connection status to done.
		 */
		while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
				currently_flushing_buf != NULL) {
			ret = kern_dump_hw_shmem_dbg_process_buffers();
			if (ret) {
				return ret;
			}
		}

		/*
		 * If the last status we saw indicates that the buffer was
		 * empty and we didn't flush any new data since then, we expect
		 * the sequence number to still match the last we saw.
		 */
		if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
			kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
					kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
			return -1;
		}

		kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

		kern_coredump_log(NULL, "Setting coredump status as done!\n");
		hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
		hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

		return ret;
	}

	assert(request == KDP_DATA);

	/*
	 * The output procedure is called with length == 0 and data == NULL
	 * to flush any remaining output at the end of the coredump before
	 * we call it a final time to mark the dump as done.
	 */
	if (length == 0) {
		assert(data == NULL);

		if (currently_filling_buf != NULL) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
			currently_filling_buf = NULL;
		}

		/*
		 * Move the current buffer along if possible.
		 */
		ret = kern_dump_hw_shmem_dbg_process_buffers();
		return ret;
	}

	while (bytes_remaining != 0) {
		/*
		 * Make sure we have a buffer to work with.
		 */
		while (currently_filling_buf == NULL) {
			ret = kern_dump_hw_shmem_dbg_get_buffer();
			if (ret) {
				return ret;
			}
		}

		assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
		bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
				currently_filling_buf->khsd_data_length);
		bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
				bytes_to_copy);

		currently_filling_buf->khsd_data_length += bytes_to_copy;

		if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
			STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
			currently_filling_buf = NULL;

			/*
			 * Move it along if possible.
			 */
			ret = kern_dump_hw_shmem_dbg_process_buffers();
			if (ret) {
				return ret;
			}
		}

		bytes_remaining -= bytes_to_copy;
		data = (void *) ((uintptr_t)data + bytes_to_copy);
	}

	return ret;
}
#endif /* CONFIG_EMBEDDED */
static IOReturn
kern_dump_disk_proc(unsigned int request, __unused char *corename,
		    uint64_t length, void * data)
{
	uint64_t noffset;
	uint32_t err = kIOReturnSuccess;

	switch (request)
	{
	case KDP_WRQ:
		err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
			break;
		}
		err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
		break;

	case KDP_SEEK:
		noffset = *((uint64_t *) data);
		err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
			break;
		}
		err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
		}
		break;

	case KDP_DATA:
		err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, 0x%p, 0x%llx, NULL) returned 0x%x\n",
					data, length, err);
			break;
		}
		break;

#if CONFIG_EMBEDDED
	/* Only supported on embedded by the underlying polled mode driver */
	case KDP_FLUSH:
		err = IOPolledFileFlush(gIOPolledCoreFileVars);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
			break;
		}
		break;
#endif /* CONFIG_EMBEDDED */

	case KDP_EOF:
		err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
			break;
		}
		err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
		if (kIOReturnSuccess != err) {
			kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
			break;
		}
		break;
	}

	return (err);
}
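
/*
 * Illustrative call sequence for an output proc (not in the original source;
 * this mirrors what do_kern_dump() below actually issues): a disk dump opens
 * the file with KDP_WRQ, interleaves KDP_SEEK and KDP_DATA while streaming,
 * sends a zero-length KDP_DATA to drain buffered output, and closes with
 * KDP_EOF, e.g.:
 *
 *	kern_dump_disk_proc(KDP_WRQ,  NULL, 0, NULL);
 *	kern_dump_disk_proc(KDP_SEEK, NULL, sizeof(foffset), &foffset);
 *	kern_dump_disk_proc(KDP_DATA, NULL, len, buf);
 *	kern_dump_disk_proc(KDP_DATA, NULL, 0, NULL);
 *	kern_dump_disk_proc(KDP_EOF,  NULL, 0, NULL);
 */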
/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
	struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
	IOReturn                   ret;

	vars->zipped += len;

	if (vars->error >= 0)
	{
		if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
		{
			kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, 0x%p) returned 0x%x\n",
					len, buf, ret);
			vars->error = ret;
		}
		if (!buf && !len) kern_coredump_log(NULL, "100..");
	}
	return (len);
}
/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
	struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
	unsigned  remain;
	IOReturn  ret;
	unsigned  chunk;
	boolean_t flush;

	remain = inlen;
	vars->zipped += inlen;
	flush = (!inbuf && !inlen);

	while ((vars->error >= 0) && (remain || flush))
	{
		chunk = vars->outremain;
		if (chunk > remain) chunk = remain;
		if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
		else
		{
			bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
			inbuf += chunk;
		}
		vars->outremain -= chunk;
		remain          -= chunk;

		if (vars->outremain && !flush) break;
		if ((ret = (*vars->outproc)(KDP_DATA, NULL,
					vars->outlen - vars->outremain,
					vars->outbuf)) != kIOReturnSuccess)
		{
			kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, 0x%p) returned 0x%x\n",
					(vars->outlen - vars->outremain), vars->outbuf, ret);
			vars->error = ret;
		}
		if (flush)
		{
			kern_coredump_log(NULL, "100..");
			flush = false;
		}
		vars->outremain = vars->outlen;
	}
	return (inlen);
}
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
	struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
	uint64_t                   percent, total_in = 0;
	unsigned                   len;

	len = strm->avail_in;
	if (len > size) len = size;
	if (len == 0) return 0;

	if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
	else                                 bzero(buf, len);
	strm->adler = z_crc32(strm->adler, buf, len);

	strm->avail_in -= len;
	strm->next_in  += len;
	strm->total_in += len;

	if (0 == (511 & vars->writes++))
	{
		total_in = strm->total_in;
		kdp_core_total_size_sent_uncomp = strm->total_in;

		percent = (total_in * 100) / vars->totalbytes;
		if ((percent - vars->lastpercent) >= 10)
		{
			vars->lastpercent = percent;
			kern_coredump_log(NULL, "%lld..\n", percent);
		}
	}

	return (int) len;
}
static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
	z_stream * zs;
	int        zr;
	boolean_t  flush;

	zs = &kdp_core_zs;

	if (kdp_corezip_disabled)
	{
		(*vars->zoutput)(zs, data, length);
	}
	else
	{
		flush = (!length && !data);
		zr = Z_OK;

		assert(!zs->avail_in);

		while (vars->error >= 0)
		{
			if (!zs->avail_in && !flush)
			{
				if (!length) break;
				zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
				zs->avail_in = length;
				length = 0;
			}
			if (!zs->avail_out)
			{
				zs->next_out  = (Bytef *) zs;
				zs->avail_out = UINT32_MAX;
			}
			zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
			if (Z_STREAM_END == zr) break;
			if (zr < 0)
			{
				kern_coredump_log(NULL, "ZERR %d\n", zr);
				vars->error = zr;
			}
		}

		if (flush) (*vars->zoutput)(zs, NULL, 0);
	}

	return (vars->error);
}
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
	IOReturn     err;
	unsigned int chunk;
	enum       { kMaxZLibChunk = 1024*1024*1024 };
	struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

	do
	{
		if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
		else                         chunk = kMaxZLibChunk;
		err = kdp_core_stream_output_chunk(vars, chunk, data);

		length -= chunk;
		if (data) data = (void *) (((uintptr_t) data) + chunk);
	}
	while (length && (kIOReturnSuccess == err));

	return (err);
}
#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t   c_buffers_size;
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t  ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers)
	{
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	}
	else if (vaddr == kdp_core_zmem)
	{
		/* zlib working memory */
		ppn = 0;
		vincr = kdp_core_zsize;
	}
	else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
	{
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	}
	else
#if defined(__arm64__)
	if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS)
	{
		/* not readable */
		ppn = 0;
		vincr = _COMM_PAGE_AREA_LENGTH;
	}
	else
#endif /* defined(__arm64__) */
#if defined(__arm__) || defined(__arm64__)
	if (vaddr == phystokv(avail_start))
	{
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	}
	else
#endif /* defined(__arm__) || defined(__arm64__) */
	ppn = pmap_find_phys(kernel_pmap, vaddr);

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr)
	{
		uint64_t phys = ptoa_64(ppn);
#if defined(__arm__) || defined(__arm64__)
		if (isphysmem(phys)) *pvphysaddr = phystokv(phys);
#else
		if (physmap_enclosed(phys)) *pvphysaddr = (uintptr_t)PHYSMAP_PTOV(phys);
#endif
		else ppn = 0;
	}

	return (ppn);
}
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
			       vm_map_offset_t start,
			       vm_map_offset_t end,
			       pmap_traverse_callback callback,
			       void *context)
{
	IOReturn        ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t        vincr = 0;
	vm_map_offset_t debug_start;
	vm_map_offset_t debug_end;
	boolean_t       lastvavalid;
#if defined(__arm__) || defined(__arm64__)
	vm_page_t m = VM_PAGE_NULL;
#endif

	debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	debug_end   = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) return (KERN_INVALID_ARGUMENT);

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
		ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store))
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);

		if (m != VM_PAGE_NULL)
		{
			vm_map_offset_t vprev = vcur;
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m))
			{
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start))
				{
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->listq);
			}
			vcur = phystokv(ptoa(ppn));
			if (vcur != vprev)
			{
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end))
			{
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
			}
			else
				m = (vm_page_t)vm_page_queue_next(&m->listq);
		}
		if (m == VM_PAGE_NULL)
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#else /* defined(__arm__) || defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif

		if (ppn != 0)
		{
			if (((vcur < debug_start) || (vcur >= debug_end))
				&& !(EFI_VALID_PAGE(ppn) ||
					pmap_valid_page(ppn)))
			{
				/* not something we want */
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if (((vcur & PDMASK) == 0) && cpu_64bit) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}
	return (ret);
}
struct kern_dump_preflight_context
{
	uint32_t region_count;
	uint64_t dumpable_bytes;
};

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
					   vm_map_offset_t end,
					   void *context)
{
	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
	IOReturn ret = KERN_SUCCESS;

	kdc->region_count++;
	kdc->dumpable_bytes += (end - start);

	return (ret);
}
struct kern_dump_send_seg_desc_context
{
	core_save_segment_descriptions_cb callback;
	void *context;
};

static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
					      vm_map_offset_t end,
					      void *context)
{
	struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
	uint64_t seg_start = (uint64_t) start;
	uint64_t seg_end = (uint64_t) end;

	return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context
{
	core_save_segment_data_cb callback;
	void *context;
};

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
					      vm_map_offset_t end,
					      void *context)
{
	struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

	return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}
static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
	struct kern_dump_preflight_context kdc_preflight = { };
	uint64_t thread_state_size = 0, thread_count = 0;
	kern_return_t ret;

	ret = pmap_traverse_present_mappings(kernel_pmap,
			VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			VM_MAX_KERNEL_ADDRESS,
			kern_dump_pmap_traverse_preflight_callback,
			&kdc_preflight);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
		return KERN_FAILURE;
	}

	kern_collectth_state_size(&thread_count, &thread_state_size);

	ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
		       thread_count, thread_state_size, 0, context);
	return ret;
}
static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
	kern_return_t ret;
	struct kern_dump_send_seg_desc_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
			VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			VM_MAX_KERNEL_ADDRESS,
			kern_dump_pmap_traverse_send_segdesc_callback,
			&kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
	kern_return_t ret;
	uint64_t thread_state_size = 0, thread_count = 0;

	kern_collectth_state_size(&thread_count, &thread_state_size);

	if (thread_state_size > 0) {
		void * iter = NULL;
		do {
			kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

			ret = callback(buf, context);
			if (ret != KERN_SUCCESS) {
				return ret;
			}
		} while (iter != NULL);
	}

	return KERN_SUCCESS;
}
static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
	return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}
static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
	kern_return_t ret;
	struct kern_dump_send_segdata_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
			VM_MIN_KERNEL_AND_KEXT_ADDRESS,
			VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
	struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

	/* Re-initialize kdp_outvars */
	outvars->zipped = 0;
	outvars->totalbytes = totalbytes;
	outvars->lastpercent = 0;
	outvars->error = kIOReturnSuccess;
	outvars->outremain = 0;
	outvars->outlen = 0;
	outvars->writes = 0;
	outvars->outbuf = NULL;

	if (outvars->outproc == &kdp_send_crashdump_data) {
		/* KERN_DUMP_NET */
		outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
		outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
	}

	kdp_core_total_size = totalbytes;

	/* Re-initialize zstream variables */
	kdp_core_zs.avail_in  = 0;
	kdp_core_zs.next_in   = NULL;
	kdp_core_zs.avail_out = 0;
	kdp_core_zs.next_out  = NULL;
	kdp_core_zs.opaque    = outvars;

	deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

	return KERN_SUCCESS;
}
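
/*
 * Illustrative per-file write sequence (not in the original source; this is
 * the pattern do_kern_dump() below uses for the x86 panic stackshot):
 *
 *	kern_dump_seek_to_next_file(&outvars, foffset);
 *	kdp_reset_output_vars(&outvars, data_len);
 *	kdp_core_output(&outvars, data_len, data);  // stream the payload
 *	kdp_core_output(&outvars, 0, NULL);         // finish the gzip stream
 *	kern_dump_record_file(&outvars, "name", foffset, &compressed_len);
 */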
static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
	kern_return_t ret;
	uint64_t foffset = 0;

	/* Write the file header -- first seek to the beginning of the file */
	if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
				sizeof(foffset), &foffset, foffset, ret);
		return KERN_FAILURE;
	}

	if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
				sizeof(kdp_core_header), &kdp_core_header, ret);
		return KERN_FAILURE;
	}

	if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return KERN_FAILURE;
	}

#if CONFIG_EMBEDDED
	if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return KERN_FAILURE;
	}
#endif /* CONFIG_EMBEDDED */

	return KERN_SUCCESS;
}
int
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
	int ret = 0;
	struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

	assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
	assert(out_file_length != NULL);
	*out_file_length = 0;

	kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
	kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
	strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
			MACH_CORE_FILEHEADER_NAMELEN);
	kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
	kdp_core_header.num_files++;
	kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

	ret = kern_dump_update_header(outvars);
	if (ret == KERN_SUCCESS) {
		*out_file_length = outvars->zipped;
	}

	return ret;
}
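
/*
 * Illustrative sketch (assumption, not from the original source): after two
 * files have been recorded, kdp_core_header might hold num_files == 2, with
 * files[0] describing the kernel coredump (the gzip_offset/gzip_length of
 * its gzip stream) and files[1] describing "panic_stackshot.kcdata", plus
 * signature == MACH_CORE_FILEHEADER_SIGNATURE so recovery tooling can locate
 * each stream.
 */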
int
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
	struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
	kern_return_t ret;

	if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
				sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
	}

	return ret;
}
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
	struct kdp_core_out_vars outvars = { };

	char *log_start = NULL, *buf = NULL;
	size_t existing_log_size = 0, new_log_len = 0;
	uint64_t foffset = 0;
	int ret = 0;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	log_start = debug_buf_ptr;
#if CONFIG_EMBEDDED
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
			panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
	if (panic_info->mph_panic_log_offset != 0) {
		existing_log_size = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
				panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* CONFIG_EMBEDDED */

	assert(existing_log_size <= debug_buf_size);

	if (kd_variant == KERN_DUMP_DISK) {
		/* Open the file for output */
		if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	output_opened = true;

	/* Initialize gzip, output context */
	bzero(&outvars, sizeof(outvars));
	outvars.outproc = outproc;

	if (kd_variant == KERN_DUMP_DISK) {
		outvars.zoutput = kdp_core_zoutput;
		/* Space for file header, panic log, core log */
		foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE +
				KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1);
		kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

		/* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
		if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
					sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	} else if (kd_variant == KERN_DUMP_NET) {
		assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
		outvars.zoutput = kdp_core_zoutputbuf;
#if CONFIG_EMBEDDED
	} else { /* KERN_DUMP_HW_SHMEM_DBG */
		outvars.zoutput = kdp_core_zoutput;
		kern_dump_hw_shmem_dbg_reset();
#endif
	}

#if defined(__arm__) || defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
			"Transmitting kernel state, please wait:\n");

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
			dump_succeeded = FALSE;
		}
	} else {
		/* Only the kernel */
		if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
			dump_succeeded = FALSE;
		}
	}

	if (kd_variant == KERN_DUMP_DISK) {
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
		/* Write the macOS panic stackshot on its own to a separate 'corefile' */
		if (panic_stackshot_buf && panic_stackshot_len) {
			uint64_t compressed_stackshot_len = 0;

			/* Seek to the offset of the next 'file' (foffset provided/updated from kern_do_coredump) */
			if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
				kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_core_output(outvars, %lu, 0x%p) returned 0x%x\n",
						panic_stackshot_len, (void *) panic_stackshot_buf, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(0x%p, 0, NULL) returned 0x%x\n", &outvars, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
				dump_succeeded = FALSE;
			} else {
				kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset,
						compressed_stackshot_len);
			}
		}
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = KERN_COREDUMP_HEADERSIZE;
		if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, 0x%p) foffset = 0x%llx returned 0x%x\n",
					sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		new_log_len = debug_buf_ptr - log_start;
		if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) {
			new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE;
		}

		/* This data is after the panic stackshot, we need to write it separately */
#if CONFIG_EMBEDDED
		existing_log_size -= panic_info->eph_other_log_len;
#else
		if (existing_log_size) {
			existing_log_size -= panic_info->mph_other_log_len;
		}
#endif

		/*
		 * Write out the paniclog (from the beginning of the debug
		 * buffer until the start of the stackshot)
		 */
		buf = debug_buf_base;
		if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
					existing_log_size, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/*
		 * The next part of the log we're interested in is the beginning of the 'other' log.
		 * Include any data after the panic stackshot but before we started the coredump log
		 * (see above)
		 */
#if CONFIG_EMBEDDED
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
		new_log_len += panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		new_log_len += panic_info->mph_other_log_len;
#endif /* CONFIG_EMBEDDED */

		/* Write the coredump log */
		if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, 0x%p) returned 0x%x\n",
					new_log_len, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		kdp_core_header.log_length = existing_log_size + new_log_len;
		kern_dump_update_header(&outvars);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if CONFIG_EMBEDDED
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
			EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
				MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return (dump_succeeded ? 0 : -1);
}
boolean_t
dumped_kernel_core(void)
{
	return kern_dump_successful;
}
int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;
#if KASAN
	kasan_disable();
#endif
	if (kd_variant == KERN_DUMP_DISK) {
		if (dumped_local) return (0);
		if (local_dump_in_progress) return (-1);
		local_dump_in_progress = TRUE;
#if CONFIG_EMBEDDED
		hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
#endif
		ret = do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK);
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if CONFIG_EMBEDDED
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}
#if CONFIG_EMBEDDED
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
void
panic_spin_shmcon(void)
{
#pragma clang diagnostic pop
	kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
	kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
			hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

	assert(hwsd_info != NULL);
	hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
	hwsd_info->xhsdci_seq_no = 0;
	FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

	for (;;) {
		FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
			kern_dump(KERN_DUMP_HW_SHMEM_DBG);
		}

		if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
				(hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
			hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
			hwsd_info->xhsdci_seq_no = 0;
			FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
		}
	}
}
#endif /* CONFIG_EMBEDDED */
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
	void * result;

	result = (void *) (kdp_core_zmem + kdp_core_zoffset);
	kdp_core_zoffset += ~31L & (31 + (items * size));    // 32-byte align for vector crc
	assert(kdp_core_zoffset <= kdp_core_zsize);

	return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}
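
/*
 * Worked example (illustrative, not in the original source): the expression
 * ~31L & (31 + n) rounds n up to the next multiple of 32. For a zlib request
 * of items * size == 100 bytes: 31 + 100 == 131, and 131 & ~31 == 128, so the
 * bump allocator advances by 128 bytes and every allocation stays 32-byte
 * aligned for the vectorized CRC code.
 */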
#if CONFIG_EMBEDDED
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif
void
kdp_core_init(void)
{
	int wbits = 12;
	int memlevel = 3;
	kern_return_t kr;
#if CONFIG_EMBEDDED
	int i = 0;
	vm_offset_t kdp_core_hw_shmem_buf = 0;
	struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
	cache_info_t *cpuid_cache_info = NULL;
#endif
	kern_coredump_callback_config core_config = { };

	if (kdp_core_zs.zalloc) return;
	kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
	printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
	kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
	assert(KERN_SUCCESS == kr);

	kdp_core_zoffset = 0;
	kdp_core_zs.zalloc = kdp_core_zalloc;
	kdp_core_zs.zfree  = kdp_core_zfree;

	if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
				wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
		/* Allocation failed */
		bzero(&kdp_core_zs, sizeof(kdp_core_zs));
		kdp_core_zoffset = 0;
	}

	bzero(&kdp_core_header, sizeof(kdp_core_header));
.kcc_coredump_init
= NULL
; /* TODO: consider doing mmu flush from an init function */
1467 core_config
.kcc_coredump_get_summary
= kern_dump_save_summary
;
1468 core_config
.kcc_coredump_save_segment_descriptions
= kern_dump_save_seg_descriptions
;
1469 core_config
.kcc_coredump_save_thread_state
= kern_dump_save_thread_state
;
1470 core_config
.kcc_coredump_save_sw_vers
= kern_dump_save_sw_vers
;
1471 core_config
.kcc_coredump_save_segment_data
= kern_dump_save_segment_data
;
1472 core_config
.kcc_coredump_save_misc_data
= NULL
;
1474 kr
= kern_register_xnu_coredump_helper(&core_config
);
1475 assert(KERN_SUCCESS
== kr
);
#if CONFIG_EMBEDDED
	if (!PE_consistent_debug_enabled()) {
		return;
	}

	/*
	 * We need to allocate physically contiguous memory since astris isn't capable
	 * of doing address translations while the CPUs are running.
	 */
	kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
	kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
			0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	assert(KERN_SUCCESS == kr);

	/*
	 * Put the connection info structure at the beginning of this buffer and adjust
	 * the buffer size accordingly.
	 */
	hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
	hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
	hwsd_info->xhsdci_seq_no = 0;
	hwsd_info->xhsdci_buf_phys_addr = 0;
	hwsd_info->xhsdci_buf_data_length = 0;
	hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
	hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
	hwsd_info->xhsdci_page_size = PAGE_SIZE;

	cpuid_cache_info = cache_info();
	assert(cpuid_cache_info != NULL);

	kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
	/* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
	kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
	kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
	kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
	/* The buffer size should be a cache-line length multiple */
	kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

	STAILQ_INIT(&free_hw_shmem_dbg_bufs);
	STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

	for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
		cur_elm = kalloc(sizeof(*cur_elm));
		assert(cur_elm != NULL);

		cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
		cur_elm->khsd_data_length = 0;

		kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

		STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
	}

	nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
			&kdp_hw_shmem_dbg_contact_deadline_interval);

	PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
	PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* CONFIG_EMBEDDED */
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
	/* Allocate space in the kernel map for the panic stackshot */
	kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
	assert(KERN_SUCCESS == kr);
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */