/*
 * Copyright (c) 2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#endif /* defined(__i386__) || defined(__x86_64__) */

#if WITH_CONSISTENT_DBG
#include <pexpert/arm/consistent_debug.h>
#endif /* WITH_CONSISTENT_DBG */
typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
                                          vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          pmap_traverse_callback callback,
                                          void *context);

static int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                                      vm_map_offset_t end,
                                                      void *context);
static int kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start,
                                                     vm_map_offset_t end,
                                                     void *context);
static int kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                                         vm_map_offset_t end,
                                                         void *context);
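/*
 * Note: pmap_traverse_present_mappings() invokes its callback once per
 * contiguous run of present mappings, passing the half-open range
 * [start, end) of the run. The preflight/send callbacks below rely on
 * this to count, size, and emit one Mach-O segment per run.
 */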
struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);
struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;
    z_output_func         zoutput;
    size_t                zipped;
    uint64_t              totalbytes;
    uint64_t              lastpercent;
    IOReturn              error;
    unsigned              outremain;
    unsigned              outlen;
    unsigned              writes;
    Bytef *               outbuf;
};
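/*
 * zoutput selects how the compressed stream leaves the kernel:
 * do_kern_dump() points it at kdp_core_zoutput (immediate pass-through,
 * used for disk and shared-memory dumps) or kdp_core_zoutputbuf
 * (packet-sized buffering, used for network dumps).
 */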
struct kern_dump_preflight_context
{
    uint32_t region_count;
    uint64_t dumpable_bytes;
};
struct kern_dump_send_context
{
    struct kdp_core_out_vars * outvars;
    uint64_t hoffset;
    uint64_t foffset;
    uint64_t header_size;
    uint64_t dumpable_bytes;
    uint32_t region_count;
};
extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t      kdp_core_zsize;
static size_t      kdp_core_zoffset;
static z_stream    kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;
#if WITH_CONSISTENT_DBG
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS    2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064
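/*
 * Rough sizing arithmetic (performed in kdp_core_init below): the 64KB
 * region, less the shared command-info header, is split across the 2
 * buffers and each is rounded down to a multiple of 4064; ignoring the
 * header, that is 32768 - (32768 % 4064) = 32512 bytes per buffer, so
 * every probe read except the last is full-sized.
 */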
struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t    khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
        STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
        STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);
static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* WITH_CONSISTENT_DBG */
/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;
#define DEBG kdb_printf

boolean_t
kdp_has_polled_corefile(void)
{
    return (NULL != gIOPolledCoreFileVars);
}
#if WITH_CONSISTENT_DBG
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset()
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;
    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}
/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers()
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kdb_printf("Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kdb_printf("Detected stale/invalid seq num. Expected: %d, received %d\n",
                    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
                kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kdb_printf("Kernel timed out waiting for hardware debugger to update handshake structure.");
        kdb_printf(" No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}
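/*
 * A note on the flushes above: hwsd_info lives in memory that the
 * hardware debugger reads physically (it cannot translate addresses
 * while the CPUs are running, per the comment in kdp_core_init), so
 * each status transition is bracketed by FlushPoC_DcacheRegion() to
 * push the CPU's cached view of the handshake structure out to the
 * point of coherency.
 */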
/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer()
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);

    return ret;
}
/*
 * Output procedure for hardware shared memory core dumps:
 * tries to fill up the buffer completely before flushing.
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
                                   uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;
    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
                currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kdb_printf("EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kdb_printf("Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);
    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }
    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
                currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
                bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}

#endif /* WITH_CONSISTENT_DBG */
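/*
 * Buffer lifecycle, in summary: elements start on free_hw_shmem_dbg_bufs,
 * move to currently_filling_buf while the compressor writes into them,
 * queue on hw_shmem_dbg_bufs_to_flush when full, become
 * currently_flushing_buf while the debugger drains them, and then return
 * to the free list.
 */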
static int
kern_dump_disk_proc(unsigned int request, __unused char *corename,
                    uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request)
    {
        case KDP_WRQ:
            err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
            if (kIOReturnSuccess != err) break;
            err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
            break;

        case KDP_SEEK:
            noffset = *((uint64_t *) data);
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) break;
            err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
            break;

        case KDP_DATA:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
            if (kIOReturnSuccess != err) break;
            break;

        case KDP_EOF:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) break;
            err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
            if (kIOReturnSuccess != err) break;
            break;
    }

    return (err);
}
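/*
 * do_kern_dump() drives this proc as: KDP_WRQ to open the polled-mode
 * core file, KDP_SEEK to position it, repeated KDP_DATA writes (a
 * zero-length write flushes), and finally KDP_EOF to close out the dump.
 */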
/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    IOReturn                   ret;

    vars->zipped += len;

    if (vars->error >= 0)
    {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            vars->error = ret;
        }
        if (!buf && !len) DEBG("100..");
    }
    return (len);
}
/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned                   remain;
    IOReturn                   ret;
    unsigned                   chunk;
    boolean_t                  flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        else
        {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain          -= chunk;

        if (vars->outremain && !flush) break;
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                                    vars->outlen - vars->outremain,
                                    vars->outbuf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            vars->error = ret;
        }
        if (flush)
        {
            DEBG("100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    uint64_t                   percent, total_in = 0;
    unsigned                   len;

    len = strm->avail_in;
    if (len > size) len = size;
    if (len == 0) return 0;

    if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
    else                                 bzero(buf, len);
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in  += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++))
    {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10)
        {
            vars->lastpercent = percent;
            DEBG("%lld..\n", percent);
        }
    }
    return (int) len;
}
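/*
 * Note the sentinel above: kdp_core_stream_output_chunk() sets next_in to
 * the z_stream itself as a "zero marker", so when next_in points at strm
 * this routine supplies zeroes instead of copying. That is how the
 * page-alignment padding is generated without a dedicated zero buffer.
 */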
static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int        zr;
    boolean_t  flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled)
    {
        (*vars->zoutput)(zs, data, length);
    }
    else
    {
        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0)
        {
            if (!zs->avail_in && !flush)
            {
                if (!length) break;
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out)
            {
                zs->next_out  = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) break;
            if (zr < 0)
            {
                DEBG("ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) (*vars->zoutput)(zs, NULL, 0);
    }

    return (vars->error);
}
static IOReturn
kdp_core_stream_output(struct kdp_core_out_vars * vars, uint64_t length, void * data)
{
    IOReturn     err;
    unsigned int chunk;
    enum       { kMaxZLibChunk = 1024*1024*1024 };

    do
    {
        if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
        else                         chunk = kMaxZLibChunk;
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) data = (void *) (((uintptr_t) data) + chunk);
    }
    while (length && (kIOReturnSuccess == err));

    return (err);
}
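/*
 * Chunking rationale: zlib's avail_in is an unsigned int, so a 64-bit
 * region length cannot be handed to deflate() in a single call;
 * kMaxZLibChunk (1GB) keeps each chunk comfortably within range.
 */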
extern vm_offset_t c_buffers;
extern vm_size_t   c_buffers_size;

ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t  ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers)
    {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    }
    else if (vaddr == kdp_core_zmem)
    {
        /* zlib working memory */
        ppn = 0;
        vincr = kdp_core_zsize;
    }
    else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
    {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    }
    else
        ppn = pmap_find_phys(kernel_pmap, vaddr);

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr)
    {
        uint64_t phys = ptoa_64(ppn);
        if (physmap_enclosed(phys)) *pvphysaddr = PHYSMAP_PTOV(phys);
        else                        ppn = 0;
    }

    return (ppn);
}
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn        ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t        vincr;
    vm_map_offset_t debug_start;
    vm_map_offset_t debug_end;
    boolean_t       lastvavalid;

    debug_start = trunc_page((vm_map_offset_t) debug_buf_addr);
    debug_end   = round_page((vm_map_offset_t) (debug_buf_addr + debug_buf_size));

#if defined(__i386__) || defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn;

        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
        if (ppn != 0)
        {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !pmap_valid_page(ppn))
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__i386__) || defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__i386__) || defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}
int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}
int
kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          void *context)
{
    struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
    IOReturn ret = KERN_SUCCESS;
    kernel_segment_command_t sc;
    vm_size_t size = (vm_size_t)(end - start);

    if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
        return (KERN_NO_SPACE);
    }

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    /*
     * Fill in segment command structure.
     */
    sc.cmd = LC_SEGMENT_KERNEL;
    sc.cmdsize = sizeof(kernel_segment_command_t);
    sc.segname[0] = 0;
    sc.vmaddr = (vm_address_t)start;
    sc.vmsize = size;
    sc.fileoff = (vm_address_t)kdc->foffset;
    sc.filesize = size;
    sc.maxprot = VM_PROT_READ;
    sc.initprot = VM_PROT_READ;
    sc.nsects = 0;
    sc.flags = 0;

    if ((ret = kdp_core_stream_output(kdc->outvars, sizeof(kernel_segment_command_t), (caddr_t) &sc)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }

    kdc->hoffset += sizeof(kernel_segment_command_t);
    kdc->foffset += size;

out:
    return (ret);
}
int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
    int ret = KERN_SUCCESS;
    vm_size_t size = (vm_size_t)(end - start);

    kdc->region_count++;
    kdc->dumpable_bytes += size;
    if ((ret = kdp_core_stream_output(kdc->outvars, size, (caddr_t)(uintptr_t)start)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }
    kdc->foffset += size;

out:
    return (ret);
}
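/*
 * Sketch of the core file these callbacks assemble (see do_kern_dump
 * below): a kernel_mach_header_t with MH_CORE filetype, one
 * LC_SEGMENT_KERNEL command per present-mapping run, the LC_THREAD
 * state for each thread, an LC_IDENT command trailed by
 * kdp_kernelversion_string, then the segment data itself, zero-padded
 * out to a page boundary.
 */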
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    struct kern_dump_send_context      kdc_sendseg   = { };
    struct kern_dump_send_context      kdc_send      = { };
    struct kdp_core_out_vars           outvars       = { };
    struct mach_core_fileheader        hdr           = { };
    struct ident_command               ident         = { };
    kernel_mach_header_t               mh            = { };

    uint32_t  segment_count = 0, tstate_count = 0;
    size_t    command_size = 0, header_size = 0, tstate_size = 0;
    uint64_t  hoffset = 0, foffset = 0;
    int       ret = 0;
    char *    log_start;
    char *    buf;
    size_t    log_size;
    uint64_t  new_logs = 0;
    boolean_t opened = false;

    log_start = debug_buf_ptr;
    log_size  = debug_buf_ptr - debug_buf_addr;
    assert (log_size <= debug_buf_size);
    if (debug_buf_stackshot_start)
    {
        assert(debug_buf_stackshot_end >= debug_buf_stackshot_start);
        log_size -= (debug_buf_stackshot_end - debug_buf_stackshot_start);
    }
    if (kd_variant == KERN_DUMP_DISK)
    {
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, &hoffset)) != kIOReturnSuccess) {
            DEBG("KDP_WRQ(0x%x)\n", ret);
            goto out;
        }
    }
    opened = true;
    bzero(&outvars, sizeof(outvars));
    bzero(&hdr, sizeof(hdr));
    outvars.outproc = outproc;

    /*
     * Initialize zstream variables that point to input and output
     * buffer info.
     */
    kdp_core_zs.avail_in  = 0;
    kdp_core_zs.next_in   = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out  = NULL;
    kdp_core_zs.opaque    = &outvars;
    kdc_sendseg.outvars   = &outvars;
    kdc_send.outvars      = &outvars;

    enum { kHdrOffset = 4096, kMaxCoreLog = 16384 };
    if (kd_variant == KERN_DUMP_DISK) {
        outvars.outbuf    = NULL;
        outvars.outlen    = 0;
        outvars.outremain = 0;
        outvars.zoutput   = kdp_core_zoutput;
        // space for file header, panic log, core log
        foffset = (kHdrOffset + log_size + kMaxCoreLog + 4095) & ~4095ULL;
        hdr.log_offset  = kHdrOffset;
        hdr.gzip_offset = foffset;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            DEBG("KDP_SEEK(0x%x)\n", ret);
            goto out;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        outvars.outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.outlen = kdp_crashdump_pkt_size;
        outvars.outremain = outvars.outlen;
        outvars.zoutput = kdp_core_zoutputbuf;
#if WITH_CONSISTENT_DBG
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.outbuf    = NULL;
        outvars.outlen    = 0;
        outvars.outremain = 0;
        outvars.zoutput   = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif /* WITH_CONSISTENT_DBG */
    }

    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars.zoutput);
    kdc_preflight.region_count = 0;
    kdc_preflight.dumpable_bytes = 0;

    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS,
                                         kern_dump_pmap_traverse_preflight_callback,
                                         &kdc_preflight);
    if (ret)
    {
        DEBG("pmap traversal failed: %d\n", ret);
        return (ret);
    }

    outvars.totalbytes = kdc_preflight.dumpable_bytes;
    assert(outvars.totalbytes);
    segment_count = kdc_preflight.region_count;

    kdp_core_total_size = outvars.totalbytes;
    kdp_core_total_size_sent_uncomp = 0;

    kern_collectth_state_size(&tstate_count, &tstate_size);
    command_size = segment_count * sizeof(kernel_segment_command_t)
                 + tstate_count * tstate_size
                 + sizeof(struct ident_command) + sizeof(kdp_kernelversion_string);

    header_size = command_size + sizeof(kernel_mach_header_t);
    /*
     * Set up Mach-O header for currently executing kernel.
     */
    mh.magic = _mh_execute_header.magic;
    mh.cputype = _mh_execute_header.cputype;
    mh.cpusubtype = _mh_execute_header.cpusubtype;
    mh.filetype = MH_CORE;
    mh.ncmds = segment_count + tstate_count + 1;
    mh.sizeofcmds = (uint32_t)command_size;
    mh.flags = 0;
#if defined(__LP64__)
    mh.reserved = 0;
#endif
    hoffset = 0;                                  /* offset into header */
    foffset = (uint64_t) round_page(header_size); /* offset into file   */

    /*
     * Transmit the Mach-O MH_CORE header, and segment and thread commands.
     */
    if ((ret = kdp_core_stream_output(&outvars, sizeof(kernel_mach_header_t), (caddr_t) &mh)) != kIOReturnSuccess)
    {
        DEBG("KDP_DATA(0x%x)\n", ret);
        goto out;
    }
    hoffset += sizeof(kernel_mach_header_t);

    DEBG("%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local kernel core..." :
               "Transmitting kernel state, please wait:\n");

    kdc_sendseg.region_count   = 0;
    kdc_sendseg.dumpable_bytes = 0;
    kdc_sendseg.hoffset        = hoffset;
    kdc_sendseg.foffset        = foffset;
    kdc_sendseg.header_size    = header_size;
    if ((ret = pmap_traverse_present_mappings(kernel_pmap,
                                              VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                              VM_MAX_KERNEL_ADDRESS,
                                              kern_dump_pmap_traverse_send_seg_callback,
                                              &kdc_sendseg)) != kIOReturnSuccess)
    {
        DEBG("pmap_traverse_present_mappings(0x%x)\n", ret);
        goto out;
    }

    hoffset = kdc_sendseg.hoffset;
    /*
     * Now send out the LC_THREAD load command, with the thread information
     * for the current activation.
     */
    if (tstate_size > 0)
    {
        void * iter = NULL;
        char tstate[tstate_size];
        do {
            /*
             * Now send out the LC_THREAD load command, with the thread information
             */
            kern_collectth_state (current_thread(), tstate, tstate_size, &iter);

            if ((ret = kdp_core_stream_output(&outvars, tstate_size, tstate)) != kIOReturnSuccess) {
                DEBG("kdp_core_stream_output(0x%x)\n", ret);
                goto out;
            }
        } while (iter);
    }
    ident.cmd = LC_IDENT;
    ident.cmdsize = (uint32_t) (sizeof(struct ident_command) + sizeof(kdp_kernelversion_string));
    if ((ret = kdp_core_stream_output(&outvars, sizeof(ident), &ident)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }
    if ((ret = kdp_core_stream_output(&outvars, sizeof(kdp_kernelversion_string), &kdp_kernelversion_string[0])) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }
    kdc_send.region_count   = 0;
    kdc_send.dumpable_bytes = 0;
    foffset = (uint64_t) round_page(header_size); /* offset into file */
    kdc_send.foffset = foffset;
    kdc_send.hoffset = 0;
    foffset = round_page_64(header_size) - header_size;
    if (foffset)
    {
        // zero fill to page align
        if ((ret = kdp_core_stream_output(&outvars, foffset, NULL)) != kIOReturnSuccess) {
            DEBG("kdp_core_stream_output(0x%x)\n", ret);
            goto out;
        }
    }
    ret = pmap_traverse_present_mappings(kernel_pmap,
                                         VM_MIN_KERNEL_AND_KEXT_ADDRESS,
                                         VM_MAX_KERNEL_ADDRESS,
                                         kern_dump_pmap_traverse_send_segdata_callback,
                                         &kdc_send);
    if (ret) {
        DEBG("pmap_traverse_present_mappings(0x%x)\n", ret);
        goto out;
    }

    /* flush the compression stream */
    if ((ret = kdp_core_stream_output(&outvars, 0, NULL)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }
out:
    if (kIOReturnSuccess == ret) DEBG("success\n");
    else                         outvars.zipped = 0;

    DEBG("Mach-o header: %lu\n", header_size);
    DEBG("Region counts: [%u, %u, %u]\n", kdc_preflight.region_count,
                                          kdc_sendseg.region_count,
                                          kdc_send.region_count);
    DEBG("Byte counts : [%llu, %llu, %llu, %lu, %lu]\n", kdc_preflight.dumpable_bytes,
                                                         kdc_sendseg.dumpable_bytes,
                                                         kdc_send.dumpable_bytes,
                                                         outvars.zipped,
                                                         (long) (debug_buf_ptr - debug_buf_addr));
    if ((kd_variant == KERN_DUMP_DISK) && opened)
    {
        // write debug log
        foffset = kHdrOffset;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            DEBG("KDP_SEEK(0x%x)\n", ret);
            goto exit;
        }

        new_logs = debug_buf_ptr - log_start;
        if (new_logs > kMaxCoreLog) new_logs = kMaxCoreLog;
        buf = debug_buf_addr;
        if (debug_buf_stackshot_start)
        {
            if ((ret = (*outproc)(KDP_DATA, NULL, (debug_buf_stackshot_start - debug_buf_addr), debug_buf_addr)) != kIOReturnSuccess)
            {
                DEBG("KDP_DATA(0x%x)\n", ret);
                goto exit;
            }
            buf = debug_buf_stackshot_end;
        }
        if ((ret = (*outproc)(KDP_DATA, NULL, (log_start + new_logs - buf), buf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            goto exit;
        }
        // write the file header
        foffset = 0;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            DEBG("KDP_SEEK(0x%x)\n", ret);
            goto exit;
        }

        hdr.signature   = MACH_CORE_FILEHEADER_SIGNATURE;
        hdr.log_length  = new_logs + log_size;
        hdr.gzip_length = outvars.zipped;

        if ((ret = (*outproc)(KDP_DATA, NULL, sizeof(hdr), &hdr)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            goto exit;
        }
    }
exit:
    /* close / last packet */
    if (opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess)
    {
        DEBG("KDP_EOF(0x%x)\n", ret);
    }

    return (ret);
}
int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t dumped_local;
    if (kd_variant == KERN_DUMP_DISK) {
        if (dumped_local) return (0);
        dumped_local = TRUE;
        return (do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK));
#if WITH_CONSISTENT_DBG
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        return (do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG));
#endif /* WITH_CONSISTENT_DBG */
    } else {
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
        return (do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET));
#else
        return (-1);
#endif
    }
}
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}
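/*
 * zlib is wired to these routines so that all of its state is carved, by
 * simple pointer bump, out of the kdp_core_zmem region preallocated in
 * kdp_core_init() below: nothing can be allocated once the kernel is
 * dumping in panic context. Frees are no-ops; deflateResetWithIO() in
 * do_kern_dump() simply reuses the same state on each dump.
 */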
#define LEVEL  Z_BEST_SPEED
#define NETBUF 1440 /* assumed value: scratch for one network-packet-sized buffer */

void
kdp_core_init(void)
{
    int wbits = 12;   /* assumed window bits; sized into kdp_core_zsize below */
    int memlevel = 3; /* assumed memLevel; sized into kdp_core_zsize below */
    kern_return_t kr;
#if WITH_CONSISTENT_DBG
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
#endif

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert (KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree  = kdp_core_zfree;

    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
                     wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }
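/*
 * Passing windowBits + 16 to deflateInit2() selects gzip framing for the
 * compressed stream, which is why the disk variant records the
 * compressed region as hdr.gzip_offset / hdr.gzip_length.
 */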
#if WITH_CONSISTENT_DBG
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since astris isn't capable
     * of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
            0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and adjust
     * the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    kdp_hw_shmem_dbg_bufsize -= sizeof(*hwsd_info);
    kdp_hw_shmem_dbg_bufsize = (kdp_hw_shmem_dbg_bufsize / KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS);
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % OPTIMAL_ASTRIS_READSIZE);

    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = kalloc(sizeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
            &kdp_hw_shmem_dbg_contact_deadline_interval);

    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* WITH_CONSISTENT_DBG */
}
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */