2 * Copyright (c) 2015 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
31 #include <mach/mach_types.h>
32 #include <mach/vm_attributes.h>
33 #include <mach/vm_param.h>
34 #include <mach/vm_map.h>
35 #include <vm/vm_protos.h>
36 #include <vm/vm_kern.h>
37 #include <vm/vm_map.h>
38 #include <libsa/types.h>
39 #include <libkern/kernel_mach_header.h>
40 #include <libkern/zlib.h>
41 #include <kdp/kdp_internal.h>
42 #include <kdp/kdp_core.h>
43 #include <IOKit/IOPolledInterface.h>
44 #include <IOKit/IOBSD.h>
45 #include <sys/errno.h>
46 #include <sys/msgbuf.h>
48 #if defined(__i386__) || defined(__x86_64__)
49 #include <i386/pmap_internal.h>
50 #include <kdp/ml/i386/kdp_x86_common.h>
51 #endif /* defined(__i386__) || defined(__x86_64__) */
55 typedef int (*pmap_traverse_callback
)(vm_map_offset_t start
,
59 extern int pmap_traverse_present_mappings(pmap_t pmap
,
60 vm_map_offset_t start
,
62 pmap_traverse_callback callback
,
67 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start
,
71 kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start
,
75 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start
,
79 struct kdp_core_out_vars
;
80 typedef int (*kern_dump_output_proc
)(unsigned int request
, char *corename
,
81 uint64_t length
, void *panic_data
);
83 struct kdp_core_out_vars
85 kern_dump_output_proc outproc
;
86 z_output_func zoutput
;
97 struct kern_dump_preflight_context
99 uint32_t region_count
;
100 uint64_t dumpable_bytes
;
103 struct kern_dump_send_context
105 struct kdp_core_out_vars
* outvars
;
108 uint64_t header_size
;
109 uint64_t dumpable_bytes
;
110 uint32_t region_count
;
113 extern uint32_t kdp_crashdump_pkt_size
;
115 static vm_offset_t kdp_core_zmem
;
116 static size_t kdp_core_zsize
;
117 static size_t kdp_core_zoffset
;
118 static z_stream kdp_core_zs
;
121 #define DEBG kdb_printf
123 boolean_t
kdp_has_polled_corefile(void)
125 return (NULL
!= gIOPolledCoreFileVars
);
/*
 * kern_dump_disk_proc — kern_dump_output_proc implementation that writes the
 * core dump to the local polled-mode core file (gIOPolledCoreFileVars) via
 * the IOPolledInterface file routines.
 *
 * NOTE(review): this extraction dropped many original lines (the embedded
 * line numbers jump, e.g. 130 -> 133 -> 138), so the return-type line, the
 * opening brace, local declarations (e.g. `noffset`), and the surrounding
 * switch/case structure (presumably dispatching on `request`: KDP_WRQ /
 * KDP_SEEK / KDP_DATA / KDP_EOF, judging by the `break`s and by the callers
 * in do_kern_dump) are missing from this view — restore from the upstream
 * file rather than guessing.
 */
129 kern_dump_disk_proc(unsigned int request
, __unused
char *corename
,
130 uint64_t length
, void * data
)
/* All paths accumulate an IOReturn-style status in `err`. */
133 uint32_t err
= kIOReturnSuccess
;
/* Presumably the open/rewind (KDP_WRQ) case: seek to offset 0, then open
 * the pollers before any writes — TODO confirm against upstream. */
138 err
= IOPolledFileSeek(gIOPolledCoreFileVars
, 0);
139 if (kIOReturnSuccess
!= err
) break;
140 err
= IOPolledFilePollersOpen(gIOPolledCoreFileVars
, kIOPolledBeforeSleepState
, false);
/* Presumably the KDP_SEEK case: flush buffered output (zero-length write),
 * then reposition to the caller-supplied 64-bit offset read from *data. */
144 noffset
= *((uint64_t *) data
);
145 err
= IOPolledFileWrite(gIOPolledCoreFileVars
, 0, 0, NULL
);
146 if (kIOReturnSuccess
!= err
) break;
147 err
= IOPolledFileSeek(gIOPolledCoreFileVars
, noffset
);
/* Presumably the KDP_DATA case: write `length` bytes from `data`. */
151 err
= IOPolledFileWrite(gIOPolledCoreFileVars
, data
, length
, NULL
);
152 if (kIOReturnSuccess
!= err
) break;
/* Presumably the KDP_EOF case: flush, then close the pollers. */
156 err
= IOPolledFileWrite(gIOPolledCoreFileVars
, 0, 0, NULL
);
157 if (kIOReturnSuccess
!= err
) break;
158 err
= IOPolledFilePollersClose(gIOPolledCoreFileVars
, kIOPolledBeforeSleepState
);
159 if (kIOReturnSuccess
!= err
) break;
/*
 * kdp_core_zoutput — zlib z_output_func used when the output target can
 * accept arbitrary-length writes: forwards each compressed chunk straight
 * to vars->outproc as a KDP_DATA request.
 *
 * NOTE(review): extraction dropped lines here (166 -> 167, 169 -> 174);
 * the return type, the `ret` declaration, braces, and any error-latching
 * statement after the failed-write DEBG are missing from this view.
 */
167 kdp_core_zoutput(z_streamp strm
, Bytef
*buf
, unsigned len
)
/* The kdp_core_out_vars context travels in the z_stream's opaque pointer. */
169 struct kdp_core_out_vars
* vars
= (typeof(vars
)) strm
->opaque
;
/* Only keep writing while no error has been recorded (error stays >= 0). */
174 if (vars
->error
>= 0)
176 if ((ret
= (*vars
->outproc
)(KDP_DATA
, NULL
, len
, buf
)) != kIOReturnSuccess
)
178 DEBG("KDP_DATA(0x%x)\n", ret
);
/* A NULL/zero-length call is the end-of-stream flush marker: log 100%. */
181 if (!buf
&& !len
) DEBG("100..");
/*
 * kdp_core_zoutputbuf — zlib z_output_func used when output must be sent in
 * fixed-size packets (vars->outbuf of vars->outlen bytes; do_kern_dump sizes
 * these to kdp_crashdump_pkt_size): buffers compressed bytes and emits full
 * packets via vars->outproc.
 *
 * NOTE(review): extraction dropped lines (187 -> 189, 189 -> 196, etc.);
 * the return type, declarations of `flush`/`remain`/`chunk`/`ret`, the
 * initialization and advancing of `remain`/`inbuf`, loop braces and any
 * error-latching are missing from this view.
 */
187 kdp_core_zoutputbuf(z_streamp strm
, Bytef
*inbuf
, unsigned inlen
)
189 struct kdp_core_out_vars
* vars
= (typeof(vars
)) strm
->opaque
;
/* Track total compressed bytes produced. */
196 vars
->zipped
+= inlen
;
/* NULL/zero-length input is the end-of-stream flush request. */
197 flush
= (!inbuf
&& !inlen
);
/* Copy input into the packet buffer; emit whenever it fills (or on flush). */
199 while ((vars
->error
>= 0) && (remain
|| flush
))
201 chunk
= vars
->outremain
;
202 if (chunk
> remain
) chunk
= remain
;
/* Append at the current fill point: outlen - outremain bytes are used. */
203 bcopy(inbuf
, &vars
->outbuf
[vars
->outlen
- vars
->outremain
], chunk
);
204 vars
->outremain
-= chunk
;
/* Packet not yet full and not flushing: wait for more input. */
208 if (vars
->outremain
&& !flush
) break;
209 if ((ret
= (*vars
->outproc
)(KDP_DATA
, NULL
,
210 vars
->outlen
- vars
->outremain
,
211 vars
->outbuf
)) != kIOReturnSuccess
)
213 DEBG("KDP_DATA(0x%x)\n", ret
);
/* Reset the packet buffer for the next chunk. */
221 vars
->outremain
= vars
->outlen
;
/*
 * kdp_core_zinput — zlib z_input_func: feeds up to `size` bytes of pending
 * input (strm->avail_in / strm->next_in) into `buf`, maintaining a CRC in
 * strm->adler and printing coarse percentage progress via DEBG.
 *
 * NOTE(review): extraction dropped lines (227 -> 229, 229 -> 233, ...);
 * the return type, the declarations of `len` and `percent`, block braces,
 * and the final return of `len` are missing from this view.
 */
227 kdp_core_zinput(z_streamp strm
, Bytef
*buf
, unsigned size
)
229 struct kdp_core_out_vars
* vars
= (typeof(vars
)) strm
->opaque
;
233 len
= strm
->avail_in
;
234 if (len
> size
) len
= size
;
235 if (len
== 0) return 0;
/* next_in pointing at the z_stream itself is the zero-fill sentinel set by
 * kdp_core_stream_output; in that case supply zeroes instead of copying. */
237 if (strm
->next_in
!= (Bytef
*) strm
) memcpy(buf
, strm
->next_in
, len
);
238 else bzero(buf
, len
);
/* Running CRC32 over the uncompressed input stream. */
239 strm
->adler
= z_crc32(strm
->adler
, buf
, len
);
241 strm
->avail_in
-= len
;
242 strm
->next_in
+= len
;
243 strm
->total_in
+= len
;
/* Every 512th read, report progress in steps of at least 10%. */
245 if (0 == (511 & vars
->writes
++))
247 percent
= (strm
->total_in
* 100) / vars
->totalbytes
;
248 if ((percent
- vars
->lastpercent
) >= 10)
250 vars
->lastpercent
= percent
;
251 DEBG("%lld..", percent
);
/*
 * kdp_core_stream_output — pushes `length` bytes at `data` through the zlib
 * deflate stream; a (0, NULL) call flushes and finishes the stream
 * (Z_FINISH) and forwards a final (NULL, 0) flush to vars->zoutput.
 *
 * NOTE(review): extraction dropped lines (259 -> 265, 265 -> 269, ...);
 * the return type, the `zs`/`zr`/`flush` declarations and the binding of
 * `zs` (presumably to &kdp_core_zs), loop braces, and error-latching
 * statements are missing from this view.
 */
259 kdp_core_stream_output(struct kdp_core_out_vars
* vars
, uint64_t length
, void * data
)
265 flush
= (!length
&& !data
);
/* Any previous input must have been fully consumed before a new call. */
269 assert(!zs
->avail_in
);
271 while (vars
->error
>= 0)
273 if (!zs
->avail_in
&& !flush
)
/* NULL data requests zero fill: mark it by pointing next_in at the
 * z_stream itself; kdp_core_zinput recognizes this sentinel. */
276 zs
->next_in
= data
? data
: (Bytef
*) zs
/* zero marker */;
277 zs
->avail_in
= (uInt
)length
;
/* Output is routed through the zoutput callback rather than a real
 * buffer, so next_out is a dummy and avail_out is effectively unlimited. */
282 zs
->next_out
= (Bytef
*) zs
;
283 zs
->avail_out
= UINT32_MAX
;
285 zr
= deflate(zs
, flush
? Z_FINISH
: Z_NO_FLUSH
);
286 if (Z_STREAM_END
== zr
) break;
289 DEBG("ZERR %d\n", zr
);
/* On finish, send the terminating (NULL, 0) flush to the output function. */
294 if (flush
) (*vars
->zoutput
)(zs
, NULL
, 0);
296 return (vars
->error
);
299 extern vm_offset_t c_buffers
;
300 extern vm_size_t c_buffers_size
;
/*
 * kernel_pmap_present_mapping — probes whether `vaddr` has a present kernel
 * mapping (via pmap_find_phys) and reports, through *pvincr, how far the
 * caller should advance: normally one page, but entire excluded ranges
 * (VM compressor buffers, the kdp zlib scratch arena) are skipped in one
 * step so they are not written into the core dump.
 *
 * NOTE(review): extraction dropped lines (303 -> 307, and after 325); the
 * return type, the `ppn`/`vincr` declarations, the store through pvincr,
 * and the return of ppn are missing from this view.
 */
303 kernel_pmap_present_mapping(uint64_t vaddr
, uint64_t * pvincr
)
307 vincr
= PAGE_SIZE_64
;
/* Caller must pass a page-aligned address. */
309 assert(!(vaddr
& PAGE_MASK_64
));
311 /* VA ranges to exclude */
312 if (vaddr
== c_buffers
)
314 /* compressor data */
316 vincr
= c_buffers_size
;
318 else if (vaddr
== kdp_core_zmem
)
320 /* zlib working memory */
322 vincr
= kdp_core_zsize
;
325 ppn
= pmap_find_phys(kernel_pmap
, vaddr
);
332 pmap_traverse_present_mappings(pmap_t __unused pmap
,
333 vm_map_offset_t start
,
335 pmap_traverse_callback callback
,
339 vm_map_offset_t vcurstart
, vcur
;
341 vm_map_offset_t debug_start
;
342 vm_map_offset_t debug_end
;
343 boolean_t lastvavalid
;
345 debug_start
= trunc_page((vm_map_offset_t
) debug_buf_addr
);
346 debug_end
= round_page((vm_map_offset_t
) (debug_buf_addr
+ debug_buf_size
));
348 #if defined(__i386__) || defined(__x86_64__)
349 assert(!is_ept_pmap(pmap
));
352 /* Assumes pmap is locked, or being called from the kernel debugger */
354 if (start
> end
) return (KERN_INVALID_ARGUMENT
);
358 for (vcur
= vcurstart
= start
; (ret
== KERN_SUCCESS
) && (vcur
< end
); ) {
361 ppn
= kernel_pmap_present_mapping(vcur
, &vincr
);
364 if (((vcur
< debug_start
) || (vcur
>= debug_end
))
365 && !pmap_valid_page(ppn
))
367 /* not something we want */
374 /* Start of a new virtual region */
380 /* end of a virtual region */
381 ret
= callback(vcurstart
, vcur
, context
);
385 #if defined(__i386__) || defined(__x86_64__)
386 /* Try to skip by 2MB if possible */
387 if (((vcur
& PDMASK
) == 0) && cpu_64bit
) {
389 pde
= pmap_pde(pmap
, vcur
);
390 if (0 == pde
|| ((*pde
& INTEL_PTE_VALID
) == 0)) {
391 /* Make sure we wouldn't overflow */
392 if (vcur
< (end
- NBPD
)) {
397 #endif /* defined(__i386__) || defined(__x86_64__) */
402 if ((ret
== KERN_SUCCESS
) && lastvavalid
) {
403 /* send previous run */
404 ret
= callback(vcurstart
, vcur
, context
);
/*
 * kern_dump_pmap_traverse_preflight_callback — first-pass traversal
 * callback: totals dumpable bytes (and, per the context struct, presumably
 * counts regions) so do_kern_dump can size the Mach-O header before
 * anything is written.
 *
 * NOTE(review): extraction dropped lines (410 -> 414, and everything after
 * 418); the remaining parameters (`end`, `context`), the region_count
 * increment, braces and the return of ret are missing from this view.
 */
410 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start
,
414 struct kern_dump_preflight_context
*kdc
= (struct kern_dump_preflight_context
*)context
;
415 IOReturn ret
= KERN_SUCCESS
;
418 kdc
->dumpable_bytes
+= (end
- start
);
/*
 * kern_dump_pmap_traverse_send_seg_callback — second-pass traversal
 * callback: emits one LC_SEGMENT_KERNEL load command per present VA region
 * into the compressed output stream, advancing the header offset (hoffset)
 * and the segment-data file offset (foffset) in the send context.
 *
 * NOTE(review): extraction dropped lines (424 -> 428, 452 -> 456, ...);
 * the remaining parameters, several segment-command field assignments
 * (segname / vmsize / filesize / nsects / flags are not visible here),
 * braces, error handling after the DEBG, and the return are missing from
 * this view.
 */
424 kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start
,
428 struct kern_dump_send_context
*kdc
= (struct kern_dump_send_context
*)context
;
429 IOReturn ret
= KERN_SUCCESS
;
430 kernel_segment_command_t sc
;
431 vm_size_t size
= (vm_size_t
)(end
- start
);
/* Refuse to overrun the preflighted header area. */
433 if (kdc
->hoffset
+ sizeof(sc
) > kdc
->header_size
) {
434 return (KERN_NO_SPACE
);
438 kdc
->dumpable_bytes
+= (end
- start
);
441 * Fill in segment command structure.
444 sc
.cmd
= LC_SEGMENT_KERNEL
;
445 sc
.cmdsize
= sizeof(kernel_segment_command_t
);
447 sc
.vmaddr
= (vm_address_t
)start
;
/* NOTE(review): the 64-bit file offset is cast to vm_address_t — confirm
 * this cannot truncate on the targets this code builds for. */
449 sc
.fileoff
= (vm_address_t
)kdc
->foffset
;
451 sc
.maxprot
= VM_PROT_READ
;
452 sc
.initprot
= VM_PROT_READ
;
/* Stream the load command itself through the compressor. */
456 if ((ret
= kdp_core_stream_output(kdc
->outvars
, sizeof(kernel_segment_command_t
), (caddr_t
) &sc
)) != kIOReturnSuccess
) {
457 DEBG("kdp_core_stream_output(0x%x)\n", ret
);
461 kdc
->hoffset
+= sizeof(kernel_segment_command_t
);
462 kdc
->foffset
+= size
;
/*
 * kern_dump_pmap_traverse_send_segdata_callback — third-pass traversal
 * callback: streams the actual bytes of each present VA region through the
 * compressed output and advances the file offset.
 *
 * NOTE(review): extraction dropped lines (470 -> 474, after 484); the
 * remaining parameters (`end`, `context`), braces, any region_count update
 * and the return of ret are missing from this view.
 */
470 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start
,
474 struct kern_dump_send_context
*kdc
= (struct kern_dump_send_context
*)context
;
475 int ret
= KERN_SUCCESS
;
476 vm_size_t size
= (vm_size_t
)(end
- start
);
479 kdc
->dumpable_bytes
+= size
;
/* NOTE(review): size is narrowed to unsigned int here — regions larger
 * than 4 GiB would truncate; presumably bounded upstream — TODO confirm. */
480 if ((ret
= kdp_core_stream_output(kdc
->outvars
, (unsigned int)size
, (caddr_t
)(uintptr_t)start
)) != kIOReturnSuccess
) {
481 DEBG("kdp_core_stream_output(0x%x)\n", ret
);
484 kdc
->foffset
+= size
;
491 do_kern_dump(kern_dump_output_proc outproc
, bool local
)
493 struct kern_dump_preflight_context kdc_preflight
;
494 struct kern_dump_send_context kdc_sendseg
;
495 struct kern_dump_send_context kdc_send
;
496 struct kdp_core_out_vars outvars
;
497 struct mach_core_fileheader hdr
;
498 kernel_mach_header_t mh
;
499 uint32_t segment_count
, tstate_count
;
500 size_t command_size
= 0, header_size
= 0, tstate_size
= 0;
501 uint64_t hoffset
, foffset
;
509 log_start
= debug_buf_ptr
;
511 if (log_start
>= debug_buf_addr
)
513 log_length
= log_start
- debug_buf_addr
;
514 if (log_length
<= debug_buf_size
) log_length
= debug_buf_size
- log_length
;
520 if ((ret
= (*outproc
)(KDP_WRQ
, NULL
, 0, &hoffset
)) != kIOReturnSuccess
) {
521 DEBG("KDP_WRQ(0x%x)\n", ret
);
528 bzero(&outvars
, sizeof(outvars
));
529 bzero(&hdr
, sizeof(hdr
));
530 outvars
.outproc
= outproc
;
531 kdp_core_zs
.avail_in
= 0;
532 kdp_core_zs
.next_in
= NULL
;
533 kdp_core_zs
.avail_out
= 0;
534 kdp_core_zs
.next_out
= NULL
;
535 kdp_core_zs
.opaque
= &outvars
;
536 kdc_sendseg
.outvars
= &outvars
;
537 kdc_send
.outvars
= &outvars
;
541 outvars
.outbuf
= NULL
;
543 outvars
.outremain
= 0;
544 outvars
.zoutput
= kdp_core_zoutput
;
545 // space for file header & log
546 foffset
= (4096 + log_length
+ 4095) & ~4095ULL;
547 hdr
.log_offset
= 4096;
548 hdr
.gzip_offset
= foffset
;
549 if ((ret
= (*outproc
)(KDP_SEEK
, NULL
, sizeof(foffset
), &foffset
)) != kIOReturnSuccess
) {
550 DEBG("KDP_SEEK(0x%x)\n", ret
);
556 outvars
.outbuf
= (Bytef
*) (kdp_core_zmem
+ kdp_core_zoffset
);
557 assert((kdp_core_zoffset
+ kdp_crashdump_pkt_size
) <= kdp_core_zsize
);
558 outvars
.outlen
= kdp_crashdump_pkt_size
;
559 outvars
.outremain
= outvars
.outlen
;
560 outvars
.zoutput
= kdp_core_zoutputbuf
;
563 deflateResetWithIO(&kdp_core_zs
, kdp_core_zinput
, outvars
.zoutput
);
566 kdc_preflight
.region_count
= 0;
567 kdc_preflight
.dumpable_bytes
= 0;
569 ret
= pmap_traverse_present_mappings(kernel_pmap
,
570 VM_MIN_KERNEL_AND_KEXT_ADDRESS
,
571 VM_MAX_KERNEL_ADDRESS
,
572 kern_dump_pmap_traverse_preflight_callback
,
576 DEBG("pmap traversal failed: %d\n", ret
);
580 outvars
.totalbytes
= kdc_preflight
.dumpable_bytes
;
581 assert(outvars
.totalbytes
);
582 segment_count
= kdc_preflight
.region_count
;
584 kern_collectth_state_size(&tstate_count
, &tstate_size
);
586 command_size
= segment_count
* sizeof(kernel_segment_command_t
) + tstate_count
* tstate_size
;
588 header_size
= command_size
+ sizeof(kernel_mach_header_t
);
591 * Set up Mach-O header for currently executing kernel.
594 mh
.magic
= _mh_execute_header
.magic
;
595 mh
.cputype
= _mh_execute_header
.cputype
;;
596 mh
.cpusubtype
= _mh_execute_header
.cpusubtype
;
597 mh
.filetype
= MH_CORE
;
598 mh
.ncmds
= segment_count
+ tstate_count
;
599 mh
.sizeofcmds
= (uint32_t)command_size
;
601 #if defined(__LP64__)
605 hoffset
= 0; /* offset into header */
606 foffset
= (uint64_t) round_page(header_size
); /* offset into file */
608 /* Transmit the Mach-O MH_CORE header, and segment and thread commands
610 if ((ret
= kdp_core_stream_output(&outvars
, sizeof(kernel_mach_header_t
), (caddr_t
) &mh
) != kIOReturnSuccess
))
612 DEBG("KDP_DATA(0x%x)\n", ret
);
616 hoffset
+= sizeof(kernel_mach_header_t
);
618 DEBG("%s", local
? "Writing local kernel core..." :
619 "Transmitting kernel state, please wait:\n");
621 kdc_sendseg
.region_count
= 0;
622 kdc_sendseg
.dumpable_bytes
= 0;
623 kdc_sendseg
.hoffset
= hoffset
;
624 kdc_sendseg
.foffset
= foffset
;
625 kdc_sendseg
.header_size
= header_size
;
627 if ((ret
= pmap_traverse_present_mappings(kernel_pmap
,
628 VM_MIN_KERNEL_AND_KEXT_ADDRESS
,
629 VM_MAX_KERNEL_ADDRESS
,
630 kern_dump_pmap_traverse_send_seg_callback
,
631 &kdc_sendseg
)) != kIOReturnSuccess
)
633 DEBG("pmap_traverse_present_mappings(0x%x)\n", ret
);
637 hoffset
= kdc_sendseg
.hoffset
;
639 * Now send out the LC_THREAD load command, with the thread information
640 * for the current activation.
646 char tstate
[tstate_size
];
650 * Now send out the LC_THREAD load command, with the thread information
652 kern_collectth_state (current_thread(), tstate
, tstate_size
, &iter
);
654 if ((ret
= kdp_core_stream_output(&outvars
, tstate_size
, tstate
)) != kIOReturnSuccess
) {
655 DEBG("kdp_core_stream_output(0x%x)\n", ret
);
662 kdc_send
.region_count
= 0;
663 kdc_send
.dumpable_bytes
= 0;
664 foffset
= (uint64_t) round_page(header_size
); /* offset into file */
665 kdc_send
.foffset
= foffset
;
666 kdc_send
.hoffset
= 0;
667 foffset
= round_page_64(header_size
) - header_size
;
670 // zero fill to page align
671 if ((ret
= kdp_core_stream_output(&outvars
, foffset
, NULL
)) != kIOReturnSuccess
) {
672 DEBG("kdp_core_stream_output(0x%x)\n", ret
);
677 ret
= pmap_traverse_present_mappings(kernel_pmap
,
678 VM_MIN_KERNEL_AND_KEXT_ADDRESS
,
679 VM_MAX_KERNEL_ADDRESS
,
680 kern_dump_pmap_traverse_send_segdata_callback
,
683 DEBG("pmap_traverse_present_mappings(0x%x)\n", ret
);
687 if ((ret
= kdp_core_stream_output(&outvars
, 0, NULL
) != kIOReturnSuccess
)) {
688 DEBG("kdp_core_stream_output(0x%x)\n", ret
);
693 if (kIOReturnSuccess
== ret
) DEBG("success\n");
694 else outvars
.zipped
= 0;
696 DEBG("Mach-o header: %lu\n", header_size
);
697 DEBG("Region counts: [%u, %u, %u]\n", kdc_preflight
.region_count
,
698 kdc_sendseg
.region_count
,
699 kdc_send
.region_count
);
700 DEBG("Byte counts : [%llu, %llu, %llu, %lu, %llu]\n", kdc_preflight
.dumpable_bytes
,
701 kdc_sendseg
.dumpable_bytes
,
702 kdc_send
.dumpable_bytes
,
703 outvars
.zipped
, log_length
);
708 if ((ret
= (*outproc
)(KDP_SEEK
, NULL
, sizeof(foffset
), &foffset
)) != kIOReturnSuccess
) {
709 DEBG("KDP_SEEK(0x%x)\n", ret
);
713 new_logs
= debug_buf_ptr
- log_start
;
714 if (new_logs
> log_length
) new_logs
= log_length
;
716 if ((ret
= (*outproc
)(KDP_DATA
, NULL
, new_logs
, log_start
)) != kIOReturnSuccess
)
718 DEBG("KDP_DATA(0x%x)\n", ret
);
725 if ((ret
= (*outproc
)(KDP_SEEK
, NULL
, sizeof(foffset
), &foffset
)) != kIOReturnSuccess
) {
726 DEBG("KDP_SEEK(0x%x)\n", ret
);
730 hdr
.signature
= MACH_CORE_FILEHEADER_SIGNATURE
;
731 hdr
.log_length
= new_logs
;
732 hdr
.gzip_length
= outvars
.zipped
;
734 if ((ret
= (*outproc
)(KDP_DATA
, NULL
, sizeof(hdr
), &hdr
)) != kIOReturnSuccess
)
736 DEBG("KDP_DATA(0x%x)\n", ret
);
742 /* close / last packet */
743 if ((ret
= (*outproc
)(KDP_EOF
, NULL
, 0, ((void *) 0))) != kIOReturnSuccess
)
745 DEBG("KDP_EOF(0x%x)\n", ret
);
/*
 * kern_dump — entry point: writes a kernel core dump either to the local
 * polled-mode core file (kern_dump_disk_proc) or over the kernel debugger
 * transport (kdp_send_crashdump_data).
 *
 * NOTE(review): extraction dropped lines (753 -> 755, 759 -> 761); the
 * return type, braces, the condition selecting the local-disk path
 * (presumably involving `local` and kdp_has_polled_corefile), and the
 * setting of dumped_local are missing from this view.
 */
753 kern_dump(boolean_t local
)
/* Guard so a local dump is only attempted once. */
755 static boolean_t dumped_local
;
757 if (dumped_local
) return (0);
759 return (do_kern_dump(&kern_dump_disk_proc
, true));
761 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
762 return (do_kern_dump(&kdp_send_crashdump_data
, false));
/*
 * kdp_core_zalloc — zlib allocation hook: bump-allocates from the
 * preallocated kdp_core_zmem arena (never freed individually; see
 * kdp_core_zfree) so compression performs no real allocation at dump time.
 *
 * NOTE(review): extraction dropped lines (769 -> 773, after 775); the
 * return type, the `result` declaration, braces, and the return of
 * `result` are missing from this view.
 */
769 kdp_core_zalloc(void * __unused ref
, u_int items
, u_int size
)
773 result
= (void *) (kdp_core_zmem
+ kdp_core_zoffset
);
/* Round each allocation up to a 32-byte boundary. */
774 kdp_core_zoffset
+= ~31L & (31 + (items
* size
)); // 32b align for vector crc
/* The arena was sized via zlib_deflate_memory_size(); overflow is a bug. */
775 assert(kdp_core_zoffset
<= kdp_core_zsize
);
/*
 * kdp_core_zfree — zlib free hook: intentionally a no-op, since all
 * allocations come from the preallocated kdp_core_zmem arena (see
 * kdp_core_zalloc) and are never individually reclaimed.
 * NOTE(review): the return-type line (presumably `static void`) was
 * dropped by this extraction.
 */
781 kdp_core_zfree(void * __unused ref
, void * __unused ptr
) {}
784 #define LEVEL Z_BEST_SPEED
794 if (kdp_core_zs
.zalloc
) return;
795 kdp_core_zsize
= round_page(NETBUF
+ zlib_deflate_memory_size(wbits
, memlevel
));
796 printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize
);
797 kr
= kmem_alloc(kernel_map
, &kdp_core_zmem
, kdp_core_zsize
, VM_KERN_MEMORY_DIAG
);
798 assert (KERN_SUCCESS
== kr
);
800 kdp_core_zoffset
= 0;
801 kdp_core_zs
.zalloc
= kdp_core_zalloc
;
802 kdp_core_zs
.zfree
= kdp_core_zfree
;
804 if (deflateInit2(&kdp_core_zs
, LEVEL
, Z_DEFLATED
,
805 wbits
+ 16 /*gzip mode*/, memlevel
, Z_DEFAULT_STRATEGY
))
807 /* Allocation failed */
808 bzero(&kdp_core_zs
, sizeof(kdp_core_zs
));
809 kdp_core_zoffset
= 0;
813 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */