/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <i386/pmap.h>
#include <i386/pmap_internal.h> /* pmap_pde */
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/ml/i386/kdp_x86_common.h>

#include <machine/pal_routines.h>
#include <libkern/kernel_mach_header.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1

boolean_t kdp_read_io;
boolean_t kdp_trans_off;

static addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                               vm_map_offset_t end,
                                               void *context);
int kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          void *context);

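/*
 * Translate a virtual address through the given pmap to a 64-bit physical
 * address. Returns 0 when no physical page is mapped at 'va'.
 */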
static addr64_t
kdp_vtophys(
    pmap_t   pmap,
    addr64_t va)
{
    addr64_t pa;
    ppnum_t  pp;

    pp = pmap_find_phys(pmap, va);
    if (!pp)
        return 0;

    /* Combine the physical page number with the offset within the page */
    pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);

    return pa;
}

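/*
 * Debugger read of kernel (or kdp_pmap-selected) virtual memory: both the
 * source and destination are translated to physical addresses and copied
 * with ml_copy_phys() in chunks that never cross a page boundary on either
 * side. An untranslatable page ends the copy early, so the return value is
 * the number of bytes actually read.
 *
 * For example, with 4 KiB pages, a 10,000-byte read whose source and
 * destination both begin at page offset 0xF00 proceeds in chunks of
 * 256, 4096, 4096, and 1552 bytes.
 */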
mach_vm_size_t
kdp_machine_vm_read(mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
    addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
    addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
    addr64_t cur_phys_dst, cur_phys_src;
    mach_vm_size_t resid = len;
    mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
    pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
    printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

    if (kdp_trans_off) {
        /* Address translation is off; treat 'src' as a physical address */
        kdp_readphysmem64_req_t rq;
        mach_vm_size_t ret;

        rq.address = src;
        rq.nbytes = (uint32_t)len;
        ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
        return ret;
    }

/* If a different pmap has been specified with kdp_pmap, use it to translate the
 * source (cur_virt_src); otherwise, the source is translated using the
 * kernel_pmap.
 */
    if (kdp_pmap)
        src_pmap = kdp_pmap;

    while (resid != 0) {
        if (!(cur_phys_src = kdp_vtophys(src_pmap,
                    cur_virt_src)))
            goto exit;

/* Always translate the destination buffer using the kernel_pmap */
        if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
            goto exit;

        /* Validate physical page numbers unless kdp_read_io is set */
        if (kdp_read_io == FALSE)
            if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
                goto exit;

/* Get length left on page */
        cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

/* Do a physical copy */
        ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt);

        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

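/*
 * Debugger read of physical memory. If the request is destined for another
 * logical CPU, re-invoke this function there via kdp_x86_xcpu_invoke();
 * otherwise translate only the destination buffer and copy page-bounded
 * chunks with ml_copy_phys().
 */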
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
                      uint16_t lcpu)
{
    mach_vm_address_t src = rq->address;
    mach_vm_size_t len = rq->nbytes;

    addr64_t cur_virt_dst;
    addr64_t cur_phys_dst, cur_phys_src;
    mach_vm_size_t resid = len;
    mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (mach_vm_size_t)
            kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
    }

#ifdef KDP_VM_READ_DEBUG
    printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

    cur_virt_dst = (addr64_t)(intptr_t)dst;
    cur_phys_src = (addr64_t)src;

    while (resid != 0) {
        if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
            goto exit;

/* Get length left on page */
        cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        /* Do a physical copy; use ml_copy_phys() in the event this is
         * a short read with potential side effects.
         */
        ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt);

        cur_phys_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

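/*
 * Debugger write into kernel virtual memory; the mirror image of
 * kdp_machine_vm_read(), with both addresses translated through
 * kernel_pmap. Returns the number of bytes actually written.
 */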
mach_vm_size_t
kdp_machine_vm_write(caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

    cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
    cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

    resid = (unsigned)len;

    while (resid != 0) {
        if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
            goto exit;

        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
            goto exit;

/* Copy as many bytes as possible without crossing a page */
        cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        ml_copy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */

        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

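/*
 * Debugger write into physical memory, forwarded to the target logical CPU
 * when necessary; only the source buffer requires translation.
 */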
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
                       uint16_t lcpu)
{
    mach_vm_address_t dst = rq->address;
    mach_vm_size_t len = rq->nbytes;
    addr64_t cur_virt_src;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (mach_vm_size_t)
            kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
    }

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

    cur_virt_src = (addr64_t)(intptr_t)src;
    cur_phys_dst = (addr64_t)dst;

    resid = (unsigned)len;

    while (resid != 0) {
        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
            goto exit;

/* Copy as many bytes as possible without crossing a page */
        cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

        if (cnt_src > cnt_dst)
            cnt = cnt_dst;
        else
            cnt = cnt_src;
        if (cnt > resid)
            cnt = resid;

        ml_copy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */

        cur_virt_src += cnt;
        cur_phys_dst += cnt;
        resid -= cnt;
    }
exit:
    return (len - resid);
}

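/*
 * Read an x86 I/O port on the requested logical CPU. Only 1-, 2-, and
 * 4-byte accesses are supported; anything else is KDPERR_BADFLAVOR.
 */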
int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint16_t addr = rq->address;
    uint16_t size = rq->nbytes;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
    }

    switch (size) {
    case 1:
        *((uint8_t *) data)  = inb(addr);
        break;
    case 2:
        *((uint16_t *) data) = inw(addr);
        break;
    case 4:
        *((uint32_t *) data) = inl(addr);
        break;
    default:
        return KDPERR_BADFLAVOR;
    }

    return KDPERR_NO_ERROR;
}

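/*
 * Write an x86 I/O port on the requested logical CPU; same size
 * restrictions as kdp_machine_ioport_read().
 */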
int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint16_t addr = rq->address;
    uint16_t size = rq->nbytes;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
    }

    switch (size) {
    case 1:
        outb(addr, *((uint8_t *) data));
        break;
    case 2:
        outw(addr, *((uint16_t *) data));
        break;
    case 4:
        outl(addr, *((uint32_t *) data));
        break;
    default:
        return KDPERR_BADFLAVOR;
    }

    return KDPERR_NO_ERROR;
}

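/*
 * Read and write model-specific registers. MSRs are per-CPU state, so the
 * xcpu forwarding matters here: the rdmsr64()/wrmsr64() must execute on
 * the logical CPU named in the request.
 */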
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint64_t *value = (uint64_t *) data;
    uint32_t msr = rq->address;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
    }

    *value = rdmsr64(msr);
    return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint64_t *value = (uint64_t *) data;
    uint32_t msr = rq->address;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
    }

    wrmsr64(msr, *value);
    return KDPERR_NO_ERROR;
}

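/*
 * Walk [start, end) in the given pmap and invoke 'callback' once for each
 * maximal run of virtually contiguous, physically valid pages. On 64-bit
 * CPUs, ranges whose page directory entry is empty are skipped 2MB (NBPD)
 * at a time rather than page by page.
 */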
int
pmap_traverse_present_mappings(pmap_t pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    int ret = KERN_SUCCESS;
    vm_map_offset_t vcurstart, vcur;
    boolean_t lastvavalid = FALSE;

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) {
        return (KERN_INVALID_ARGUMENT);
    }

    if (start & PAGE_MASK_64) {
        return (KERN_INVALID_ARGUMENT);
    }

    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = pmap_find_phys(pmap, vcur);

        if (ppn != 0 && !pmap_valid_page(ppn)) {
            /* not something we want */
            ppn = 0;
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;

                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vcur += NBPD;
                        continue;
                    }
                }
            }
        }

        vcur += PAGE_SIZE_64;
    }

    if ((ret == KERN_SUCCESS)
        && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}

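/*
 * Context structures for the two traversal passes of kern_dump(): the
 * preflight pass counts regions and bytes so the core file header can be
 * sized, and the send pass tracks header/file offsets while transmitting.
 */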
struct kern_dump_preflight_context {
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

struct kern_dump_send_context {
    uint64_t hoffset;
    uint64_t foffset;
    uint64_t header_size;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    int ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}

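/*
 * For each present virtual region, emit an LC_SEGMENT_KERNEL load command
 * into the core file header, then seek to the region's file offset and
 * transmit the region's contents.
 */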
int
kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context)
{
    struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
    int ret = KERN_SUCCESS;
    kernel_segment_command_t sc;
    vm_size_t size = (vm_size_t)(end - start);

    if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
        return (KERN_NO_SPACE);
    }

    /*
     * Fill in segment command structure.
     */
    sc.cmd = LC_SEGMENT_KERNEL;
    sc.cmdsize = sizeof(kernel_segment_command_t);
    sc.segname[0] = 0;
    sc.vmaddr = (vm_address_t)start;
    sc.vmsize = size;
    sc.fileoff = (vm_address_t)kdc->foffset;
    sc.filesize = size;
    sc.maxprot = VM_PROT_READ;
    sc.initprot = VM_PROT_READ;
    sc.nsects = 0;
    sc.flags = 0;

    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(kdc->hoffset), &kdc->hoffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }

    if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, sizeof(kernel_segment_command_t), (caddr_t) &sc)) < 0) {
        printf("kdp_send_crashdump_data failed with error %d\n", ret);
        goto out;
    }

    kdc->hoffset += sizeof(kernel_segment_command_t);

    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(kdc->foffset), &kdc->foffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        goto out;
    }

    if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, (unsigned int)size, (caddr_t)(uintptr_t)start)) < 0) {
        printf("kdp_send_crashdump_data failed with error %d\n", ret);
        goto out;
    }

    kdc->foffset += size;

out:
    return (ret);
}

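/*
 * Dump kernel memory as a Mach-O MH_CORE file over the KDP transport:
 * a preflight traversal sizes the header, the MH_CORE header and segment
 * commands are sent, each present region's data follows, and an LC_THREAD
 * command plus an EOF packet complete the dump.
 */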
int
kern_dump(void)
{
    int ret;
    struct kern_dump_preflight_context kdc_preflight;
    struct kern_dump_send_context kdc_send;
    uint32_t segment_count;
    size_t command_size = 0, header_size = 0, tstate_size = 0;
    uint64_t hoffset = 0, foffset = 0;
    kernel_mach_header_t mh;

    kdc_preflight.region_count = 0;
    kdc_preflight.dumpable_bytes = 0;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        VM_MIN_KERNEL_AND_KEXT_ADDRESS,
        VM_MAX_KERNEL_ADDRESS,
        kern_dump_pmap_traverse_preflight_callback,
        &kdc_preflight);
    if (ret) {
        printf("pmap traversal failed: %d\n", ret);
        return (ret);
    }

    printf("Kernel dump region count: %u\n", kdc_preflight.region_count);
    printf("Kernel dump byte count: %llu\n", kdc_preflight.dumpable_bytes);

    segment_count = kdc_preflight.region_count;

    tstate_size = sizeof(struct thread_command) + kern_collectth_state_size();

    command_size = segment_count * sizeof(kernel_segment_command_t) +
        tstate_size;

    header_size = command_size + sizeof(kernel_mach_header_t);

    /*
     * Set up Mach-O header for currently executing kernel.
     */
    printf("Generated Mach-O header size was %lu\n", header_size);

    mh.magic = _mh_execute_header.magic;
    mh.cputype = _mh_execute_header.cputype;
    mh.cpusubtype = _mh_execute_header.cpusubtype;
    mh.filetype = MH_CORE;
    mh.ncmds = segment_count + 1 /* thread */;
    mh.sizeofcmds = (uint32_t)command_size;
    mh.flags = 0;
#if defined(__LP64__)
    mh.reserved = 0;
#endif

    hoffset = 0;                                   /* offset into header */
    foffset = (uint32_t)round_page(header_size);   /* offset into file */

    /* Transmit the Mach-O MH_CORE header, and seek forward past the
     * area reserved for the segment and thread commands
     * to begin data transmission
     */
    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        return (ret);
    }

    if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, sizeof(kernel_mach_header_t), (caddr_t) &mh)) < 0) {
        printf("kdp_send_crashdump_data failed with error %d\n", ret);
        return (ret);
    }

    hoffset += sizeof(kernel_mach_header_t);

    if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        return (ret);
    }

    printf("Transmitting kernel state, please wait: ");

    kdc_send.hoffset = hoffset;
    kdc_send.foffset = foffset;
    kdc_send.header_size = header_size;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        VM_MIN_KERNEL_AND_KEXT_ADDRESS,
        VM_MAX_KERNEL_ADDRESS,
        kern_dump_pmap_traverse_send_callback,
        &kdc_send);
    if (ret) {
        kprintf("pmap traversal failed: %d\n", ret);
        return (ret);
    }

    /* Reload mutated offsets */
    hoffset = kdc_send.hoffset;
    foffset = kdc_send.foffset;

    /*
     * Now send out the LC_THREAD load command, with the thread information
     * for the current activation.
     */
    if (tstate_size > 0) {
        char tstate[tstate_size];

        kern_collectth_state(current_thread(), tstate, tstate_size);

        if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
            printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
            return (ret);
        }

        if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, tstate_size, tstate)) < 0) {
            printf("kdp_send_crashdump_data failed with error %d\n", ret);
            return (ret);
        }

        hoffset += tstate_size;
    }

    /* last packet */
    if ((ret = kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
        printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
        return (ret);
    }

    return (KERN_SUCCESS);
}

pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */

void
kdp_machine_init(void) {
    if (debug_boot_arg == 0)
        return;

    vm_map_entry_t e;
    kern_return_t kr = vm_map_find_space(kernel_map,
        &debugger_window_kva,
        PAGE_SIZE, 0,
        VM_MAKE_TAG(VM_MEMORY_IOKIT), &e);

    if (kr != KERN_SUCCESS) {
        panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
    }

    vm_map_unlock(kernel_map);

    debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

    if (debugger_ptep == NULL) {
        pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
        debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
    }
}
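
/*
 * Sketch of the intended use of the window (hypothetical, not code from
 * this file): point *debugger_ptep at a target physical page, flush the
 * stale TLB entry for debugger_window_kva, then access the page through
 * the window, e.g.:
 *
 *     *debugger_ptep = pa_to_pte(target_pa) | INTEL_PTE_VALID;
 *     invlpg((uintptr_t)debugger_window_kva);
 *     value = *(uint64_t *)(uintptr_t)debugger_window_kva;
 *
 * The actual remap logic lives elsewhere (e.g., in the PAL/machine
 * read/write paths).
 */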
);