/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h> /* pmap_pde */
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <mach/vm_map.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <machine/pal_routines.h>
#include <libkern/kernel_mach_header.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1
/*
 * A (potentially valid) physical address is not a kernel address,
 * i.e. it is a user address.
 */
#define IS_PHYS_ADDR(addr)	IS_USERADDR64_CANONICAL(addr)
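/*
 * Why this works: kernel virtual addresses on x86-64 occupy the upper
 * canonical half of the address space, so a 64-bit value that
 * canonicalizes as a *user* address (e.g. a raw physical frame address
 * such as 0x12345000 -- an illustrative value, not from the original
 * source) cannot be a kernel VA. When kdp_trans_off is set, such values
 * are treated as physical addresses by the read path below.
 */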
boolean_t kdp_read_io;
boolean_t kdp_trans_off;
addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
					       vm_map_offset_t end,
					       void *context);
int kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
					  vm_map_offset_t end,
					  void *context);
addr64_t
kdp_vtophys(
	pmap_t   pmap,
	addr64_t va)
{
	addr64_t pa;
	ppnum_t  pp;

	/* Look up the physical page backing this VA; 0 means not mapped */
	pp = pmap_find_phys(pmap, va);
	if (!pp)
		return 0;

	/* Recombine the page number with the offset within the page;
	 * e.g. with 4K pages, pp 0x1234 and offset 0x345 yield 0x1234345.
	 */
	pa = ((addr64_t)pp << PAGE_SHIFT) | (va & PAGE_MASK);

	return (pa);
}
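/*
 * The read/write routines below copy in page-bounded chunks: each pass
 * translates the current source and destination VAs, copies at most up
 * to the nearer page boundary, then advances and retranslates. Worked
 * example with illustrative numbers: if cur_phys_src == 0x5ff8 and
 * cur_phys_dst == 0x7000 with 4K pages, then cnt_src == 8 and
 * cnt_dst == 0x1000, so only 8 bytes move before the next translation.
 */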
mach_vm_size_t
kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
	addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
	pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	/* With translation off, a physical source address is read directly */
	if (kdp_trans_off && IS_PHYS_ADDR(src)) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
		return ret;
	}
	/* If a different pmap has been specified with kdp_pmap, use it to
	 * translate the source (cur_virt_src); otherwise, the source is
	 * translated using the kernel_pmap.
	 */
	if (kdp_pmap)
		src_pmap = kdp_pmap;

	while (resid != 0) {
		if (!(cur_phys_src = kdp_vtophys(src_pmap,
			    cur_virt_src)))
			goto exit;

		/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Validate physical page numbers unless kdp_read_io is set */
		if (kdp_read_io == FALSE)
			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
				goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy */
		if (EFAULT == ml_copy_phys(cur_phys_src,
					   cur_phys_dst,
					   (vm_size_t)cnt))
			goto exit;

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
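/*
 * The physical read/write handlers below are cross-CPU aware: when a
 * request names a logical CPU other than the one the debugger stub is
 * currently running on, kdp_x86_xcpu_invoke() re-runs the same handler
 * on the target CPU and returns its result, so callers never need to
 * care which CPU serviced the request.
 */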
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
		      uint16_t lcpu)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t    len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
			kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
	}

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	cur_virt_dst = (addr64_t)(intptr_t)dst;
	cur_phys_src = (addr64_t)src;

	while (resid != 0) {
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy; use ml_copy_phys() in the event this is
		 * a short read with potential side effects.
		 */
		if (EFAULT == ml_copy_phys(cur_phys_src,
					   cur_phys_dst,
					   (vm_size_t)cnt))
			goto exit;

		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}

exit:
	return (len - resid);
}
mach_vm_size_t
kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
	cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt))
			goto exit;	/* Copy stuff over */

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
		       uint16_t lcpu)
{
	mach_vm_address_t dst = rq->address;
	mach_vm_size_t    len = rq->nbytes;
	addr64_t cur_virt_src;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
			kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
	}

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = (addr64_t)(intptr_t)src;
	cur_phys_dst = (addr64_t)dst;

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt))
			goto exit;	/* Copy stuff over */

		cur_virt_src += cnt;
		cur_phys_dst += cnt;
		resid -= cnt;
	}

exit:
	return (len - resid);
}
int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
	}

	switch (size) {
	case 1:
		*((uint8_t *) data)  = inb(addr);
		break;
	case 2:
		*((uint16_t *) data) = inw(addr);
		break;
	case 4:
		*((uint32_t *) data) = inl(addr);
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}
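/*
 * Illustrative request (not from the original source): a debugger
 * probing the COM1 line-status register would send rq->address == 0x3fd
 * with rq->nbytes == 1, taking the inb() path above. Access widths
 * other than 1, 2, or 4 bytes are rejected with KDPERR_BADFLAVOR.
 */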
int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
	}

	switch (size) {
	case 1:
		outb(addr, *((uint8_t *) data));
		break;
	case 2:
		outw(addr, *((uint16_t *) data));
		break;
	case 4:
		outl(addr, *((uint32_t *) data));
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr    = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
	}

	*value = rdmsr64(msr);
	return KDPERR_NO_ERROR;
}
int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr    = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
	}

	wrmsr64(msr, *value);
	return KDPERR_NO_ERROR;
}
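/*
 * Illustrative request (not from the original source): reading MSR 0x10
 * (the IA32 time-stamp counter) through kdp_machine_msr64_read() above
 * returns the 64-bit TSC via rdmsr64(0x10). Note that rq->address is
 * not validated here; accessing an unimplemented MSR faults, so the
 * remote debugger is trusted to request only MSRs that exist.
 */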
int
pmap_traverse_present_mappings(pmap_t pmap,
			       vm_map_offset_t start,
			       vm_map_offset_t end,
			       pmap_traverse_callback callback,
			       void *context)
{
	int ret = KERN_SUCCESS;
	vm_map_offset_t vcurstart, vcur;
	boolean_t lastvavalid = FALSE;

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return (KERN_INVALID_ARGUMENT);
	}

	if (start & PAGE_MASK_64) {
		return (KERN_INVALID_ARGUMENT);
	}

	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
		ppnum_t ppn = pmap_find_phys(pmap, vcur);

		if (ppn != 0 && !pmap_valid_page(ppn)) {
			/* not something we want */
			ppn = 0;
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

			/* Try to skip by 2MB if possible */
			if (((vcur & PDMASK) == 0) && cpu_64bit) {
				pd_entry_t *pde;

				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vcur += NBPD;
						continue;
					}
				}
			}
		}

		vcur += PAGE_SIZE_64;
	}

	if ((ret == KERN_SUCCESS)
	    && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}
	return (ret);
}
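/*
 * The traversal above coalesces contiguous present pages into runs and
 * invokes the callback once per run [vcurstart, vcur). When an entire
 * page-directory entry is invalid it skips a whole 2MB superpage (NBPD)
 * rather than probing 512 individual 4K pages, which keeps a sweep of
 * the full kernel address range tractable.
 */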
struct kern_dump_preflight_context
{
	uint32_t	region_count;
	uint64_t	dumpable_bytes;
};

struct kern_dump_send_context
{
	uint64_t	hoffset;
	uint64_t	foffset;
	uint64_t	header_size;
};
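/*
 * Core dumps are produced in two passes over the same address range: a
 * preflight pass counts regions and bytes so the Mach-O header and its
 * load commands can be sized up front, then a send pass emits one
 * segment command plus the region payload per run, advancing hoffset
 * (position within the header) and foffset (position within the file)
 * as it goes.
 */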
int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
					   vm_map_offset_t end,
					   void *context)
{
	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
	int ret = KERN_SUCCESS;

	kdc->region_count++;
	kdc->dumpable_bytes += (end - start);

	return (ret);
}
int
kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
				      vm_map_offset_t end,
				      void *context)
{
	struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
	int ret = KERN_SUCCESS;
	kernel_segment_command_t sc;
	vm_size_t size = (vm_size_t)(end - start);

	if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
		return (KERN_NO_SPACE);
	}

	/*
	 * Fill in segment command structure.
	 */
	sc.cmd = LC_SEGMENT_KERNEL;
	sc.cmdsize = sizeof(kernel_segment_command_t);
	sc.segname[0] = 0;
	sc.vmaddr = (vm_address_t)start;
	sc.vmsize = size;
	sc.fileoff = (vm_address_t)kdc->foffset;
	sc.filesize = size;
	sc.maxprot = VM_PROT_READ;
	sc.initprot = VM_PROT_READ;
	sc.nsects = 0;
	sc.flags = 0;

	/* Seek to this segment command's slot in the header and write it */
	if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(kdc->hoffset), &kdc->hoffset)) < 0) {
		printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, sizeof(kernel_segment_command_t), (caddr_t) &sc)) < 0) {
		printf("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	kdc->hoffset += sizeof(kernel_segment_command_t);

	/* Seek to the segment's data area and send the region contents */
	if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(kdc->foffset), &kdc->foffset)) < 0) {
		printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, (unsigned int)size, (caddr_t)(uintptr_t)start)) < 0) {
		printf("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	kdc->foffset += size;

out:
	return (ret);
}
int
kern_dump(void)
{
	int ret;
	struct kern_dump_preflight_context kdc_preflight;
	struct kern_dump_send_context kdc_send;
	uint32_t segment_count;
	size_t command_size = 0, header_size = 0, tstate_size = 0;
	uint64_t hoffset = 0, foffset = 0;
	kernel_mach_header_t mh;

	kdc_preflight.region_count = 0;
	kdc_preflight.dumpable_bytes = 0;

	ret = pmap_traverse_present_mappings(kernel_pmap,
					     VM_MIN_KERNEL_AND_KEXT_ADDRESS,
					     VM_MAX_KERNEL_ADDRESS,
					     kern_dump_pmap_traverse_preflight_callback,
					     &kdc_preflight);
	if (ret) {
		printf("pmap traversal failed: %d\n", ret);
		return (ret);
	}

	printf("Kernel dump region count: %u\n", kdc_preflight.region_count);
	printf("Kernel dump byte count: %llu\n", kdc_preflight.dumpable_bytes);

	segment_count = kdc_preflight.region_count;

	tstate_size = sizeof(struct thread_command) + kern_collectth_state_size();

	command_size = segment_count * sizeof(kernel_segment_command_t) +
		       tstate_size;

	header_size = command_size + sizeof(kernel_mach_header_t);

	/*
	 * Set up Mach-O header for currently executing kernel.
	 */
	printf("Generated Mach-O header size was %lu\n", header_size);

	mh.magic = _mh_execute_header.magic;
	mh.cputype = _mh_execute_header.cputype;
	mh.cpusubtype = _mh_execute_header.cpusubtype;
	mh.filetype = MH_CORE;
	mh.ncmds = segment_count + 1 /* thread */;
	mh.sizeofcmds = (uint32_t)command_size;
	mh.flags = 0;
#if defined(__LP64__)
	mh.reserved = 0;
#endif

	hoffset = 0;					/* offset into header */
	foffset = (uint32_t)round_page(header_size);	/* offset into file */

	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */
	if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}
	if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, sizeof(kernel_mach_header_t), (caddr_t) &mh)) < 0) {
		printf("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	hoffset += sizeof(kernel_mach_header_t);

	if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	printf("Transmitting kernel state, please wait: ");

	kdc_send.hoffset = hoffset;
	kdc_send.foffset = foffset;
	kdc_send.header_size = header_size;

	ret = pmap_traverse_present_mappings(kernel_pmap,
					     VM_MIN_KERNEL_AND_KEXT_ADDRESS,
					     VM_MAX_KERNEL_ADDRESS,
					     kern_dump_pmap_traverse_send_callback,
					     &kdc_send);
	if (ret) {
		kprintf("pmap traversal failed: %d\n", ret);
		return (ret);
	}

	/* Reload mutated offsets */
	hoffset = kdc_send.hoffset;
	foffset = kdc_send.foffset;

	/*
	 * Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 */
	if (tstate_size > 0) {
		char tstate[tstate_size];

		kern_collectth_state(current_thread(), tstate, tstate_size);

		if ((ret = kdp_send_crashdump_pkt(KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
			goto out;
		}

		if ((ret = kdp_send_crashdump_data(KDP_DATA, NULL, tstate_size, tstate)) < 0) {
			printf("kdp_send_crashdump_data failed with error %d\n", ret);
			goto out;
		}

		hoffset += tstate_size;
	}

	/* Last packet */
	if ((ret = kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
		printf("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

out:
	return (ret);
}
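/*
 * Resulting core file layout, for reference: a Mach-O MH_CORE header,
 * followed by one LC_SEGMENT_KERNEL command per dumped region and a
 * single LC_THREAD command, with segment payloads beginning at the
 * first page boundary past the header (round_page(header_size)).
 */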
pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */

void
kdp_machine_init(void)
{
	if (debug_boot_arg == 0)
		return;

	vm_map_entry_t e;
	kern_return_t kr = vm_map_find_space(kernel_map,
	    &debugger_window_kva,
	    PAGE_SIZE, 0,
	    VM_MAKE_TAG(VM_MEMORY_IOKIT), &e);

	if (kr != KERN_SUCCESS) {
		panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
	}

	vm_map_unlock(kernel_map);

	debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

	if (debugger_ptep == NULL) {
		pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
		debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
	}
}
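/*
 * Intended use of the window (hypothetical sketch, not code from this
 * file): the debugger can retarget the reserved PTE at an arbitrary
 * physical frame and then access it through the fixed virtual address,
 * along the lines of
 *
 *	pmap_store_pte(debugger_ptep, ptoa_64(target_ppn) | INTEL_PTE_VALID);
 *	...flush the TLB entry for debugger_window_kva...
 *	...read the frame through debugger_window_kva...
 *
 * where target_ppn is an illustrative placeholder for the frame of
 * interest.
 */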
);