/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h> /* pmap_pde */
#include <i386/mp.h>            /* cpu_number() */
#include <i386/misc_protos.h>
#include <i386/pio.h>           /* inb()/outb() and friends */
#include <i386/proc_reg.h>

#include <i386/pmap_internal.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <mach/vm_map.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <machine/pal_routines.h>
#include <libkern/kernel_mach_header.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1

/*
 * A (potentially valid) physical address is not a kernel address,
 * i.e. it is a user address.
 */
#define IS_PHYS_ADDR(addr)      IS_USERADDR64_CANONICAL(addr)

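/*
 * Debugger-controlled flags: when kdp_trans_off is set, request addresses that
 * look like user/physical addresses are read physically rather than translated;
 * when kdp_read_io is set, the physical-page validity check below is skipped so
 * that non-RAM (I/O) physical ranges can be accessed.
 */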
boolean_t kdp_read_io;
boolean_t kdp_trans_off;

pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va);

kdp_jtag_coredump_t kdp_jtag_coredump;

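/*
 * Translate a virtual address through the given pmap to a physical address.
 * Returns 0 when no valid mapping exists; callers treat that as failure.
 */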
pmap_paddr_t
kdp_vtophys(
    pmap_t      pmap,
    vm_offset_t va)
{
    pmap_paddr_t pa;

    pa = pmap_find_pa(pmap, va);

    return pa;
}

mach_vm_size_t
kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
    addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
    addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
    addr64_t cur_phys_dst, cur_phys_src;
    mach_vm_size_t resid = len;
    mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
    pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
    printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

    if (kdp_trans_off && IS_PHYS_ADDR(src)) {
        kdp_readphysmem64_req_t rq;
        mach_vm_size_t ret;

        rq.address = src;
        rq.nbytes = (uint32_t)len;
        ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
        return ret;
    }

    /* If a different pmap has been specified with kdp_pmap, use it to translate the
     * source (cur_virt_src); otherwise, the source is translated using the
     * kernel_pmap.
     */
    if (kdp_pmap) {
        src_pmap = kdp_pmap;
    }

    while (resid != 0) {
        if (!(cur_phys_src = kdp_vtophys(src_pmap,
            cur_virt_src))) {
            goto exit;
        }

        /* Always translate the destination buffer using the kernel_pmap */
        if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) {
            goto exit;
        }

        /* Validate physical page numbers unless kdp_read_io is set */
        if (kdp_read_io == FALSE) {
            if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) {
                goto exit;
            }
        }

        /* Get length left on page */
        cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (cnt_src > cnt_dst) {
            cnt = cnt_dst;
        } else {
            cnt = cnt_src;
        }
        if (cnt > resid) {
            cnt = resid;
        }

        /* Do a physical copy */
        if (EFAULT == ml_copy_phys(cur_phys_src,
            cur_phys_dst,
            (vm_size_t)cnt)) {
            goto exit;
        }
        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return len - resid;
}

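/*
 * Read physical memory directly into the supplied buffer, one page at a time.
 * A request targeted at a different logical CPU is forwarded to that CPU via
 * kdp_x86_xcpu_invoke().
 */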
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
    uint16_t lcpu)
{
    mach_vm_address_t src = rq->address;
    mach_vm_size_t    len = rq->nbytes;

    addr64_t cur_virt_dst;
    addr64_t cur_phys_dst, cur_phys_src;
    mach_vm_size_t resid = len;
    mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (mach_vm_size_t)
               kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
    }

#ifdef KDP_VM_READ_DEBUG
    printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

    cur_virt_dst = (addr64_t)(intptr_t)dst;
    cur_phys_src = (addr64_t)src;

    while (resid != 0) {
        if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) {
            goto exit;
        }

        /* Get length left on page */
        cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
        cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
        if (cnt_src > cnt_dst) {
            cnt = cnt_dst;
        } else {
            cnt = cnt_src;
        }
        if (cnt > resid) {
            cnt = resid;
        }

        /* Do a physical copy; use ml_copy_phys() in the event this is
         * a short read with potential side effects.
         */
        if (EFAULT == ml_copy_phys(cur_phys_src,
            cur_phys_dst,
            (vm_size_t)cnt)) {
            goto exit;
        }
        cur_phys_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }
exit:
    return len - resid;
}

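/*
 * Write the debugger-supplied buffer into kernel virtual memory, translating
 * both source and destination through the kernel_pmap one page at a time.
 */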
mach_vm_size_t
kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
    addr64_t cur_virt_src, cur_virt_dst;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

    cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
    cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

    resid = (unsigned)len;

    while (resid != 0) {
        if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
            goto exit;
        }

        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
            goto exit;
        }

        /* Copy as many bytes as possible without crossing a page */
        cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

        if (cnt_src > cnt_dst) {
            cnt = cnt_dst;
        } else {
            cnt = cnt_src;
        }
        if (cnt > resid) {
            cnt = resid;
        }

        if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) {
            goto exit; /* Copy stuff over */
        }
        cur_virt_src += cnt;
        cur_virt_dst += cnt;
        resid -= cnt;
    }

exit:
    return len - resid;
}

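/*
 * Write the supplied buffer to physical memory. As with the physical read
 * path, a request aimed at another logical CPU is forwarded with
 * kdp_x86_xcpu_invoke().
 */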
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
    uint16_t lcpu)
{
    mach_vm_address_t dst = rq->address;
    mach_vm_size_t    len = rq->nbytes;
    addr64_t cur_virt_src;
    addr64_t cur_phys_src, cur_phys_dst;
    unsigned resid, cnt, cnt_src, cnt_dst;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (mach_vm_size_t)
               kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
    }

#ifdef KDP_VM_WRITE_DEBUG
    printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

    cur_virt_src = (addr64_t)(intptr_t)src;
    cur_phys_dst = (addr64_t)dst;

    resid = (unsigned)len;

    while (resid != 0) {
        if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
            goto exit;
        }

        /* Copy as many bytes as possible without crossing a page */
        cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
        cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

        if (cnt_src > cnt_dst) {
            cnt = cnt_dst;
        } else {
            cnt = cnt_src;
        }
        if (cnt > resid) {
            cnt = resid;
        }

        if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) {
            goto exit; /* Copy stuff over */
        }

        cur_virt_src += cnt;
        cur_phys_dst += cnt;
        resid -= cnt;
    }

exit:
    return len - resid;
}

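/*
 * Debugger I/O-port accessors. Only 1-, 2- and 4-byte accesses are supported;
 * anything else is rejected with KDPERR_BADFLAVOR. Requests for a different
 * logical CPU are forwarded with kdp_x86_xcpu_invoke().
 */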
int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint16_t addr = rq->address;
    uint16_t size = rq->nbytes;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
    }

    switch (size) {
    case 1:
        *((uint8_t *) data)  = inb(addr);
        break;
    case 2:
        *((uint16_t *) data) = inw(addr);
        break;
    case 4:
        *((uint32_t *) data) = inl(addr);
        break;
    default:
        return KDPERR_BADFLAVOR;
    }

    return KDPERR_NO_ERROR;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint16_t addr = rq->address;
    uint16_t size = rq->nbytes;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
    }

    switch (size) {
    case 1:
        outb(addr, *((uint8_t *) data));
        break;
    case 2:
        outw(addr, *((uint16_t *) data));
        break;
    case 4:
        outl(addr, *((uint32_t *) data));
        break;
    default:
        return KDPERR_BADFLAVOR;
    }

    return KDPERR_NO_ERROR;
}

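/*
 * MSR accessors for the debugger: thin wrappers around rdmsr64()/wrmsr64()
 * that forward cross-CPU requests, since MSR state is per logical CPU.
 */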
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint64_t *value = (uint64_t *) data;
    uint32_t msr    = rq->address;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
    }

    *value = rdmsr64(msr);
    return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
    uint64_t *value = (uint64_t *) data;
    uint32_t msr    = rq->address;

    if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
        return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
    }

    wrmsr64(msr, *value);
    return KDPERR_NO_ERROR;
}

pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */
void
kdp_map_debug_pagetable_window(void)
{
    vm_map_entry_t e;
    kern_return_t kr;

    kr = vm_map_find_space(kernel_map,
        &debugger_window_kva,
        PAGE_SIZE, 0,
        0,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_OSFMK,
        &e);

    if (kr != KERN_SUCCESS) {
        panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
    }

    vm_map_unlock(kernel_map);

    debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

    if (debugger_ptep == NULL) {
        pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
        debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
    }
}

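/*
 * Illustrative sketch only (not part of this file): a consumer such as the
 * kernel coredump path can retarget the reserved PTE at an arbitrary physical
 * frame and then access that frame through debugger_window_kva. The exact PTE
 * bits and helpers below are assumptions for illustration:
 *
 *    pmap_store_pte(debugger_ptep,
 *        (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_NX);
 *    invlpg((uintptr_t)debugger_window_kva);
 *    bcopy((void *)(uintptr_t)(debugger_window_kva + (pa & PAGE_MASK)),
 *        buf, cnt);
 */
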
/* initialize kdp_jtag_coredump with data needed for JTAG coredump extraction */
static void
kdp_jtag_coredump_init(void)
{
    kdp_jtag_coredump.version                   = (uint64_t) KDP_JTAG_COREDUMP_VERSION_1;
    kdp_jtag_coredump.kernel_map_start          = (uint64_t) kernel_map->min_offset;
    kdp_jtag_coredump.kernel_map_end            = (uint64_t) kernel_map->max_offset;
    kdp_jtag_coredump.kernel_pmap_pml4          = (uint64_t) kernel_pmap->pm_pml4;
    kdp_jtag_coredump.pmap_memory_regions       = (uint64_t) &pmap_memory_regions;
    kdp_jtag_coredump.pmap_memory_region_count  = (uint64_t) pmap_memory_region_count;
    kdp_jtag_coredump.pmap_memory_region_t_size = (uint64_t) sizeof(pmap_memory_region_t);
    kdp_jtag_coredump.physmap_base              = (uint64_t) &physmap_base;

    /* update signature last so that JTAG can trust that structure has valid data */
    kdp_jtag_coredump.signature                 = (uint64_t) KDP_JTAG_COREDUMP_SIGNATURE;
}

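/*
 * For reference, the fields populated above imply a kdp_jtag_coredump_t along
 * these lines; the field order is an assumption and the authoritative
 * definition lives in the KDP headers:
 *
 *    typedef struct {
 *        uint64_t signature;
 *        uint64_t version;
 *        uint64_t kernel_map_start;
 *        uint64_t kernel_map_end;
 *        uint64_t kernel_pmap_pml4;
 *        uint64_t pmap_memory_regions;
 *        uint64_t pmap_memory_region_count;
 *        uint64_t pmap_memory_region_t_size;
 *        uint64_t physmap_base;
 *    } kdp_jtag_coredump_t;
 */
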
void
kdp_machine_init(void)
{
    if (debug_boot_arg == 0) {
        return;
    }

    kdp_map_debug_pagetable_window();
    kdp_jtag_coredump_init();
}