/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/errno.h>
31 #include <mach/mach_types.h>
32 #include <mach/vm_attributes.h>
33 #include <mach/vm_param.h>
34 #include <libsa/types.h>
36 #include <vm/vm_map.h>
37 #include <i386/pmap.h>
38 #include <i386/pmap_internal.h> /* pmap_pde */
40 #include <i386/misc_protos.h>
42 #include <i386/proc_reg.h>
44 #include <i386/pmap_internal.h>
46 #include <kdp/kdp_internal.h>
47 #include <kdp/kdp_core.h>
48 #include <kdp/ml/i386/kdp_x86_common.h>
49 #include <mach/vm_map.h>
51 #include <vm/vm_protos.h>
52 #include <vm/vm_kern.h>
54 #include <machine/pal_routines.h>
55 #include <libkern/kernel_mach_header.h>
57 // #define KDP_VM_READ_DEBUG 1
58 // #define KDP_VM_WRITE_DEBUG 1
61 * A (potentially valid) physical address is not a kernel address
62 * i.e. it'a a user address.
64 #define IS_PHYS_ADDR(addr) IS_USERADDR64_CANONICAL(addr)
66 boolean_t kdp_read_io
;
67 boolean_t kdp_trans_off
;
69 addr64_t
kdp_vtophys(pmap_t pmap
, addr64_t va
);
81 pp
= pmap_find_phys(pmap
, va
);
84 pa
= ((addr64_t
)pp
<< PAGE_SHIFT
) | (va
& PAGE_MASK
);
90 kdp_machine_vm_read( mach_vm_address_t src
, caddr_t dst
, mach_vm_size_t len
)
92 addr64_t cur_virt_src
= PAL_KDP_ADDR((addr64_t
)src
);
93 addr64_t cur_virt_dst
= PAL_KDP_ADDR((addr64_t
)(intptr_t)dst
);
94 addr64_t cur_phys_dst
, cur_phys_src
;
95 mach_vm_size_t resid
= len
;
96 mach_vm_size_t cnt
= 0, cnt_src
, cnt_dst
;
97 pmap_t src_pmap
= kernel_pmap
;
99 #ifdef KDP_VM_READ_DEBUG
100 printf("kdp_vm_read: src %llx dst %p len %llx\n", src
, (void *)dst
, len
);
103 if (kdp_trans_off
&& IS_PHYS_ADDR(src
)) {
104 kdp_readphysmem64_req_t rq
;
108 rq
.nbytes
= (uint32_t)len
;
109 ret
= kdp_machine_phys_read(&rq
, dst
, KDP_CURRENT_LCPU
);
113 /* If a different pmap has been specified with kdp_pmap, use it to translate the
114 * source (cur_virt_src); otherwise, the source is translated using the
121 if (!(cur_phys_src
= kdp_vtophys(src_pmap
,
125 /* Always translate the destination buffer using the kernel_pmap */
126 if(!(cur_phys_dst
= kdp_vtophys(kernel_pmap
, cur_virt_dst
)))
129 /* Validate physical page numbers unless kdp_read_io is set */
130 if (kdp_read_io
== FALSE
)
131 if (!pmap_valid_page(i386_btop(cur_phys_dst
)) || !pmap_valid_page(i386_btop(cur_phys_src
)))
134 /* Get length left on page */
135 cnt_src
= PAGE_SIZE
- (cur_phys_src
& PAGE_MASK
);
136 cnt_dst
= PAGE_SIZE
- (cur_phys_dst
& PAGE_MASK
);
137 if (cnt_src
> cnt_dst
)
144 /* Do a physical copy */
145 if (EFAULT
== ml_copy_phys(cur_phys_src
,
154 return (len
- resid
);
158 kdp_machine_phys_read(kdp_readphysmem64_req_t
*rq
, caddr_t dst
,
161 mach_vm_address_t src
= rq
->address
;
162 mach_vm_size_t len
= rq
->nbytes
;
164 addr64_t cur_virt_dst
;
165 addr64_t cur_phys_dst
, cur_phys_src
;
166 mach_vm_size_t resid
= len
;
167 mach_vm_size_t cnt
= 0, cnt_src
, cnt_dst
;
169 if ((lcpu
!= KDP_CURRENT_LCPU
) && (lcpu
!= cpu_number())) {
170 return (mach_vm_size_t
)
171 kdp_x86_xcpu_invoke(lcpu
, (kdp_x86_xcpu_func_t
)kdp_machine_phys_read
, rq
, dst
);
174 #ifdef KDP_VM_READ_DEBUG
175 printf("kdp_phys_read: src %llx dst %p len %llx\n", src
, (void *)dst
, len
);
178 cur_virt_dst
= (addr64_t
)(intptr_t)dst
;
179 cur_phys_src
= (addr64_t
)src
;
183 if(!(cur_phys_dst
= kdp_vtophys(kernel_pmap
, cur_virt_dst
)))
186 /* Get length left on page */
187 cnt_src
= PAGE_SIZE
- (cur_phys_src
& PAGE_MASK
);
188 cnt_dst
= PAGE_SIZE
- (cur_phys_dst
& PAGE_MASK
);
189 if (cnt_src
> cnt_dst
)
196 /* Do a physical copy; use ml_copy_phys() in the event this is
197 * a short read with potential side effects.
199 if (EFAULT
== ml_copy_phys(cur_phys_src
,
208 return (len
- resid
);
215 kdp_machine_vm_write( caddr_t src
, mach_vm_address_t dst
, mach_vm_size_t len
)
217 addr64_t cur_virt_src
, cur_virt_dst
;
218 addr64_t cur_phys_src
, cur_phys_dst
;
219 unsigned resid
, cnt
, cnt_src
, cnt_dst
;
221 #ifdef KDP_VM_WRITE_DEBUG
222 printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src
, dst
, len
, ((unsigned int *)src
)[0], ((unsigned int *)src
)[1]);
225 cur_virt_src
= PAL_KDP_ADDR((addr64_t
)(intptr_t)src
);
226 cur_virt_dst
= PAL_KDP_ADDR((addr64_t
)dst
);
228 resid
= (unsigned)len
;
231 if ((cur_phys_dst
= kdp_vtophys(kernel_pmap
, cur_virt_dst
)) == 0)
234 if ((cur_phys_src
= kdp_vtophys(kernel_pmap
, cur_virt_src
)) == 0)
237 /* Copy as many bytes as possible without crossing a page */
238 cnt_src
= (unsigned)(PAGE_SIZE
- (cur_phys_src
& PAGE_MASK
));
239 cnt_dst
= (unsigned)(PAGE_SIZE
- (cur_phys_dst
& PAGE_MASK
));
241 if (cnt_src
> cnt_dst
)
248 if (EFAULT
== ml_copy_phys(cur_phys_src
, cur_phys_dst
, cnt
))
249 goto exit
; /* Copy stuff over */
256 return (len
- resid
);
263 kdp_machine_phys_write(kdp_writephysmem64_req_t
*rq
, caddr_t src
,
266 mach_vm_address_t dst
= rq
->address
;
267 mach_vm_size_t len
= rq
->nbytes
;
268 addr64_t cur_virt_src
;
269 addr64_t cur_phys_src
, cur_phys_dst
;
270 unsigned resid
, cnt
, cnt_src
, cnt_dst
;
272 if ((lcpu
!= KDP_CURRENT_LCPU
) && (lcpu
!= cpu_number())) {
273 return (mach_vm_size_t
)
274 kdp_x86_xcpu_invoke(lcpu
, (kdp_x86_xcpu_func_t
)kdp_machine_phys_write
, rq
, src
);
277 #ifdef KDP_VM_WRITE_DEBUG
278 printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src
, dst
, len
, ((unsigned int *)src
)[0], ((unsigned int *)src
)[1]);
281 cur_virt_src
= (addr64_t
)(intptr_t)src
;
282 cur_phys_dst
= (addr64_t
)dst
;
284 resid
= (unsigned)len
;
287 if ((cur_phys_src
= kdp_vtophys(kernel_pmap
, cur_virt_src
)) == 0)
290 /* Copy as many bytes as possible without crossing a page */
291 cnt_src
= (unsigned)(PAGE_SIZE
- (cur_phys_src
& PAGE_MASK
));
292 cnt_dst
= (unsigned)(PAGE_SIZE
- (cur_phys_dst
& PAGE_MASK
));
294 if (cnt_src
> cnt_dst
)
301 if (EFAULT
== ml_copy_phys(cur_phys_src
, cur_phys_dst
, cnt
))
302 goto exit
; /* Copy stuff over */
310 return (len
- resid
);
314 kdp_machine_ioport_read(kdp_readioport_req_t
*rq
, caddr_t data
, uint16_t lcpu
)
316 uint16_t addr
= rq
->address
;
317 uint16_t size
= rq
->nbytes
;
319 if ((lcpu
!= KDP_CURRENT_LCPU
) && (lcpu
!= cpu_number())) {
320 return (int) kdp_x86_xcpu_invoke(lcpu
, (kdp_x86_xcpu_func_t
)kdp_machine_ioport_read
, rq
, data
);
326 *((uint8_t *) data
) = inb(addr
);
329 *((uint16_t *) data
) = inw(addr
);
332 *((uint32_t *) data
) = inl(addr
);
335 return KDPERR_BADFLAVOR
;
339 return KDPERR_NO_ERROR
;
343 kdp_machine_ioport_write(kdp_writeioport_req_t
*rq
, caddr_t data
, uint16_t lcpu
)
345 uint16_t addr
= rq
->address
;
346 uint16_t size
= rq
->nbytes
;
348 if ((lcpu
!= KDP_CURRENT_LCPU
) && (lcpu
!= cpu_number())) {
349 return (int) kdp_x86_xcpu_invoke(lcpu
, (kdp_x86_xcpu_func_t
)kdp_machine_ioport_write
, rq
, data
);
355 outb(addr
, *((uint8_t *) data
));
358 outw(addr
, *((uint16_t *) data
));
361 outl(addr
, *((uint32_t *) data
));
364 return KDPERR_BADFLAVOR
;
368 return KDPERR_NO_ERROR
;
372 kdp_machine_msr64_read(kdp_readmsr64_req_t
*rq
, caddr_t data
, uint16_t lcpu
)
374 uint64_t *value
= (uint64_t *) data
;
375 uint32_t msr
= rq
->address
;
377 if ((lcpu
!= KDP_CURRENT_LCPU
) && (lcpu
!= cpu_number())) {
378 return (int) kdp_x86_xcpu_invoke(lcpu
, (kdp_x86_xcpu_func_t
)kdp_machine_msr64_read
, rq
, data
);
381 *value
= rdmsr64(msr
);
382 return KDPERR_NO_ERROR
;
386 kdp_machine_msr64_write(kdp_writemsr64_req_t
*rq
, caddr_t data
, uint16_t lcpu
)
388 uint64_t *value
= (uint64_t *) data
;
389 uint32_t msr
= rq
->address
;
391 if ((lcpu
!= KDP_CURRENT_LCPU
) && (lcpu
!= cpu_number())) {
392 return (int) kdp_x86_xcpu_invoke(lcpu
, (kdp_x86_xcpu_func_t
)kdp_machine_msr64_write
, rq
, data
);
395 wrmsr64(msr
, *value
);
396 return KDPERR_NO_ERROR
;
399 pt_entry_t
*debugger_ptep
;
400 vm_map_offset_t debugger_window_kva
;
402 /* Establish a pagetable window that can be remapped on demand.
403 * This is utilized by the debugger to address regions outside
408 kdp_machine_init(void) {
409 if (debug_boot_arg
== 0)
413 kern_return_t kr
= vm_map_find_space(kernel_map
,
414 &debugger_window_kva
,
416 VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK
), &e
);
418 if (kr
!= KERN_SUCCESS
) {
419 panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__
, kr
);
422 vm_map_unlock(kernel_map
);
424 debugger_ptep
= pmap_pte(kernel_pmap
, debugger_window_kva
);
426 if (debugger_ptep
== NULL
) {
427 pmap_expand(kernel_pmap
, debugger_window_kva
, PMAP_EXPAND_OPTIONS_NONE
);
428 debugger_ptep
= pmap_pte(kernel_pmap
, debugger_window_kva
);