/* osfmk/kdp/ml/i386/kdp_x86_common.c (xnu-1699.32.7) */
/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/mp.h>
#include <i386/misc_protos.h>
#include <i386/pio.h>
#include <i386/proc_reg.h>

#include <i386/pmap_internal.h>

#include <kdp/kdp_internal.h>
#include <mach/vm_map.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <machine/pal_routines.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1

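/*
 * Debugger-tunable flags: kdp_trans_off makes virtual reads bypass address
 * translation and treat the source as a physical address, while kdp_read_io
 * permits copies involving physical pages that are not managed RAM (e.g.
 * memory-mapped I/O space).
 */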
boolean_t kdp_read_io;
boolean_t kdp_trans_off;

static addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

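/*
 * When non-NULL, kdp_pmap names an alternate address space whose mappings
 * are used to translate source addresses in kdp_machine_vm_read().
 */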
pmap_t kdp_pmap = 0;

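/*
 * Translate a virtual address to a physical address using the supplied
 * pmap. Returns 0 if no valid mapping exists for the page.
 */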
static addr64_t
kdp_vtophys(
	pmap_t pmap,
	addr64_t va)
{
	addr64_t pa;
	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);
	if (!pp)
		return 0;

	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);

	return (pa);
}

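/*
 * Read 'len' bytes of virtually addressed memory into the kernel buffer
 * 'dst'. The copy proceeds page by page through physical addresses and
 * returns the number of bytes actually copied.
 */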
mach_vm_size_t
kdp_machine_vm_read(mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
	addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
	pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	if (kdp_trans_off) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
		return ret;
	}

	/* If a different pmap has been specified with kdp_pmap, use it to translate the
	 * source (cur_virt_src); otherwise, the source is translated using the
	 * kernel_pmap.
	 */
	if (kdp_pmap)
		src_pmap = kdp_pmap;

	while (resid != 0) {
		if (!(cur_phys_src = kdp_vtophys(src_pmap, cur_virt_src)))
			goto exit;

		/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Validate physical page numbers unless kdp_read_io is set */
		if (kdp_read_io == FALSE)
			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
				goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy */
		ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt);

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

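/*
 * Read 'rq->nbytes' bytes of physical memory into the kernel buffer 'dst'.
 * If the request targets another logical CPU, it is forwarded to that CPU
 * via kdp_x86_xcpu_invoke().
 */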
mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
		      uint16_t lcpu)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
			kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
	}

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	cur_virt_dst = (addr64_t)(intptr_t)dst;
	cur_phys_src = (addr64_t)src;

	while (resid != 0) {

		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy; use ml_copy_phys() in the event this is
		 * a short read with potential side effects.
		 */
		ml_copy_phys(cur_phys_src, cur_phys_dst, (vm_size_t)cnt);
		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * Write 'len' bytes from the kernel buffer 'src' to the virtually addressed
 * destination 'dst', copying page by page through physical translations.
 */
mach_vm_size_t
kdp_machine_vm_write(caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
	cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		ml_copy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * Write 'rq->nbytes' bytes from the kernel buffer 'src' to the physical
 * address in 'rq->address'. If the request targets another logical CPU,
 * it is forwarded to that CPU via kdp_x86_xcpu_invoke().
 */
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
		       uint16_t lcpu)
{
	mach_vm_address_t dst = rq->address;
	mach_vm_size_t len = rq->nbytes;
	addr64_t cur_virt_src;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
			kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
	}

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = (addr64_t)(intptr_t)src;
	cur_phys_dst = (addr64_t)dst;

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		ml_copy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */

		cur_virt_src += cnt;
		cur_phys_dst += cnt;
		resid -= cnt;
	}

exit:
	return (len - resid);
}

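/*
 * Read a 1-, 2-, or 4-byte value from the x86 I/O port in 'rq->address',
 * optionally on another logical CPU. Unsupported sizes return
 * KDPERR_BADFLAVOR.
 */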
int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
	}

	switch (size)
	{
	case 1:
		*((uint8_t *) data) = inb(addr);
		break;
	case 2:
		*((uint16_t *) data) = inw(addr);
		break;
	case 4:
		*((uint32_t *) data) = inl(addr);
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}

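/*
 * Write a 1-, 2-, or 4-byte value to the x86 I/O port in 'rq->address',
 * optionally on another logical CPU.
 */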
int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
	}

	switch (size)
	{
	case 1:
		outb(addr, *((uint8_t *) data));
		break;
	case 2:
		outw(addr, *((uint16_t *) data));
		break;
	case 4:
		outl(addr, *((uint32_t *) data));
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}

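/*
 * Read the 64-bit model-specific register named by 'rq->address',
 * optionally on another logical CPU.
 */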
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
	}

	*value = rdmsr64(msr);
	return KDPERR_NO_ERROR;
}

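/*
 * Write a 64-bit value to the model-specific register named by
 * 'rq->address', optionally on another logical CPU.
 */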
int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
	}

	wrmsr64(msr, *value);
	return KDPERR_NO_ERROR;
}

pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */

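/* The window is only set up when kernel debugging has been enabled
 * (debug_boot_arg is non-zero).
 */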
void
kdp_machine_init(void) {
	if (debug_boot_arg == 0)
		return;

	vm_map_entry_t e;
	kern_return_t kr = vm_map_find_space(kernel_map,
	    &debugger_window_kva,
	    PAGE_SIZE, 0,
	    VM_MAKE_TAG(VM_MEMORY_IOKIT), &e);

	if (kr != KERN_SUCCESS) {
		panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
	}

	vm_map_unlock(kernel_map);

	debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

	if (debugger_ptep == NULL) {
		pmap_expand(kernel_pmap, debugger_window_kva);
		debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
	}
}