/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>

/*
 * pmap_zero_page zeros the specified (machine independent) page.
 */
void
pmap_zero_page(
        ppnum_t pn)
{
        assert(pn != vm_page_fictitious_addr);
        assert(pn != vm_page_guard_addr);
        bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
}

/*
 * pmap_zero_part_page
 *      zeros the specified (machine independent) part of a page.
 */
void
pmap_zero_part_page(
        ppnum_t pn,
        vm_offset_t offset,
        vm_size_t len)
{
        assert(pn != vm_page_fictitious_addr);
        assert(pn != vm_page_guard_addr);
        assert(offset + len <= PAGE_SIZE);
        bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
}

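/*
 * Illustrative sketch (not compiled): how a caller might use the two
 * zeroing primitives above.  The function name and the partial-zero range
 * are hypothetical examples, not taken from this file.
 */
#if 0
static void
example_zero_usage(ppnum_t pn)
{
        /* Clear the entire frame backing physical page 'pn'... */
        pmap_zero_page(pn);

        /* ...or clear only its first 512 bytes; offset + len must not
         * exceed PAGE_SIZE, as the assertion above enforces. */
        pmap_zero_part_page(pn, 0, 512);
}
#endif
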
/*
 * pmap_copy_part_page copies the specified part of a (machine independent)
 * page from one physical page to another.
 */
void
pmap_copy_part_page(
        ppnum_t psrc,
        vm_offset_t src_offset,
        ppnum_t pdst,
        vm_offset_t dst_offset,
        vm_size_t len)
{
        pmap_paddr_t src, dst;

        assert(psrc != vm_page_fictitious_addr);
        assert(pdst != vm_page_fictitious_addr);
        assert(psrc != vm_page_guard_addr);
        assert(pdst != vm_page_guard_addr);

        src = i386_ptob(psrc);
        dst = i386_ptob(pdst);

        assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
        assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

        bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
                   (addr64_t)dst + (dst_offset & INTEL_OFFMASK),
                   len);
}

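/*
 * Illustrative sketch (not compiled): copying a sub-range between two
 * physical pages with pmap_copy_part_page.  The offsets and length are
 * made-up values; each (offset + len) range must fit within one page.
 */
#if 0
static void
example_copy_part(ppnum_t src_pn, ppnum_t dst_pn)
{
        /* Copy 256 bytes starting at offset 0x100 of the source page
         * into the destination page at offset 0x800. */
        pmap_copy_part_page(src_pn, 0x100, dst_pn, 0x800, 256);
}
#endif
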
/*
 * pmap_copy_part_lpage copies part of a virtually addressed page
 * to a physically addressed page.
 */
void
pmap_copy_part_lpage(
        __unused vm_offset_t src,
        __unused ppnum_t pdst,
        __unused vm_offset_t dst_offset,
        __unused vm_size_t len)
{
        assert(pdst != vm_page_fictitious_addr);
        assert(pdst != vm_page_guard_addr);
        assert((dst_offset + len) <= PAGE_SIZE);
}

/*
 * pmap_copy_part_rpage copies part of a physically addressed page
 * to a virtually addressed page.
 */
void
pmap_copy_part_rpage(
        __unused ppnum_t psrc,
        __unused vm_offset_t src_offset,
        __unused vm_offset_t dst,
        __unused vm_size_t len)
{
        assert(psrc != vm_page_fictitious_addr);
        assert(psrc != vm_page_guard_addr);
        assert((src_offset + len) <= PAGE_SIZE);
}

/*
 * kvtophys(addr)
 *
 * Convert a kernel virtual address to a physical address
 */
addr64_t
kvtophys(
        vm_offset_t addr)
{
        pmap_paddr_t pa;

        pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
        if (pa)
                pa |= (addr & INTEL_OFFMASK);

        return ((addr64_t)pa);
}

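/*
 * Illustrative sketch (not compiled): translating a kernel virtual address
 * with kvtophys.  A zero return means the address is not currently mapped
 * in the kernel pmap; when a translation exists, the page-offset bits of
 * the input are preserved in the result.  The buffer argument and function
 * name are hypothetical.
 */
#if 0
static void
example_kvtophys(void *kernel_buffer)
{
        addr64_t pa = kvtophys((vm_offset_t)kernel_buffer);

        if (pa == 0)
                kprintf("no translation for %p\n", kernel_buffer);
        else
                kprintf("%p -> physical 0x%llx\n", kernel_buffer, pa);
}
#endif
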
extern pt_entry_t *debugger_ptep;
extern vm_map_offset_t debugger_window_kva;
extern int _bcopy(const void *, void *, vm_size_t);
extern int _bcopy2(const void *, void *);
extern int _bcopy4(const void *, void *);
extern int _bcopy8(const void *, void *);
__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
        void *src, *dst;
        int err = 0;

        mp_disable_preemption();
#if NCOPY_WINDOWS > 0
        mapwindow_t *src_map, *dst_map;
        /* We rely on MTRRs here */
        src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
        dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
        src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
        dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
#elif defined(__x86_64__)
        addr64_t debug_pa = 0;

        /* If either the destination or the source is outside the
         * physical map, establish a physical window onto the target frame.
         */
        assert(physmap_enclosed(src64) || physmap_enclosed(dst64));

        if (physmap_enclosed(src64) == FALSE) {
                src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
                dst = PHYSMAP_PTOV(dst64);
                debug_pa = src64 & PG_FRAME;
        } else if (physmap_enclosed(dst64) == FALSE) {
                src = PHYSMAP_PTOV(src64);
                dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
                debug_pa = dst64 & PG_FRAME;
        } else {
                src = PHYSMAP_PTOV(src64);
                dst = PHYSMAP_PTOV(dst64);
        }
        /* DRK: debugger-only routine; we don't bother checking for an
         * identical mapping.
         */
        if (debug_pa) {
                if (debugger_window_kva == 0)
                        panic("%s: invoked in non-debug mode", __FUNCTION__);
                /* Establish a cache-inhibited physical window; some platforms
                 * may not cover arbitrary ranges with MTRRs
                 */
                pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_VALID);
                flush_tlb_raw();
#if DEBUG
                kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
#endif
        }
#endif
        /* ensure we stay within a page */
        if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES)) {
                panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
        }

        /*
         * For device register access from the debugger,
         * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
         * by assembly routines ensuring the required access widths.
         * 1-byte and other copies are handled by the regular _bcopy.
         */
        switch (bytes) {
        case 2:
                err = _bcopy2(src, dst);
                break;
        case 4:
                err = _bcopy4(src, dst);
                break;
        case 8:
                err = _bcopy8(src, dst);
                break;
        case 1:
        default:
                err = _bcopy(src, dst, bytes);
                break;
        }

#if NCOPY_WINDOWS > 0
        pmap_put_mapwindow(src_map);
        pmap_put_mapwindow(dst_map);
#endif
        mp_enable_preemption();

        return err;
}
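
/*
 * Illustrative sketch (not compiled): reading a 32-bit device register
 * through ml_copy_phys.  The register physical address is a hypothetical
 * placeholder; the 4-byte length selects the width-preserving _bcopy4 path,
 * and the copy must stay within a single page.  A nonzero return is treated
 * here as a faulted copy.
 */
#if 0
static uint32_t
example_read_device_reg32(addr64_t reg_pa)
{
        uint32_t value = 0;

        /* Copy from the device register into a kernel-virtual buffer;
         * kvtophys() supplies the physical address of the destination. */
        if (ml_copy_phys(reg_pa, kvtophys((vm_offset_t)&value), sizeof(value)) != 0)
                return 0;

        return value;
}
#endif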