/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>

#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/misc_protos.h>
/*
 *	pmap_zero_page zeros the specified (machine independent) page.
 */
void
pmap_zero_page(
	ppnum_t pn)
{
	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
}
/*
 *	pmap_zero_part_page
 *	zeros the specified (machine independent) part of a page.
 */
void
pmap_zero_part_page(
	ppnum_t		pn,
	vm_offset_t	offset,
	vm_size_t	len)
{
	assert(pn != vm_page_fictitious_addr);
	assert(pn != vm_page_guard_addr);
	assert(offset + len <= PAGE_SIZE);

	bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
}
/*
 *	pmap_copy_part_page copies the specified (machine independent)
 *	part of a page.
 */
void
pmap_copy_part_page(
	ppnum_t		psrc,
	vm_offset_t	src_offset,
	ppnum_t		pdst,
	vm_offset_t	dst_offset,
	vm_size_t	len)
{
	pmap_paddr_t	src, dst;

	assert(psrc != vm_page_fictitious_addr);
	assert(pdst != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert(pdst != vm_page_guard_addr);

	src = i386_ptob(psrc);
	dst = i386_ptob(pdst);

	assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
	assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);

	bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
		   (addr64_t)dst + (dst_offset & INTEL_OFFMASK),
		   len);
}
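/*
 * Usage sketch (illustrative, not from the original source): copying the
 * first 256 bytes of a hypothetical source page `psrc` into the tail of a
 * destination page `pdst` might be written as
 *
 *	pmap_copy_part_page(psrc, 0, pdst, PAGE_SIZE - 256, 256);
 *
 * Both (offset + len) ranges must stay within their respective pages, as
 * the assertions above enforce.
 */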
/*
 *	pmap_copy_part_lpage copies part of a virtually addressed page
 *	to a physically addressed page.
 */
void
pmap_copy_part_lpage(
	__unused vm_offset_t	src,
	__unused ppnum_t	pdst,
	__unused vm_offset_t	dst_offset,
	__unused vm_size_t	len)
{
	assert(pdst != vm_page_fictitious_addr);
	assert(pdst != vm_page_guard_addr);
	assert((dst_offset + len) <= PAGE_SIZE);
}
/*
 *	pmap_copy_part_rpage copies part of a physically addressed page
 *	to a virtually addressed page.
 */
void
pmap_copy_part_rpage(
	__unused ppnum_t	psrc,
	__unused vm_offset_t	src_offset,
	__unused vm_offset_t	dst,
	__unused vm_size_t	len)
{
	assert(psrc != vm_page_fictitious_addr);
	assert(psrc != vm_page_guard_addr);
	assert((src_offset + len) <= PAGE_SIZE);
}
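/*
 * Note: on this platform pmap_copy_part_lpage and pmap_copy_part_rpage are
 * assertion-only stubs; their bodies validate the arguments but perform no
 * copy.
 */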
/*
 *	kvtophys(addr)
 *
 *	Convert a kernel virtual address to a physical address
 */
addr64_t
kvtophys(
	vm_offset_t addr)
{
	pmap_paddr_t pa;

	pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
	if (pa)
		pa |= (addr & INTEL_OFFMASK);

	return ((addr64_t)pa);
}
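/*
 * Usage sketch (illustrative, not from the original source): kvtophys() can
 * be used to learn where a wired kernel buffer lives physically. With `buf`
 * a hypothetical kernel-mapped, wired allocation:
 *
 *	addr64_t pa = kvtophys((vm_offset_t)buf);
 *
 * A zero return means pmap_find_phys() found no valid mapping for the
 * address; otherwise the low INTEL_OFFMASK bits carry the byte offset
 * within the physical page.
 */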
extern pt_entry_t *debugger_ptep;
extern vm_map_offset_t debugger_window_kva;
extern int _bcopy(const void *, void *, vm_size_t);
extern int _bcopy2(const void *, void *);
extern int _bcopy4(const void *, void *);
extern int _bcopy8(const void *, void *);
__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
	void	*src, *dst;
	int	err = 0;

	mp_disable_preemption();
#if NCOPY_WINDOWS > 0
	mapwindow_t *src_map, *dst_map;

	/* We rely on MTRRs here */
	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));

	src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
	dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
#elif defined(__x86_64__)
	addr64_t debug_pa = 0;

	/* If either the destination or the source is outside the
	 * physical map, establish a physical window onto the target frame.
	 */
	assert(physmap_enclosed(src64) || physmap_enclosed(dst64));

	if (physmap_enclosed(src64) == FALSE) {
		src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
		dst = PHYSMAP_PTOV(dst64);
		debug_pa = src64 & PG_FRAME;
	} else if (physmap_enclosed(dst64) == FALSE) {
		src = PHYSMAP_PTOV(src64);
		dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
		debug_pa = dst64 & PG_FRAME;
	} else {
		src = PHYSMAP_PTOV(src64);
		dst = PHYSMAP_PTOV(dst64);
	}

	/* DRK: debugger only routine, we don't bother checking for an
	 * identical mapping.
	 */
	if (debug_pa) {
		if (debugger_window_kva == 0)
			panic("%s: invoked in non-debug mode", __FUNCTION__);
		/* Establish a cache-inhibited physical window; some platforms
		 * may not cover arbitrary ranges with MTRRs
		 */
		pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_VALID);
		flush_tlb_raw();
#if DEBUG
		kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
#endif
	}
#endif
	/* ensure we stay within a page */
	if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ||
	    ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES)) {
		panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
	}

	/*
	 * For device register access from the debugger,
	 * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
	 * by assembly routines ensuring the required access widths.
	 * 1-byte and other copies are handled by the regular _bcopy.
	 */
	switch (bytes) {
	case 2:
		err = _bcopy2(src, dst);
		break;
	case 4:
		err = _bcopy4(src, dst);
		break;
	case 8:
		err = _bcopy8(src, dst);
		break;
	case 1:
	default:
		err = _bcopy(src, dst, bytes);
		break;
	}

#if NCOPY_WINDOWS > 0
	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);
#endif
	mp_enable_preemption();

	return err;
}
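/*
 * Usage sketch (illustrative, not from the original source): reading a
 * hypothetical 32-bit device register at physical address `reg_pa` into a
 * local variable from debugger context might look like
 *
 *	uint32_t val;
 *	int err = ml_copy_phys(reg_pa, kvtophys((vm_offset_t)&val), sizeof(val));
 *
 * A 4-byte length routes the copy through _bcopy4(), which performs a single
 * access of the required width; the return value of the underlying copy
 * routine is propagated back to the caller. Both source and destination
 * ranges must fit within a single page, or ml_copy_phys() panics.
 */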