/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.h
 *
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */
#ifdef KERNEL_PRIVATE
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_  1

#ifndef ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>

#include <i386/mp.h>
#include <i386/proc_reg.h>

/*
 *	Define the generic in terms of the specific
 */

#define INTEL_PGBYTES		I386_PGBYTES
#define INTEL_PGSHIFT		I386_PGSHIFT
#define intel_btop(x)		i386_btop(x)
#define intel_ptob(x)		i386_ptob(x)
#define intel_round_page(x)	i386_round_page(x)
#define intel_trunc_page(x)	i386_trunc_page(x)
#define trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define round_intel_to_vm(x)	round_i386_to_vm(x)
#define vm_to_intel(x)		vm_to_i386(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

#endif	/* ASSEMBLER */

#define NPGPTD		4
#define PDESHIFT	21
#define PTEMASK		0x1ff
#define PTEINDX		3

#define PTESHIFT	12

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#define INTEL_OFFMASK	(I386_PGBYTES - 1)
#define PG_FRAME	0x000FFFFFFFFFF000ULL
#define NPTEPG		(PAGE_SIZE/(sizeof (pt_entry_t)))
#define NPTDPG		(PAGE_SIZE/(sizeof (pd_entry_t)))

#define NBPTD		(NPGPTD << PAGE_SHIFT)
#define NPDEPTD		(NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE		(1 << PDESHIFT)
#define PDEMASK		(NBPDE - 1)

/* cleanly define parameters for all the page table levels */
typedef uint64_t	pml4_entry_t;
#define NPML4PG		(PAGE_SIZE/(sizeof (pml4_entry_t)))
#define PML4SHIFT	39
#define PML4PGSHIFT	9
#define NBPML4		(1ULL << PML4SHIFT)
#define PML4MASK	(NBPML4-1)
#define PML4_ENTRY_NULL	((pml4_entry_t *) 0)

typedef uint64_t	pdpt_entry_t;
#define NPDPTPG		(PAGE_SIZE/(sizeof (pdpt_entry_t)))
#define PDPTSHIFT	30
#define PDPTPGSHIFT	9
#define NBPDPT		(1 << PDPTSHIFT)
#define PDPTMASK	(NBPDPT-1)
#define PDPT_ENTRY_NULL	((pdpt_entry_t *) 0)

typedef uint64_t	pd_entry_t;
#define NPDPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define PDSHIFT		21
#define PDPGSHIFT	9
#define NBPD		(1 << PDSHIFT)
#define PDMASK		(NBPD-1)
#define PD_ENTRY_NULL	((pd_entry_t *) 0)

typedef uint64_t	pt_entry_t;
#define NPTPG		(PAGE_SIZE/(sizeof (pt_entry_t)))
#define PTSHIFT		12
#define PTPGSHIFT	9
#define NBPT		(1 << PTSHIFT)
#define PTMASK		(NBPT-1)
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

typedef uint64_t	pmap_paddr_t;

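/*
 * Illustrative sketch, not part of the build (compiled out): how a 64-bit
 * linear address splits into the four table indices and the page offset
 * using the shift constants defined above.  Each index is 9 bits wide
 * (512 entries per 4K table); the sample address, main() and printf are
 * only for the demonstration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t va = 0x00007f1234567abcULL;		/* arbitrary example */

	unsigned pml4 = (va >> PML4SHIFT) & 0x1ff;	/* bits 47..39 */
	unsigned pdpt = (va >> PDPTSHIFT) & 0x1ff;	/* bits 38..30 */
	unsigned pde  = (va >> PDSHIFT)   & 0x1ff;	/* bits 29..21 */
	unsigned pte  = (va >> PTSHIFT)   & 0x1ff;	/* bits 20..12 */
	unsigned off  = (unsigned)(va & (NBPT - 1));	/* bits 11..0  */

	printf("pml4 %u  pdpt %u  pde %u  pte %u  offset 0x%x\n",
	       pml4, pdpt, pde, pte, off);
	return 0;
}
#endif
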
/*
 * Atomic 64-bit store of a page table entry.
 */
static inline void
pmap_store_pte(pt_entry_t *entryp, pt_entry_t value)
{
	/*
	 * Load the new value into %ecx:%ebx
	 * Load the old value into %edx:%eax
	 * Compare-exchange-8bytes at address entryp (loaded in %edi)
	 * If the compare succeeds, the new value will have been stored.
	 * Otherwise, the old value changed and has been reloaded, so try again.
	 */
	asm volatile(
		"	movl	(%0), %%eax	\n\t"
		"	movl	4(%0), %%edx	\n\t"
		"1:				\n\t"
		"	cmpxchg8b (%0)		\n\t"
		"	jnz 1b"
		:
		: "D" (entryp),
		  "b" ((uint32_t)value),
		  "c" ((uint32_t)(value >> 32))
		: "eax", "edx", "memory");
}

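/*
 * The assembly above exists because a plain 64-bit store on a 32-bit
 * processor may be split into two 32-bit stores; the cmpxchg8b loop makes
 * the store single-copy atomic.  A rough C11 rendering of the same idea is
 * sketched below (illustrative only, compiled out; the example_* names and
 * user-space headers are assumptions, and the kernel uses the inline
 * assembly version above).
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t example_pte_t;		/* stands in for pt_entry_t here */

static inline void
example_store_pte(_Atomic example_pte_t *entryp, example_pte_t value)
{
	/*
	 * Forces the compiler to emit a single-copy-atomic 64-bit store
	 * (on i386 typically a cmpxchg8b loop, as in the code above).
	 */
	atomic_store_explicit(entryp, value, memory_order_relaxed);
}
#endif
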
/*
 * Atomic 64-bit compare and exchange of a page table entry.
 */
static inline boolean_t
pmap_cmpx_pte(pt_entry_t *entryp, pt_entry_t old, pt_entry_t new)
{
	boolean_t ret;

	/*
	 * Load the old value into %edx:%eax
	 * Load the new value into %ecx:%ebx
	 * Compare-exchange-8bytes at address entryp (loaded in %edi)
	 * If the compare succeeds, the new value is stored, return TRUE.
	 * Otherwise, no swap is made, return FALSE.
	 */
	asm volatile(
		"	lock; cmpxchg8b (%1)	\n\t"
		"	setz	%%al		\n\t"
		"	movzbl	%%al,%0"
		: "=a" (ret)
		: "D" (entryp),
		  "a" ((uint32_t)old),
		  "d" ((uint32_t)(old >> 32)),
		  "b" ((uint32_t)new),
		  "c" ((uint32_t)(new >> 32))
		: "memory");
	return ret;
}

#define pmap_update_pte(entryp, old, new) \
	while (!pmap_cmpx_pte((entryp), (old), (new)))

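/*
 * Typical use of these primitives is a lossless read-modify-write of a
 * live PTE: snapshot the entry, compute the new value, and retry if some
 * other agent (another CPU, or the MMU setting the referenced/modified
 * bits) changed it in between.  A sketch follows (compiled out; the helper
 * name is hypothetical, and the INTEL_PTE_* flag values it would be used
 * with are defined later in this header).  The pmap_update_pte() macro
 * above expresses the same retry loop; because it is a macro, its old/new
 * argument expressions are re-evaluated on every iteration.
 */
#if 0
/* Hypothetical helper: OR bits into a PTE without losing concurrent updates. */
static inline void
example_pte_set_bits(pt_entry_t *ptep, pt_entry_t bits)
{
	pt_entry_t old;

	do {
		old = *ptep;			/* snapshot the current entry */
	} while (!pmap_cmpx_pte(ptep, old, old | bits));
}
#endif
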
/* in 64 bit spaces, the number of each type of page in the page tables */
#define NPML4PGS	(1ULL * (PAGE_SIZE/(sizeof (pml4_entry_t))))
#define NPDPTPGS	(NPML4PGS * (PAGE_SIZE/(sizeof (pdpt_entry_t))))
#define NPDEPGS		(NPDPTPGS * (PAGE_SIZE/(sizeof (pd_entry_t))))
#define NPTEPGS		(NPDEPGS * (PAGE_SIZE/(sizeof (pt_entry_t))))

/*
 * The 64-bit kernel is remapped in uber-space, which is at the base of
 * the highest 4th-level directory (KERNEL_UBER_PML4_INDEX), that is,
 * 512GB from the top of virtual space (or zero).
 */
#define KERNEL_UBER_PML4_INDEX	511
#define KERNEL_UBER_BASE	(0ULL - NBPML4)
#define KERNEL_UBER_BASE_HI32	((uint32_t)(KERNEL_UBER_BASE >> 32))

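/*
 * Worked check of the constants above (compiled out): one PML4 slot spans
 * NBPML4 = 2^39 bytes = 512GB, so an address NBPML4 below the top of the
 * 64-bit space falls in the last slot, index 511.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t uber_base = 0ULL - (1ULL << 39);	/* KERNEL_UBER_BASE */

	assert(uber_base == 0xFFFFFF8000000000ULL);
	assert(((uber_base >> 39) & 0x1ff) == 511);	/* KERNEL_UBER_PML4_INDEX */
	return 0;
}
#endif
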
#define VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define VM_WIMG_DEFAULT		VM_MEM_COHERENT
/* ?? intel ?? */
#define VM_WIMG_IO		(VM_MEM_COHERENT |	\
				VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)

/*
 * Pte related macros
 */
#define VADDR(pdi, pti)		((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))
#define VADDR64(pmi, pdi, pti)	((vm_offset_t)(((pmi)<<PML4SHIFT)|((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))

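/*
 * VADDR() is essentially the inverse of the pdenum()/ptenum() index macros
 * defined further down: for a 32-bit address, recombining the directory
 * index, the table index and the page offset reproduces the original
 * address.  Compiled-out sketch (the sample value is arbitrary):
 */
#if 0
#include <assert.h>

static void
example_vaddr_roundtrip(void)
{
	vm_offset_t va = 0x1234567;

	assert((VADDR(pdenum(NULL, va), ptenum(va)) | (va & INTEL_OFFMASK)) == va);
}
#endif
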
/*
 *	Size of Kernel address space.  This is the number of page table pages
 *	(4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 *	This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES	1024
#endif

#ifndef NKPT
#define NKPT		500	/* actual number of kernel page tables */
#endif
#ifndef NKPDE
#define NKPDE		(KVA_PAGES - 1)	/* addressable number of page tables/pde's */
#endif


enum high_cpu_types {
	HIGH_CPU_ISS0,
	HIGH_CPU_ISS1,
	HIGH_CPU_DESC,
	HIGH_CPU_LDT_BEGIN,
	HIGH_CPU_LDT_END = HIGH_CPU_LDT_BEGIN + (LDTSZ / 512) - 1,
	HIGH_CPU_END
};

enum high_fixed_addresses {
	HIGH_FIXED_TRAMPS,	/* must be first */
	HIGH_FIXED_TRAMPS_END,
	HIGH_FIXED_GDT,
	HIGH_FIXED_IDT,
	HIGH_FIXED_LDT_BEGIN,
	HIGH_FIXED_LDT_END = HIGH_FIXED_LDT_BEGIN + (LDTSZ / 512) - 1,
	HIGH_FIXED_KTSS,
	HIGH_FIXED_DFTSS,
	HIGH_FIXED_DBTSS,
	HIGH_FIXED_CPUS_BEGIN,
	HIGH_FIXED_CPUS_END = HIGH_FIXED_CPUS_BEGIN + (HIGH_CPU_END * MAX_CPUS) - 1,
};


/* XXX64  below PTDI values need cleanup */
/*
 *	The *PTDI values control the layout of virtual memory
 *
 */
#define KPTDI		(0x000)		/* start of kernel virtual pde's */
#define PTDPTDI		(0x7F4)		/* ptd entry that points to ptd! */
#define APTDPTDI	(0x7F8)		/* alt ptd entry that points to APTD */
#define UMAXPTDI	(0x7F8)		/* ptd entry for user space end */
#define UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */

#define KERNBASE	VADDR(KPTDI,0)

/*
 * Convert address offset to directory address
 * containing the page table pointer - legacy
 */
/*#define pmap_pde(m,v)	(&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT]))*/

#define HIGH_MEM_BASE	((uint32_t)( -NBPDE) )	/* shared gdt etc seg addr */ /* XXX64 ?? */
#define pmap_index_to_virt(x)	(HIGH_MEM_BASE | ((unsigned)(x) << PAGE_SHIFT))

/*
 *	Convert address offset to page descriptor index
 */
#define pdenum(pmap, a)	(((vm_offset_t)(a) >> PDESHIFT) & PDEMASK)

#define pdeidx(pmap, a)		(((a) >> PDSHIFT)   & ((1ULL<<(48 - PDSHIFT)) -1))
#define pdptidx(pmap, a)	(((a) >> PDPTSHIFT) & ((1ULL<<(48 - PDPTSHIFT)) -1))
#define pml4idx(pmap, a)	(((a) >> PML4SHIFT) & ((1ULL<<(48 - PML4SHIFT)) -1))

/*
 *	Convert page descriptor index to user virtual address
 */
#define pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define ptenum(a)	(((vm_offset_t)(a) >> PTESHIFT) & PTEMASK)

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define INTEL_PTE_VALID		0x00000001
#define INTEL_PTE_WRITE		0x00000002
#define INTEL_PTE_RW		0x00000002
#define INTEL_PTE_USER		0x00000004
#define INTEL_PTE_WTHRU		0x00000008
#define INTEL_PTE_NCACHE	0x00000010
#define INTEL_PTE_REF		0x00000020
#define INTEL_PTE_MOD		0x00000040
#define INTEL_PTE_PS		0x00000080
#define INTEL_PTE_GLOBAL	0x00000100
#define INTEL_PTE_WIRED		0x00000200
#define INTEL_PTE_PFN		PG_FRAME
#define INTEL_PTE_PTA		0x00000080

#define INTEL_PTE_NX		(1ULL << 63)

#define INTEL_PTE_INVALID	0

#define pa_to_pte(a)		((a) & INTEL_PTE_PFN) /* XXX */
#define pte_to_pa(p)		((p) & INTEL_PTE_PFN) /* XXX */
#define pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

#define pte_kernel_rw(p)	((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_RW))
#define pte_kernel_ro(p)	((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID))
#define pte_user_rw(p)		((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER|INTEL_PTE_RW))
#define pte_user_ro(p)		((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER))

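/*
 * Sketch of how the PTE templates above combine with pmap_store_pte() to
 * publish a kernel mapping (compiled out).  The PTE slot pointer and the
 * physical address are hypothetical inputs; a real caller would also take
 * the pmap lock, update statistics and invalidate TLBs as needed.
 */
#if 0
static inline void
example_enter_kernel_mapping(pt_entry_t *ptep, pmap_paddr_t pa, boolean_t writable)
{
	pt_entry_t new_pte;

	/*
	 * Frame number plus valid/protection bits; global, wired, cache
	 * attributes and NX are omitted for brevity.
	 */
	new_pte = writable ? pte_kernel_rw(pa) : pte_kernel_ro(pa);

	/* Publish the whole 64-bit entry atomically. */
	pmap_store_pte(ptep, new_pte);
}
#endif
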
#define PMAP_DEFAULT_CACHE	0
#define PMAP_INHIBIT_CACHE	1
#define PMAP_GUARDED_CACHE	2
#define PMAP_ACTIVATE_CACHE	4
#define PMAP_NO_GUARD_CACHE	8


#ifndef ASSEMBLER

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */

extern pt_entry_t	PTmap[], APTmap[], Upte;
extern pd_entry_t	PTD[], APTD[], PTDpde[], APTDpde[], Upde;

extern pd_entry_t	*IdlePTD;	/* physical address of "Idle" state directory */
extern pdpt_entry_t	*IdlePDPT;

extern pmap_paddr_t	lo_kernel_cr3;

extern pml4_entry_t	*IdlePML4;
extern pdpt_entry_t	*IdlePDPT64;
extern addr64_t		kernel64_cr3;
extern boolean_t	no_shared_cr3;

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va)	(PTmap + i386_btop((vm_offset_t)va))


typedef volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */
struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#include <vm/vm_page.h>

/*
 * For each vm_page_t, there is a list of all currently
 * valid virtual mappings of that page.  An entry is
 * a pv_entry_t; the list is the pv_table.
 */

struct pmap {
	pd_entry_t	*dirbase;	/* page directory pointer */
	pmap_paddr_t	pdirbase;	/* phys. address of dirbase */
	vm_object_t	pm_obj;		/* object to hold pde's */
	int		ref_count;	/* reference count */
	int		nx_enabled;
	boolean_t	pm_64bit;
	boolean_t	pm_kernel_cr3;
	boolean_t	pm_shared;
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	vm_offset_t	pm_hold;	/* true pdpt zalloc addr */
	pmap_paddr_t	pm_cr3;		/* physical addr */
	pdpt_entry_t	*pm_pdpt;	/* KVA of 3rd level page */
	pml4_entry_t	*pm_pml4;	/* KVA of top level */
	vm_object_t	pm_obj_pdpt;	/* holds pdpt pages */
	vm_object_t	pm_obj_pml4;	/* holds pml4 pages */
	vm_object_t	pm_obj_top;	/* holds single top level page */
};


#define PMAP_PDPT_FIRST_WINDOW	0
#define PMAP_PDPT_NWINDOWS	4
#define PMAP_PDE_FIRST_WINDOW	(PMAP_PDPT_NWINDOWS)
#define PMAP_PDE_NWINDOWS	4
#define PMAP_PTE_FIRST_WINDOW	(PMAP_PDE_FIRST_WINDOW + PMAP_PDE_NWINDOWS)
#define PMAP_PTE_NWINDOWS	4

#define PMAP_NWINDOWS_FIRSTFREE	(PMAP_PTE_FIRST_WINDOW + PMAP_PTE_NWINDOWS)
#define PMAP_WINDOW_SIZE	8
#define PMAP_NWINDOWS		(PMAP_NWINDOWS_FIRSTFREE + PMAP_WINDOW_SIZE)

typedef struct {
	pt_entry_t	*prv_CMAP;
	caddr_t		prv_CADDR;
} mapwindow_t;

typedef struct cpu_pmap {
	int		pdpt_window_index;
	int		pde_window_index;
	int		pte_window_index;
	mapwindow_t	mapwindow[PMAP_NWINDOWS];
} cpu_pmap_t;


extern mapwindow_t	*pmap_get_mapwindow(pt_entry_t pentry);

typedef struct pmap_memory_regions {
	ppnum_t		base;
	ppnum_t		end;
	ppnum_t		alloc;
	uint32_t	type;
} pmap_memory_region_t;

unsigned pmap_memory_region_count;
unsigned pmap_memory_region_current;

#define PMAP_MEMORY_REGIONS_SIZE	128

extern pmap_memory_region_t pmap_memory_regions[];

static inline void set_dirbase(pmap_t tpmap, __unused int tcpu) {
	current_cpu_datap()->cpu_task_cr3 = (pmap_paddr_t)((tpmap)->pm_cr3);
	current_cpu_datap()->cpu_task_map = tpmap->pm_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
}

/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(void);
extern void		pmap_update_interrupt(void);

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */

extern addr64_t		(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern pd_entry_t	*pmap_pde(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern pd_entry_t	*pmap64_pde(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern pdpt_entry_t	*pmap64_pdpt(
				struct pmap	*pmap,
				vm_map_offset_t	addr);

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	prot,
				unsigned int	flags);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				vm_prot_t	prot,
				unsigned int	flags);

extern void		pmap_bootstrap(
				vm_offset_t	load_start,
				boolean_t	IA32e);

extern boolean_t	pmap_valid_page(
				ppnum_t		pn);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		pmap_commpage32_init(
				vm_offset_t	kernel,
				vm_offset_t	user,
				int		count);
extern void		pmap_commpage64_init(
				vm_offset_t	kernel,
				vm_map_offset_t	user,
				int		count);

extern struct cpu_pmap	*pmap_cpu_alloc(
				boolean_t	is_boot_cpu);
extern void		pmap_cpu_free(
				struct cpu_pmap	*cp);

extern void		pmap_map_block(
				pmap_t		pmap,
				addr64_t	va,
				ppnum_t		pa,
				uint32_t	size,
				vm_prot_t	prot,
				int		attr,
				unsigned int	flags);

extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size);
extern kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr);
extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void pmap_unmap_sharedpage(pmap_t pmap);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_4GB_pagezero(pmap_t pmap);
extern void pmap_clear_4GB_pagezero(pmap_t pmap);
extern void pmap_load_kernel_cr3(void);
extern vm_offset_t pmap_cpu_high_map_vaddr(int, enum high_cpu_types);
extern vm_offset_t pmap_high_map_vaddr(enum high_cpu_types);
extern vm_offset_t pmap_high_map(pt_entry_t, enum high_cpu_types);
extern vm_offset_t pmap_cpu_high_shared_remap(int, enum high_cpu_types, vm_offset_t, int);
extern vm_offset_t pmap_high_shared_remap(enum high_fixed_addresses, vm_offset_t, int);

extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, int *, int *);


/*
 *	Macros for speed.
 */


#include <kern/spl.h>


#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif


#define PMAP_ACTIVATE_KERNEL(my_cpu) {					\
	spl_t	spl;							\
									\
	spl = splhigh();						\
	if (current_cpu_datap()->cpu_tlb_invalid)			\
		process_pmap_updates();					\
	splx(spl);							\
}

#define PMAP_DEACTIVATE_KERNEL(my_cpu) {				\
	spl_t	spl;							\
									\
	spl = splhigh();						\
	process_pmap_updates();						\
	splx(spl);							\
}


#define PMAP_ACTIVATE_MAP(map, my_cpu) {				\
	register pmap_t	tpmap;						\
									\
	tpmap = vm_map_pmap(map);					\
	set_dirbase(tpmap, my_cpu);					\
}

#define PMAP_DEACTIVATE_MAP(map, my_cpu)				\
	if (current_cpu_datap()->cpu_task_map == TASK_MAP_64BIT_SHARED) \
		pmap_load_kernel_cr3();

#define PMAP_ACTIVATE_USER(th, my_cpu) {				\
	spl_t	spl;							\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define PMAP_DEACTIVATE_USER(th, my_cpu)


#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t		spl;						\
	pt_entry_t	*kpdp;						\
	pt_entry_t	*updp;						\
	int		i;						\
	int		need_flush;					\
									\
	need_flush = 0;							\
	spl = splhigh();						\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
	kpdp = current_cpu_datap()->cpu_copywindow_pdp;			\
	for (i = 0; i < NCOPY_WINDOWS; i++) {				\
		if (new_th->machine.copy_window[i].user_base != (user_addr_t)-1) { \
			updp = pmap_pde(new_th->map->pmap,		\
				new_th->machine.copy_window[i].user_base); \
			pmap_store_pte(kpdp, updp ? *updp : 0);		\
		}							\
		kpdp++;							\
	}								\
	splx(spl);							\
	if (new_th->machine.copyio_state == WINDOWS_OPENED)		\
		need_flush = 1;						\
	else								\
		new_th->machine.copyio_state = WINDOWS_DIRTY;		\
	if (new_th->machine.physwindow_pte) {				\
		pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), \
				new_th->machine.physwindow_pte);	\
		if (need_flush == 0)					\
			invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base); \
	}								\
	if (need_flush)							\
		flush_tlb();						\
}

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t	spl;							\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
	inval_copy_windows(th);						\
}

/*
 * Marking the current cpu's cr3 inactive is achieved by setting its lsb.
 * Marking the current cpu's cr3 active once more involves clearing this bit.
 * Note that valid page tables are page-aligned, and so the bottom 12 bits
 * are normally zero.
 * We can only mark the current cpu active/inactive but we can test any cpu.
 */
#define CPU_CR3_MARK_INACTIVE()						\
	current_cpu_datap()->cpu_active_cr3 |= 1

#define CPU_CR3_MARK_ACTIVE()						\
	current_cpu_datap()->cpu_active_cr3 &= ~1

#define CPU_CR3_IS_ACTIVE(cpu)						\
	((cpu_datap(cpu)->cpu_active_cr3 & 1) == 0)

#define CPU_GET_ACTIVE_CR3(cpu)						\
	(cpu_datap(cpu)->cpu_active_cr3 & ~1)

#define MARK_CPU_IDLE(my_cpu) {						\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	if (!cpu_mode_is64bit() || no_shared_cr3)			\
		process_pmap_updates();					\
	else								\
		pmap_load_kernel_cr3();					\
	CPU_CR3_MARK_INACTIVE();					\
	__asm__ volatile("mfence");					\
	splx(s);							\
}

#define MARK_CPU_ACTIVE(my_cpu) {					\
									\
	int	s = splhigh();						\
	/*								\
	 * If a kernel_pmap update was requested while this cpu	\
	 * was idle, process it as if we got the interrupt.		\
	 * Before doing so, remove this cpu from the idle set.		\
	 * Since we do not grab any pmap locks while we flush		\
	 * our TLB, another cpu may start an update operation		\
	 * before we finish.  Removing this cpu from the idle		\
	 * set assures that we will receive another update		\
	 * interrupt if this happens.					\
	 */								\
	CPU_CR3_MARK_ACTIVE();						\
	__asm__ volatile("mfence");					\
									\
	if (current_cpu_datap()->cpu_tlb_invalid)			\
		process_pmap_updates();					\
	splx(s);							\
}

#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA)	\
	((((vm_offset_t) (VA)) >= vm_min_kernel_address) &&	\
	 (((vm_offset_t) (VA)) <= vm_max_kernel_address))


#define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)

#endif	/* ASSEMBLER */


#endif	/* _PMAP_MACHINE_ */


#endif	/* KERNEL_PRIVATE */