/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	File:	pmap.h
 *
 *	Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_	1

#ifndef ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>

#define PMAP_QUEUE 1
#ifdef PMAP_QUEUE
#include <kern/queue.h>
#endif

/*
 * Define the generic in terms of the specific.
 */

#define INTEL_PGBYTES		I386_PGBYTES
#define INTEL_PGSHIFT		I386_PGSHIFT
#define intel_btop(x)		i386_btop(x)
#define intel_ptob(x)		i386_ptob(x)
#define intel_round_page(x)	i386_round_page(x)
#define intel_trunc_page(x)	i386_trunc_page(x)
#define trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define round_intel_to_vm(x)	round_i386_to_vm(x)
#define vm_to_intel(x)		vm_to_i386(x)

/*
 * i386/i486/i860 Page Table Entry
 */

#ifdef PAE
typedef uint64_t	pdpt_entry_t;
typedef uint64_t	pt_entry_t;
typedef uint64_t	pd_entry_t;
typedef uint64_t	pmap_paddr_t;
#else
typedef uint32_t	pt_entry_t;
typedef uint32_t	pd_entry_t;
typedef uint32_t	pmap_paddr_t;
#endif

#define PT_ENTRY_NULL	((pt_entry_t *) 0)
#define PD_ENTRY_NULL	((pd_entry_t *) 0)

#endif	/* ASSEMBLER */

#ifdef PAE
#define NPGPTD		4
#define PDESHIFT	21
#define PTEMASK		0x1ff
#define PTEINDX		3
#else
#define NPGPTD		1
#define PDESHIFT	22
#define PTEMASK		0x3ff
#define PTEINDX		2
#endif
#define PTESHIFT	12

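/*
 * Illustration (not part of the original header): how the constants above
 * split a 32-bit linear address.
 *
 *	PAE:	 2-bit PDPT index | 9-bit pde index | 9-bit pte index | 12-bit offset
 *		 (PDESHIFT 21, PTEMASK 0x1ff, NPGPTD 4 directory pages)
 *	non-PAE: 10-bit pde index | 10-bit pte index | 12-bit offset
 *		 (PDESHIFT 22, PTEMASK 0x3ff, NPGPTD 1 directory page)
 */
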
#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#define INTEL_OFFMASK	(I386_PGBYTES - 1)
#define PG_FRAME	(~((pmap_paddr_t)PAGE_MASK))
#define NPTEPG		(PAGE_SIZE/(sizeof (pt_entry_t)))

#define NBPTD		(NPGPTD << PAGE_SHIFT)
#define NPDEPTD		(NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE		(1 << PDESHIFT)
#define PDEMASK		(NBPDE - 1)

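/*
 * Worked values (illustrative): with 4K pages, NPTEPG is 512 under PAE
 * (4096 / sizeof(uint64_t)) and 1024 otherwise (4096 / sizeof(uint32_t));
 * correspondingly NBPDE, the span mapped by one pde, is 2MB (1 << 21)
 * under PAE and 4MB (1 << 22) otherwise.
 */
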
#define VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define VM_WIMG_DEFAULT		VM_MEM_COHERENT
/* ?? intel ?? */
#define VM_WIMG_IO		(VM_MEM_COHERENT | \
				 VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)

/*
 * Size of kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES	256
#endif

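/*
 * Example (illustrative): with the default KVA_PAGES of 256 and 4MB per
 * page table page, the kernel occupies 256 * 4MB = 1GB of virtual space,
 * i.e. the top quarter of the 32-bit address space above KERNBASE
 * (0xC0000000, defined below).
 */
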
/*
 *	Pte related macros
 */
#define VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))

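/*
 * Worked example (illustrative): KERNBASE below is VADDR(KPTDI,0).
 * Under PAE that is 0x600 << 21, and without PAE 0x300 << 22; both
 * evaluate to 0xC0000000, so the kernel base is the same either way.
 */
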
#ifndef NKPT
#ifdef PAE
#define NKPT		500	/* actual number of kernel page tables */
#else
#define NKPT		32	/* initial number of kernel page tables */
#endif
#endif
#ifndef NKPDE
#define NKPDE		(KVA_PAGES - 1)	/* addressable number of page tables/pde's */
#endif

/*
 *	The *PTDI values control the layout of virtual memory.
 */
#ifdef PAE
#define KPTDI		(0x600)	/* start of kernel virtual pde's */
#define PTDPTDI		(0x7F4)	/* ptd entry that points to ptd! */
#define APTDPTDI	(0x7F8)	/* alt ptd entry that points to APTD */
#define UMAXPTDI	(0x5FC)	/* ptd entry for user space end */
#define UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */
#else
#define KPTDI		(0x300)	/* start of kernel virtual pde's */
#define PTDPTDI		(0x3FD)	/* ptd entry that points to ptd! */
#define APTDPTDI	(0x3FE)	/* alt ptd entry that points to APTD */
#define UMAXPTDI	(0x2FF)	/* ptd entry for user space end */
#define UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */
#endif

#define KERNBASE	VADDR(KPTDI,0)

/*
 *	Convert address offset to page descriptor index.
 */
#define pdenum(pmap, a)	(((a) >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address.
 */
#define pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index.
 */
#define ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

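/*
 * Worked example (illustrative, non-PAE): for va = 0xC0401234,
 *	pdenum(pmap, va)   = (va >> 22) & 0x3ff = 0x301
 *	ptenum(va)         = (va >> 12) & 0x3ff = 0x001
 *	va & INTEL_OFFMASK = 0x234
 * and pdetova(0x301) recovers the 4MB-aligned base 0xC0400000.
 */
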
/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define INTEL_PTE_VALID		0x00000001
#define INTEL_PTE_WRITE		0x00000002
#define INTEL_PTE_RW		0x00000002
#define INTEL_PTE_USER		0x00000004
#define INTEL_PTE_WTHRU		0x00000008
#define INTEL_PTE_NCACHE	0x00000010
#define INTEL_PTE_REF		0x00000020
#define INTEL_PTE_MOD		0x00000040
#define INTEL_PTE_PS		0x00000080
#define INTEL_PTE_GLOBAL	0x00000100
#define INTEL_PTE_WIRED		0x00000200
#define INTEL_PTE_PFN		/*0xFFFFF000*/ (~0xFFF)
#define INTEL_PTE_PTA		0x00000080

#define pa_to_pte(a)		((a) & INTEL_PTE_PFN)	/* XXX */
#define pte_to_pa(p)		((p) & INTEL_PTE_PFN)	/* XXX */
#define pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

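/*
 * Sketch (not from the original source): a valid, writable mapping of
 * physical address pa would be composed as
 *
 *	pt_entry_t pte = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW;
 *
 * Note that INTEL_PTE_PS and INTEL_PTE_PTA share bit 7: the hardware
 * interprets that bit as the page-size flag in a pde and as the PAT
 * flag in a 4K pte.
 */
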
#define PMAP_DEFAULT_CACHE	0
#define PMAP_INHIBIT_CACHE	1
#define PMAP_GUARDED_CACHE	2
#define PMAP_ACTIVATE_CACHE	4
#define PMAP_NO_GUARD_CACHE	8

#ifndef ASSEMBLER

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */

extern pt_entry_t	PTmap[], APTmap[], Upte;
extern pd_entry_t	PTD[], APTD[], PTDpde[], APTDpde[], Upde;

extern pd_entry_t	*IdlePTD;	/* physical address of "Idle" state directory */
#ifdef PAE
extern pdpt_entry_t	*IdlePDPT;
#endif

/*
 * Virtual address to page table entry and to physical address.
 * Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va)	(PTmap + i386_btop(va))

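/*
 * Illustration (not part of the original header): the PTDPTDI slot makes
 * the page directory double as a page table for itself, so all page
 * tables appear as one linear array at PTmap.  Applying vtopte to the
 * address of a pte therefore climbs one level of the hierarchy:
 *
 *	pt_entry_t *pte = vtopte(va);			pte mapping va
 *	pt_entry_t *pde = vtopte((vm_offset_t)pte);	pde mapping that pte
 */
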
typedef volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct md_page {
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#include <vm/vm_page.h>

/*
 * For each vm_page_t, there is a list of all currently
 * valid virtual mappings of that page.  An entry is
 * a pv_entry_t; the list is the pv_table.
 */
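/*
 * Illustrative note: the pv lists are the reverse (physical-to-virtual)
 * map.  An operation that must visit every mapping of a physical page,
 * e.g. removing or write-protecting the page, walks md_page.pv_list
 * rather than scanning every pmap.
 */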

struct pmap {
#ifdef PMAP_QUEUE
	queue_head_t	pmap_link;	/* unordered queue of in-use pmaps */
#endif
	pd_entry_t	*dirbase;	/* page directory pointer register */
	pd_entry_t	*pdirbase;	/* phys. address of dirbase */
	vm_object_t	pm_obj;		/* object to hold pte's */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
#ifdef PAE
	vm_offset_t	pm_hold;	/* true pdpt zalloc addr */
	pdpt_entry_t	*pm_pdpt;	/* KVA of pg dir ptr table */
	vm_offset_t	pm_ppdpt;	/* phys addr of pdpt;
					   should really be 32/64 bit */
#endif
};

#define PMAP_NWINDOWS	4
typedef struct {
	pt_entry_t	*prv_CMAP;
	caddr_t		prv_CADDR;
} mapwindow_t;

typedef struct cpu_pmap {
	mapwindow_t		mapwindow[PMAP_NWINDOWS];
	struct pmap		*real_pmap;
	struct pmap_update_list	*update_list;
	volatile boolean_t	update_needed;
} cpu_pmap_t;

/*
 * Should be rewritten in asm anyway.
 */
#define CM1 (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CMAP)
#define CM2 (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CMAP)
#define CM3 (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CMAP)
#define CM4 (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CMAP)
#define CA1 (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CADDR)
#define CA2 (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CADDR)
#define CA3 (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CADDR)
#define CA4 (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CADDR)

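/*
 * Usage sketch (hypothetical, not from the original source): a per-cpu
 * mapping window is claimed by installing a pte through CMn and is then
 * accessed via the fixed virtual address CAn, e.g.
 *
 *	*CM1 = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW;
 *	    ... flush the stale TLB entry for CA1 (primitive not shown) ...
 *	bcopy(CA1, buffer, I386_PGBYTES);
 *	*CM1 = 0;
 *
 * The caller must keep the window from being reused underneath it,
 * e.g. by running at splhigh or with interrupts otherwise excluded.
 */
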
typedef struct pmap_memory_regions {
	ppnum_t		base;
	ppnum_t		end;
	ppnum_t		alloc;
	uint32_t	type;
} pmap_memory_region_t;

unsigned pmap_memory_region_count;
unsigned pmap_memory_region_current;

#define PMAP_MEMORY_REGIONS_SIZE 32

extern pmap_memory_region_t pmap_memory_regions[];

/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * since user task, kernel task and kernel-loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping kernel and kernel-loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in current_cpu_datap()->cpu_pmap->real_pmap.
 *
 * Invariant:
 *	current_pmap() == current_cpu_datap()->cpu_pmap->real_pmap ||
 *	current_pmap() == kernel_pmap.
 */
#define PMAP_REAL(my_cpu)	(cpu_datap(my_cpu)->cpu_pmap->real_pmap)

#include <i386/proc_reg.h>
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * pmap_destroy).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is
 * already in use, don't do anything to the hardware, to avoid a TLB flush.
 */

#define PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))

#ifdef PAE
#define PDIRBASE	pm_ppdpt
#else
#define PDIRBASE	pdirbase
#endif
#define set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &PMAP_REAL(my_cpu);			\
	pmap_paddr_t	pdirbase = (pmap_paddr_t)((mypmap)->PDIRBASE);	\
									\
	if (*ppmap == (struct pmap *)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}

/*
 * List of cpus that are actively using mapped memory.  Any
 * pmap update operation must wait for all cpus in this list.
 * Update operations must still be queued to cpus not in this
 * list.
 */
extern cpu_set		cpus_active;

/*
 * List of cpus that are idle, but still operating, and will want
 * to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;

#define cpu_update_needed(cpu)	cpu_datap(cpu)->cpu_pmap->update_needed
#define cpu_update_list(cpu)	cpu_datap(cpu)->cpu_pmap->update_list

/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void	process_pmap_updates(struct pmap *pmap);
extern void	pmap_update_interrupt(void);

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */

extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern void		pmap_bootstrap(
				vm_offset_t	load_start);

extern boolean_t	pmap_valid_page(
				ppnum_t		pn);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		pmap_commpage_init(
				vm_offset_t	kernel,
				vm_offset_t	user,
				int		count);
extern struct cpu_pmap	*pmap_cpu_alloc(
				boolean_t	is_boot_cpu);
extern void		pmap_cpu_free(
				struct cpu_pmap	*cp);

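/*
 * Sketch (illustrative only): translating a mapped kernel virtual
 * address by hand with the declarations above.
 *
 *	pt_entry_t *pte = pmap_pte(kernel_pmap, va);
 *	if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
 *		pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
 *
 * kvtophys() performs essentially this lookup for kernel addresses.
 */
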
extern void	invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void	flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t	pmap_find_phys(pmap_t map, addr64_t va);
extern void	pmap_sync_page_data_phys(ppnum_t pa);
extern void	pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 *	Macros for speed.
 */

#include <kern/spl.h>

#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

#define PMAP_ACTIVATE_KERNEL(my_cpu) {					\
									\
	/*								\
	 * Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 * Lock the pmap to put this cpu in its active set.		\
	 * Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 * Process invalidate requests for the kernel pmap.		\
	 */								\
	if (cpu_update_needed(my_cpu))					\
		process_pmap_updates(kernel_pmap);			\
									\
	/*								\
	 * Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 * Mark this cpu active - IPL will be lowered by		\
	 * load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define PMAP_DEACTIVATE_KERNEL(my_cpu) {				\
	/*								\
	 * Mark pmap no longer in use by this cpu even if		\
	 * pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
	i_bit_clear((my_cpu), &cpus_active);				\
	PMAP_REAL(my_cpu) = NULL;					\
}

#define PMAP_ACTIVATE_MAP(map, my_cpu) {				\
	register pmap_t		tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
		/*							\
		 * If this is the kernel pmap, switch to its page tables. \
		 */							\
		set_dirbase(kernel_pmap, my_cpu);			\
	}								\
	else {								\
		/*							\
		 * Let pmap updates proceed while we wait for this pmap. \
		 */							\
		i_bit_clear((my_cpu), &cpus_active);			\
									\
		/*							\
		 * Lock the pmap to put this cpu in its active set.	\
		 * Wait for updates here.				\
		 */							\
		simple_lock(&tpmap->lock);				\
									\
		/*							\
		 * No need to invalidate the TLB - the entire user pmap	\
		 * will be invalidated by reloading dirbase.		\
		 */							\
		set_dirbase(tpmap, my_cpu);				\
									\
		/*							\
		 * Mark this cpu active - IPL will be lowered by	\
		 * load_context().					\
		 */							\
		i_bit_set((my_cpu), &cpus_active);			\
									\
		simple_unlock(&tpmap->lock);				\
	}								\
}

#define PMAP_DEACTIVATE_MAP(map, my_cpu)

#define PMAP_ACTIVATE_USER(th, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define PMAP_DEACTIVATE_USER(th, my_cpu)

#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t		spl;						\
									\
	if (old_th->map != new_th->map) {				\
		spl = splhigh();					\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
		splx(spl);						\
	}								\
}

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}

#define MARK_CPU_IDLE(my_cpu) {						\
	/*								\
	 * Mark this cpu idle, and remove it from the active set,	\
	 * since it is not actively using any pmap.  Signal_cpus	\
	 * will notice that it is idle, and avoid signaling it,	\
	 * but will queue the update request for when the cpu		\
	 * becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
	set_led(my_cpu);						\
}

#define MARK_CPU_ACTIVE(my_cpu) {					\
									\
	int	s = splhigh();						\
	/*								\
	 * If a kernel_pmap update was requested while this cpu	\
	 * was idle, process it as if we got the interrupt.		\
	 * Before doing so, remove this cpu from the idle set.	\
	 * Since we do not grab any pmap locks while we flush		\
	 * our TLB, another cpu may start an update operation		\
	 * before we finish.  Removing this cpu from the idle		\
	 * set assures that we will receive another update		\
	 * interrupt if this happens.					\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	if (cpu_update_needed(my_cpu))					\
		pmap_update_interrupt();				\
									\
	/*								\
	 * Mark that this cpu is now active.				\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
	clear_led(my_cpu);						\
}

#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
				(KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
				(KERN_INVALID_ADDRESS)

#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */