/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	File:	pmap.h
 *
 *	Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <platforms.h>
#include <mp_v1_1.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread_act.h>
#include <kern/lock.h>

/*
 *	Define the generic in terms of the specific.
 */

#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

typedef unsigned int	pt_entry_t;
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */

#define	INTEL_OFFMASK	0xfff	/* offset within page */
#define	PDESHIFT	22	/* page descriptor shift */
#define	PDEMASK		0x3ff	/* mask for page descriptor index */
#define	PTESHIFT	12	/* page table shift */
#define	PTEMASK		0x3ff	/* mask for page table index */
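
/*
 *	Layout note: with 4KB pages and a two-level page table, a 32-bit
 *	linear address splits as
 *
 *	    31             22 21             12 11            0
 *	    +----------------+-----------------+--------------+
 *	    | directory index|   table index   | byte offset  |
 *	    |    (10 bits)   |    (10 bits)    |  (12 bits)   |
 *	    +----------------+-----------------+--------------+
 *
 *	which is why PDESHIFT is 22, PTESHIFT is 12, both index masks
 *	are 0x3ff (10 bits), and INTEL_OFFMASK is 0xfff (12 bits).
 */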

#define	VM_WIMG_DEFAULT	VM_MEM_COHERENT

/*
 *	Convert kernel virtual address to linear address.
 */

#define	kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)

/*
 *	Convert address offset to page descriptor index.
 */
#define	pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address.
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index.
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)
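
/*
 *	Worked example (hypothetical address): for a user address
 *	a = 0x0804812C,
 *
 *	    pdenum(pmap, a)   = (0x0804812C >> 22) & 0x3ff = 0x20
 *	    ptenum(a)         = (0x0804812C >> 12) & 0x3ff = 0x48
 *	    a & INTEL_OFFMASK = 0x12C
 *
 *	For the kernel pmap, pdenum() first rebases the address with
 *	kvtolinear() so the index is taken in the kernel's linear
 *	(post-segmentation) space.
 */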

#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
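
/*
 *	Both counts work out to 4096 / 4 = 1024 entries: a page table and
 *	a page directory each occupy exactly one 4KB page of 4-byte entries.
 */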

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define	INTEL_PTE_VALID		0x00000001
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)
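
/*
 *	Example (hypothetical physical address): a valid, writable,
 *	user-accessible mapping of the page at 0x00345000 would be built as
 *
 *	    pt_entry_t pte = pa_to_pte(0x00345000)
 *			| INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_USER;
 *
 *	giving 0x00345007.  pte_to_pa(pte) masks the low attribute bits
 *	back off to recover 0x00345000, and pte_increment_pa() advances
 *	the frame by one page (INTEL_OFFMASK+1 == 4096 bytes).
 */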

#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8

/*
 *	Convert page table entry to kernel virtual address.
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))

#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	vm_offset_t	pdirbase;	/* phys. address of dirbase */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};
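
/*
 *	Note: dirbase is the kernel virtual address of the page directory,
 *	used when the kernel edits its entries; pdirbase is the directory's
 *	physical address, which is what set_dirbase() below loads into CR3.
 */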

/*
 *	Optimization avoiding some TLB flushes when switching to
 *	kernel-loaded threads.  This is effective only for i386:
 *	since the user task, the kernel task, and kernel-loaded tasks
 *	share the same virtual space (with appropriate protections),
 *	any pmap allows mapping the kernel and kernel-loaded tasks.
 *
 *	The idea is to avoid switching to another pmap unnecessarily when
 *	switching to a kernel-loaded task, or when switching to the kernel
 *	itself.
 *
 *	We store the pmap we are really using (from which we fetched the
 *	dirbase value) in real_pmap[cpu_number()].
 *
 *	Invariant:
 *	current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */

extern struct pmap	*real_pmap[NCPUS];

#include <i386/proc_reg.h>
/*
 *	If switching to the kernel pmap, don't incur the TLB cost of switching
 *	to its page tables, since all maps include the kernel map as a subset.
 *	Simply record that this CPU is logically on the kernel pmap (see
 *	pmap_destroy).
 *
 *	Similarly, if switching to a pmap (other than kernel_pmap) that is
 *	already in use, don't do anything to the hardware, to avoid a TLB
 *	flush.
 */

#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap, my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap, my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */

#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (struct pmap *)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}

#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;


/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);
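
/*
 *	Sketch of the intended protocol (an illustration assuming the usual
 *	Mach TLB-shootdown scheme, not a verbatim copy of the implementation):
 *
 *	    simple_lock(&pmap->lock);
 *	    <edit the page tables>
 *	    signal the cpus in (pmap->cpus_using & cpus_active);
 *	    wait for each signalled cpu to run pmap_update_interrupt();
 *	    simple_unlock(&pmap->lock);
 *
 *	Idle cpus are not signalled; they pick up queued kernel-pmap updates
 *	when MARK_CPU_ACTIVE() drops them back into cpus_active.
 */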

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */
extern vm_offset_t	(phystokv)(
				vm_offset_t	pa);

extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);
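
/*
 *	Hypothetical usage sketch (addresses invented for the example):
 *	wiring the physical range [start, end) into the kernel's address
 *	space at virt, read/write:
 *
 *	    (void) pmap_map(virt, 0x000A0000, 0x000C0000,
 *			    VM_PROT_READ | VM_PROT_WRITE);
 *
 *	pmap_map_bd() is the back-door variant for early boot/device use,
 *	which writes the page tables directly.
 */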

extern void		pmap_bootstrap(
				vm_offset_t	load_start);

extern boolean_t	pmap_valid_page(
				vm_offset_t	pa);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		flush_tlb(void);
extern void		invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void		flush_dcache(vm_offset_t addr, unsigned count, int phys);


/*
 *	Macros for speed.
 */

#if	NCPUS > 1

#include <kern/spl.h>

/*
 *	For multiple CPUs, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUs.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register struct pmap	*tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
		/*							\
		 *	If this is the kernel pmap, switch to its	\
		 *	page tables.					\
		 */							\
		set_dirbase(kernel_pmap, my_cpu);			\
	}								\
	else {								\
		/*							\
		 *	Let pmap updates proceed while we wait for	\
		 *	this pmap.					\
		 */							\
		i_bit_clear((my_cpu), &cpus_active);			\
									\
		/*							\
		 *	Lock the pmap to put this cpu in its active	\
		 *	set.  Wait for updates here.			\
		 */							\
		simple_lock(&tpmap->lock);				\
									\
		/*							\
		 *	No need to invalidate the TLB - the entire	\
		 *	user pmap will be invalidated by reloading	\
		 *	dirbase.					\
		 */							\
		set_dirbase(tpmap, my_cpu);				\
									\
		/*							\
		 *	Mark this cpu active - IPL will be lowered by	\
		 *	load_context().					\
		 */							\
		i_bit_set((my_cpu), &cpus_active);			\
									\
		simple_unlock(&tpmap->lock);				\
	}								\
}

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)	{				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu)	{			\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu)	{		\
	spl_t		spl;						\
									\
	if (old_th->map != new_th->map) {				\
		spl = splhigh();					\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
		splx(spl);						\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu)	{			\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}

#if	MP_V1_1
#define	set_led(cpu)
#define	clear_led(cpu)
#endif	/* MP_V1_1 */

#define	MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
	set_led(my_cpu);						\
}

#define	MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splhigh();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
	clear_led(my_cpu);						\
}

#else	/* NCPUS > 1 */

/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)					\
	set_dirbase(vm_map_pmap(map), my_cpu)

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)					\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_DEACTIVATE_USER(th, my_cpu)				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu)	{		\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu)	{			\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
}

#endif	/* NCPUS > 1 */

#define	PMAP_CONTEXT(pmap, thread)

#define	pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_sync_caches_phys(pa) \
					(KERN_INVALID_ADDRESS)

#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */