/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.h
 *
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread_act.h>
#include <kern/lock.h>

/*
 *	Define the generic in terms of the specific
 */

#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

typedef unsigned int	pt_entry_t;
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */

#define	INTEL_OFFMASK	0xfff	/* offset within page */
#define	PDESHIFT	22	/* page descriptor shift */
#define	PDEMASK		0x3ff	/* mask for page descriptor index */
#define	PTESHIFT	12	/* page table shift */
#define	PTEMASK		0x3ff	/* mask for page table index */


#define	VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define	VM_WIMG_DEFAULT		VM_MEM_COHERENT
/* ?? intel ?? */
#define	VM_WIMG_IO		(VM_MEM_COHERENT | \
				VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define	VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define	VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
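
/*
 * Illustrative note, not part of the original interface: a mapping of
 * device registers would typically ask for VM_WIMG_IO, which marks the
 * range uncached and guarded, while a write-mostly region such as a
 * frame buffer might use VM_WIMG_WCOMB to get store-gathering.  A
 * minimal, hypothetical sketch:
 *
 *	unsigned int reg_mode = VM_WIMG_IO;	(device registers)
 *	unsigned int fb_mode  = VM_WIMG_WCOMB;	(frame buffer)
 */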

/*
 *	Convert kernel virtual address to linear address
 */

#define	kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)

/*
 *	Convert address offset to page descriptor index
 */
#define	pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
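
/*
 * Worked example (illustrative only): with 4K pages and 4-byte entries,
 * NPTES == NPDES == 1024, so a 32-bit virtual address splits 10/10/12.
 * For a non-kernel pmap and va == 0xc0501234:
 *
 *	pdenum(pmap, va)  == (0xc0501234 >> 22) & 0x3ff  == 0x301
 *	ptenum(va)        == (0xc0501234 >> 12) & 0x3ff  == 0x101
 *	va & INTEL_OFFMASK                               == 0x234
 *
 * For the kernel pmap, pdenum() first rebases the address by
 * LINEAR_KERNEL_ADDRESS (via kvtolinear) before indexing.
 */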

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define	INTEL_PTE_VALID		0x00000001
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000
#define	INTEL_PTE_PTA		0x00000080

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)
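
/*
 * Illustrative sketch only: a minimal valid, writable mapping of a
 * page-aligned physical address "pa" (a hypothetical variable, not a
 * name used elsewhere in this file) would be assembled from the bits
 * above roughly as:
 *
 *	pt_entry_t pte = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
 *
 * A user-accessible mapping would also set INTEL_PTE_USER, and an
 * uncached device mapping would add INTEL_PTE_NCACHE.
 */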

#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8


/*
 *	Convert page table entry to kernel virtual address
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))

#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	vm_offset_t	pdirbase;	/* phys. address of dirbase */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};

/*
 *	Optimization avoiding some TLB flushes when switching to
 *	kernel-loaded threads.  This is effective only for i386:
 *	Since the user task, the kernel task, and kernel-loaded tasks all
 *	share the same virtual address space (with appropriate
 *	protections), any pmap can map the kernel and kernel-loaded tasks.
 *
 *	The idea is to avoid switching to another pmap unnecessarily when
 *	switching to a kernel-loaded task, or when switching to the kernel
 *	itself.
 *
 *	We store the pmap we are really using (from which we fetched the
 *	dirbase value) in real_pmap[cpu_number()].
 *
 *	Invariant:
 *	current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */

extern struct pmap	*real_pmap[NCPUS];
#include <i386/proc_reg.h>
/*
 *	If switching to the kernel pmap, don't incur the TLB cost of switching
 *	to its page tables, since all maps include the kernel map as a subset.
 *	Simply record that this CPU is logically on the kernel pmap (see
 *	pmap_destroy).
 *
 *	Similarly, if switching to a pmap (other than kernel_pmap) that is
 *	already in use, don't do anything to the hardware, to avoid a TLB flush.
 */

#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu) i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu) i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap,my_cpu) (pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap,my_cpu) (pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */


#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (struct pmap *)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}

#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;


/*
 *	Quick test for pmap update requests.
 */
extern volatile
boolean_t	cpu_update_needed[NCPUS];

/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);
extern pmap_t		kernel_pmap;

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */
extern vm_offset_t	(phystokv)(
				vm_offset_t	pa);

extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern void		pmap_bootstrap(
				vm_offset_t	load_start);

extern boolean_t	pmap_valid_page(
				vm_offset_t	pa);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		flush_tlb(void);
extern void		invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void		flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t		pmap_find_phys(pmap_t map, addr64_t va);
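
/*
 * Illustrative sketch only, not taken from this source: bootstrap or
 * driver code could use pmap_map() to enter a physically contiguous
 * range into the kernel's address space.  "dev_base" and "dev_len" are
 * hypothetical names for a device's physical base address and length:
 *
 *	vm_offset_t va = pmap_map(virt, dev_base, dev_base + dev_len,
 *				  VM_PROT_READ | VM_PROT_WRITE);
 *
 * The return value appears to be the next virtual address following
 * the newly entered range.
 */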

/*
 *	Macros for speed.
 */

#if	NCPUS > 1

#include <kern/spl.h>

#if	defined(PMAP_ACTIVATE_KERNEL)
#undef	PMAP_ACTIVATE_KERNEL
#undef	PMAP_DEACTIVATE_KERNEL
#undef	PMAP_ACTIVATE_USER
#undef	PMAP_DEACTIVATE_USER
#endif

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Process invalidate requests for the kernel pmap.	\
	 */								\
	if (cpu_update_needed[(my_cpu)])				\
		process_pmap_updates(kernel_pmap);			\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register pmap_t		tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
		/*							\
		 *	If this is the kernel pmap, switch to its page tables. \
		 */							\
		set_dirbase(kernel_pmap, my_cpu);			\
	}								\
	else {								\
		/*							\
		 *	Let pmap updates proceed while we wait for this pmap. \
		 */							\
		i_bit_clear((my_cpu), &cpus_active);			\
									\
		/*							\
		 *	Lock the pmap to put this cpu in its active set. \
		 *	Wait for updates here.				\
		 */							\
		simple_lock(&tpmap->lock);				\
									\
		/*							\
		 *	No need to invalidate the TLB - the entire user pmap \
		 *	will be invalidated by reloading dirbase.	\
		 */							\
		set_dirbase(tpmap, my_cpu);				\
									\
		/*							\
		 *	Mark this cpu active - IPL will be lowered by	\
		 *	load_context().					\
		 */							\
		i_bit_set((my_cpu), &cpus_active);			\
									\
		simple_unlock(&tpmap->lock);				\
	}								\
}

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)	{				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu)	{			\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t		spl;						\
									\
	if (old_th->map != new_th->map) {				\
		spl = splhigh();					\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
		splx(spl);						\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}

#define	MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
	set_led(my_cpu);						\
}

#define	MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splhigh();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	if (cpu_update_needed[(my_cpu)])				\
		pmap_update_interrupt();				\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
	clear_led(my_cpu);						\
}

#else	/* NCPUS > 1 */

/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)					\
	set_dirbase(vm_map_pmap(map), my_cpu)

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)					\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_DEACTIVATE_USER(th, my_cpu)				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
}

#endif	/* NCPUS > 1 */

#define	PMAP_CONTEXT(pmap, thread)

#define	pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
	(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
	(KERN_INVALID_ADDRESS)

#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */