/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	pmap.h
 *
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Machine-dependent structures for the physical map module.
 */
62 #ifndef _PMAP_MACHINE_
63 #define _PMAP_MACHINE_ 1
67 #include <platforms.h>
70 #include <mach/kern_return.h>
71 #include <mach/machine/vm_types.h>
72 #include <mach/vm_prot.h>
73 #include <mach/vm_statistics.h>
74 #include <mach/machine/vm_param.h>
75 #include <kern/kern_types.h>
76 #include <kern/thread_act.h>
77 #include <kern/lock.h>
/*
 *	Define the generic in terms of the specific: the "intel_" names
 *	simply alias the i386-specific page-size helpers.
 */
#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)
/*
 *	i386/i486/i860 Page Table Entry: a raw 32-bit hardware PTE.
 */
typedef unsigned int	pt_entry_t;
#define PT_ENTRY_NULL	((pt_entry_t *) 0)
100 #endif /* ASSEMBLER */
/*
 * Page-table geometry for the 32-bit, non-PAE i386 MMU:
 * 4KB pages, 1024 PTEs per page table, 1024 PDEs per directory.
 */
#define INTEL_OFFMASK	0xfff	/* offset within page */
#define PDESHIFT	22	/* page descriptor shift */
#define PDEMASK		0x3ff	/* mask for page descriptor index */
#define PTESHIFT	12	/* page table shift */
#define PTEMASK		0x3ff	/* mask for page table index */

#define VM_WIMG_DEFAULT	VM_MEM_COHERENT

/*
 *	Convert kernel virtual address to linear address
 */
#define kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)

/*
 *	Convert address offset to page descriptor index
 *	(kernel addresses are first rebased into the linear map).
 */
#define pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address
 */
#define pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

/* Number of PTEs (or PDEs) that fit in one page. */
#define NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define NPDES	(intel_ptob(1)/sizeof(pt_entry_t))

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */
#define INTEL_PTE_VALID		0x00000001
#define INTEL_PTE_WRITE		0x00000002
#define INTEL_PTE_USER		0x00000004
#define INTEL_PTE_WTHRU		0x00000008
#define INTEL_PTE_NCACHE	0x00000010
#define INTEL_PTE_REF		0x00000020
#define INTEL_PTE_MOD		0x00000040
#define INTEL_PTE_WIRED		0x00000200	/* software-defined bit */
#define INTEL_PTE_PFN		0xfffff000	/* page frame number mask */

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

/* Cache-attribute requests passed to pmap mapping routines. */
#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8

/*
 *	Convert page table entry to kernel virtual address
 */
#define ptetokv(a)	(phystokv(pte_to_pa(a)))
/*
 *	Bitmap of CPUs, one bit per CPU, so NCPUS must be <= 32.
 *	Declared volatile because it is changed by other processors.
 */
typedef	volatile long	cpu_set;
173 pt_entry_t
*dirbase
; /* page directory pointer register */
174 vm_offset_t pdirbase
; /* phys. address of dirbase */
175 int ref_count
; /* reference count */
176 decl_simple_lock_data(,lock
) /* lock on map */
177 struct pmap_statistics stats
; /* map statistics */
178 cpu_set cpus_using
; /* bitmap of cpus using pmap */
182 * Optimization avoiding some TLB flushes when switching to
183 * kernel-loaded threads. This is effective only for i386:
184 * Since user task, kernel task and kernel loaded tasks share the
185 * same virtual space (with appropriate protections), any pmap
186 * allows mapping kernel and kernel loaded tasks.
188 * The idea is to avoid switching to another pmap unnecessarily when
189 * switching to a kernel-loaded task, or when switching to the kernel
192 * We store the pmap we are really using (from which we fetched the
193 * dirbase value) in real_pmap[cpu_number()].
196 * current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
199 extern struct pmap
*real_pmap
[NCPUS
];
201 #include <i386/proc_reg.h>
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * set_dirbase).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is already
 * in use, don't do anything to the hardware, to avoid a TLB flush.
 */
/* NOTE(review): the "#if NCPUS > 1" opener was lost in extraction and is
 * restored here to match the visible #else/#endif pair. */
#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap,my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap,my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */
/*
 *	Switch this CPU to the page tables of (mypmap), skipping the CR3
 *	reload (and hence the TLB flush) when the CPU is already on a
 *	compatible pmap.  real_pmap[] tracks which pmap each CPU really has
 *	loaded.
 *	NOTE(review): the "*ppmap = (mypmap);" / "set_cr3(pdirbase);" lines
 *	and the closing braces were lost in extraction and are restored from
 *	upstream xnu — verify against the original file.
 */
#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (vm_offset_t)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap ) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}
/* NOTE(review): the "#if NCPUS > 1" opener was lost in extraction and is
 * restored here to match the visible #endif. */
#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;

/*
 *	External declarations for PMAP_ACTIVATE.
 */
extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);

#endif	/* NCPUS > 1 */
265 * Machine dependent routines that are used only for i386/i486/i860.
267 extern vm_offset_t (phystokv
)(
270 extern vm_offset_t (kvtophys
)(
273 extern pt_entry_t
*pmap_pte(
277 extern vm_offset_t
pmap_map(
283 extern vm_offset_t
pmap_map_bd(
289 extern void pmap_bootstrap(
290 vm_offset_t load_start
);
292 extern boolean_t
pmap_valid_page(
295 extern int pmap_list_resident_pages(
300 extern void flush_tlb(void);
301 extern void invalidate_icache(vm_offset_t addr
, unsigned cnt
, int phys
);
302 extern void flush_dcache(vm_offset_t addr
, unsigned count
, int phys
);
311 #include <kern/spl.h>
314 * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
315 * fields to control TLB invalidation on other CPUS.
318 #define PMAP_ACTIVATE_KERNEL(my_cpu) { \
321 * Let pmap updates proceed while we wait for this pmap. \
323 i_bit_clear((my_cpu), &cpus_active); \
326 * Lock the pmap to put this cpu in its active set. \
327 * Wait for updates here. \
329 simple_lock(&kernel_pmap->lock); \
332 * Mark that this cpu is using the pmap. \
334 i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
337 * Mark this cpu active - IPL will be lowered by \
340 i_bit_set((my_cpu), &cpus_active); \
342 simple_unlock(&kernel_pmap->lock); \
345 #define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
347 * Mark pmap no longer in use by this cpu even if \
348 * pmap is locked against updates. \
350 i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
/*
 *	Switch this CPU onto the pmap of (map).  The kernel pmap is loaded
 *	directly; a user pmap is loaded under its lock so concurrent pmap
 *	update operations see this CPU as inactive while CR3 is reloaded.
 *	NOTE(review): the "} else {" line was lost in extraction and is
 *	restored here — verify against upstream xnu.
 */
#define	PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register struct pmap	*tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
		/*							\
		 *	If this is the kernel pmap, switch to its page tables. \
		 */							\
		set_dirbase(kernel_pmap, my_cpu);			\
	}								\
	else {								\
		/*							\
		 *	Let pmap updates proceed while we wait for this pmap. \
		 */							\
		i_bit_clear((my_cpu), &cpus_active);			\
									\
		/*							\
		 *	Lock the pmap to put this cpu in its active set. \
		 *	Wait for updates here.				\
		 */							\
		simple_lock(&tpmap->lock);				\
									\
		/*							\
		 *	No need to invalidate the TLB - the entire user pmap \
		 *	will be invalidated by reloading dirbase.	\
		 */							\
		set_dirbase(tpmap, my_cpu);				\
									\
		/*							\
		 *	Mark this cpu active - IPL will be lowered by	\
		 *	load_context().					\
		 */							\
		i_bit_set((my_cpu), &cpus_active);			\
									\
		simple_unlock(&tpmap->lock);				\
	}								\
}
/*
 *	User-level activate/deactivate and context-switch helpers (MP case).
 *	Deactivation is a no-op; activation is bracketed at splhigh so a
 *	pmap update interrupt cannot arrive mid-switch.
 *	NOTE(review): the spl bracketing lines and "th->map = new_map;" were
 *	lost in extraction and are restored here — verify against upstream.
 */
#define PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)	{				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu)	{			\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu)	{		\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
	splx(spl);							\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}
/* LED indicator hook; a no-op unless a platform provides one. */
#define clear_led(cpu)

/*
 *	NOTE(review): the spl bracketing lines and closing braces below were
 *	lost in extraction and are restored from upstream xnu — verify.
 */
#define MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
}

#define MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splhigh();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
	clear_led(my_cpu);						\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
}
473 #else /* NCPUS > 1 */
476 * With only one CPU, we just have to indicate whether the pmap is
480 #define PMAP_ACTIVATE_KERNEL(my_cpu) { \
481 kernel_pmap->cpus_using = TRUE; \
484 #define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
485 kernel_pmap->cpus_using = FALSE; \
488 #define PMAP_ACTIVATE_MAP(map, my_cpu) \
489 set_dirbase(vm_map_pmap(map), my_cpu)
491 #define PMAP_DEACTIVATE_MAP(map, my_cpu)
493 #define PMAP_ACTIVATE_USER(th, my_cpu) \
494 PMAP_ACTIVATE_MAP(th->map, my_cpu)
496 #define PMAP_DEACTIVATE_USER(th, my_cpu) \
497 PMAP_DEACTIVATE_MAP(th->map, my_cpu)
499 #define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
500 if (old_th->map != new_th->map) { \
501 PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
502 PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
506 #define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
507 PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
509 PMAP_ACTIVATE_MAP(th->map, my_cpu); \
512 #endif /* NCPUS > 1 */
/* Machine-independent pmap interface pieces that are trivial on i386. */
#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
/* Attribute/cache-sync operations are not supported on this architecture. */
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_sync_caches_phys(pa) \
					(KERN_INVALID_ADDRESS)
530 #endif /* ASSEMBLER */
532 #endif /* _PMAP_MACHINE_ */