/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Machine-dependent structures for the physical map module.
 */
#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1
#ifndef	ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread_act.h>
#include <kern/lock.h>
/*
 *	Define the generic in terms of the specific
 */
#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)
/*
 *	i386/i486/i860  Page Table Entry
 */

typedef unsigned int	pt_entry_t;
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)
#endif	/* ASSEMBLER */
#define	INTEL_OFFMASK	0xfff	/* offset within page */
#define	PDESHIFT	22	/* page descriptor shift */
#define	PDEMASK		0x3ff	/* mask for page descriptor index */
#define	PTESHIFT	12	/* page table shift */
#define	PTEMASK		0x3ff	/* mask for page table index */
#define	VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define	VM_WIMG_DEFAULT		VM_MEM_COHERENT
#define	VM_WIMG_IO		(VM_MEM_COHERENT |	\
				VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define	VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define	VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
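/*
 * Illustrative sketch (not part of the original interface): picking a
 * WIMG caching mode when establishing a mapping.  Memory-mapped device
 * registers want the uncached, guarded VM_WIMG_IO mode; a frame buffer
 * is a typical candidate for write-combined VM_WIMG_WCOMB; ordinary
 * memory takes VM_WIMG_DEFAULT.  The names `is_device' and
 * `is_framebuffer' are hypothetical.
 */
#if 0	/* example only */
	unsigned int wimg_attr = is_device      ? VM_WIMG_IO
			       : is_framebuffer ? VM_WIMG_WCOMB
			       : VM_WIMG_DEFAULT;
#endif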
/*
 *	Convert kernel virtual address to linear address
 */

#define	kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)
/*
 *	Convert address offset to page descriptor index
 */
#define	pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)
/*
 *	Convert page descriptor index to user virtual address
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)
/*
 *	Convert address offset to page table index
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)
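/*
 * Worked example (illustrative only): decomposing the 32-bit virtual
 * address 0xC0123456 with the constants above.  For a non-kernel pmap
 * (no linear-address bias), PDESHIFT/PDEMASK select the top 10 bits,
 * PTESHIFT/PTEMASK the middle 10, and INTEL_OFFMASK the low 12.  The
 * names `upmap' and `va' are hypothetical.
 */
#if 0	/* example only */
	vm_offset_t	va    = 0xC0123456;
	unsigned int	pdidx = pdenum(upmap, va);	/* (va >> 22) & 0x3ff == 0x300 */
	unsigned int	ptidx = ptenum(va);		/* (va >> 12) & 0x3ff == 0x123 */
	vm_offset_t	off   = va & INTEL_OFFMASK;	/* 0x456 */
#endif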
#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define	INTEL_PTE_VALID		0x00000001
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_PTA		0x00000080
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000
#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)
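/*
 * Example (illustrative only): composing a valid, writable, user-mode
 * pte for a page-aligned physical address, then recovering the frame
 * address again.  The locals `pa', `pte' and `pa2' are hypothetical.
 */
#if 0	/* example only */
	vm_offset_t	pa  = 0x00345000;
	pt_entry_t	pte = pa_to_pte(pa) | INTEL_PTE_VALID
				| INTEL_PTE_WRITE | INTEL_PTE_USER;
	vm_offset_t	pa2 = pte_to_pa(pte);	/* 0x00345000 again */
#endif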
#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8
/*
 *	Convert page table entry to kernel virtual address
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))
#ifndef	ASSEMBLER

typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t		*dirbase;	/* page directory pointer register */
	vm_offset_t		pdirbase;	/* phys. address of dirbase */
	int			ref_count;	/* reference count */
	decl_simple_lock_data(,lock)		/* lock on map */
	struct pmap_statistics	stats;		/* map statistics */
	cpu_set			cpus_using;	/* bitmap of cpus using pmap */
};
/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * Since user task, kernel task and kernel loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping kernel and kernel loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in real_pmap[cpu_number()].
 *
 * Invariant:
 * current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */
extern struct pmap	*real_pmap[NCPUS];
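/*
 * Illustrative sketch (not part of the original interface): how the
 * invariant above could be asserted at a context-switch boundary.
 * `current_pmap' is assumed here to return the pmap of the running
 * thread's address space.
 */
#if 0	/* example only */
	assert(current_pmap() == real_pmap[cpu_number()] ||
	       current_pmap() == kernel_pmap);
#endif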
#include <i386/proc_reg.h>
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * current_pmap()).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is already
 * in use, don't do anything to the hardware, to avoid a TLB flush.
 */
#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap, my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap, my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */
#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (struct pmap *)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}
#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;
/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;
/*
 *	Quick test for pmap update requests.
 */
extern volatile
boolean_t	cpu_update_needed[NCPUS];
/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);
extern pmap_t		kernel_pmap;
#endif	/* NCPUS > 1 */
/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */

extern vm_offset_t	(phystokv)(
				vm_offset_t	pa);

extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern void		pmap_bootstrap(
				vm_offset_t	load_start);

extern boolean_t	pmap_valid_page(
				ppnum_t		pn);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		flush_tlb(void);
extern void		invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void		flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t		pmap_find_phys(pmap_t map, addr64_t va);
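/*
 * Example (illustrative only): translating a virtual address to a
 * physical address with the interfaces declared above.  A lookup via
 * pmap_pte() can fail, so the PT_ENTRY_NULL and INTEL_PTE_VALID checks
 * are required before using the pte.  `map' and `va' are hypothetical.
 */
#if 0	/* example only */
	pt_entry_t	*pte = pmap_pte(map, va);
	vm_offset_t	pa   = 0;

	if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
		pa = pte_to_pa(*pte) | (va & INTEL_OFFMASK);
#endif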
#if	NCPUS > 1

#include <kern/spl.h>

#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif
/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */
#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	/* Let pmap updates proceed while we wait for this pmap. */	\
	i_bit_clear((my_cpu), &cpus_active);				\
	/* Lock the pmap to put this cpu in its active set.		\
	 * Wait for updates here. */					\
	simple_lock(&kernel_pmap->lock);				\
	/* Process invalidate requests for the kernel pmap. */		\
	if (cpu_update_needed[(my_cpu)])				\
	    process_pmap_updates(kernel_pmap);				\
	/* Mark that this cpu is using the pmap. */			\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
	/* Mark this cpu active - IPL will be lowered by		\
	 * load_context(). */						\
	i_bit_set((my_cpu), &cpus_active);				\
	simple_unlock(&kernel_pmap->lock);				\
}
#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/* Mark pmap no longer in use by this cpu even if		\
	 * pmap is locked against updates. */				\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}
#define	PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register pmap_t		tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
	    /* If this is the kernel pmap, switch to its page tables. */ \
	    set_dirbase(kernel_pmap, my_cpu);				\
	}								\
	else {								\
	    /* Let pmap updates proceed while we wait for this pmap. */ \
	    i_bit_clear((my_cpu), &cpus_active);			\
	    /* Lock the pmap to put this cpu in its active set.	\
	     * Wait for updates here. */				\
	    simple_lock(&tpmap->lock);					\
	    /* No need to invalidate the TLB - the entire user pmap	\
	     * will be invalidated by reloading dirbase. */		\
	    set_dirbase(tpmap, my_cpu);					\
	    /* Mark this cpu active - IPL will be lowered by		\
	     * load_context(). */					\
	    i_bit_set((my_cpu), &cpus_active);				\
	    simple_unlock(&tpmap->lock);				\
	}								\
}
#define	PMAP_DEACTIVATE_MAP(map, my_cpu)
#define	PMAP_ACTIVATE_USER(th, my_cpu)	{				\
	spl_t	spl = splhigh();					\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu)	{			\
	spl_t	spl = splhigh();					\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}
#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t	spl = splhigh();					\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
	splx(spl);							\
}
#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t	spl = splhigh();					\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}
#define	MARK_CPU_IDLE(my_cpu)	{					\
	/* Mark this cpu idle, and remove it from the active set,	\
	 * since it is not actively using any pmap.  Signal_cpus	\
	 * will notice that it is idle, and avoid signaling it,	\
	 * but will queue the update request for when the cpu		\
	 * becomes active. */						\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
}
#define	MARK_CPU_ACTIVE(my_cpu)	{					\
	int	s = splhigh();						\
	/* If a kernel_pmap update was requested while this cpu	\
	 * was idle, process it as if we got the interrupt.		\
	 * Before doing so, remove this cpu from the idle set.		\
	 * Since we do not grab any pmap locks while we flush		\
	 * our TLB, another cpu may start an update operation		\
	 * before we finish.  Removing this cpu from the idle		\
	 * set assures that we will receive another update		\
	 * interrupt if this happens. */				\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	if (cpu_update_needed[(my_cpu)])				\
	    pmap_update_interrupt();					\
									\
	/* Mark that this cpu is now active. */				\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
}
#else	/* NCPUS > 1 */
/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */
#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}
#define	PMAP_ACTIVATE_MAP(map, my_cpu)					\
	set_dirbase(vm_map_pmap(map), my_cpu)

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)					\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_DEACTIVATE_USER(th, my_cpu)				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)
#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
}
#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
}
#endif	/* NCPUS > 1 */
#define	PMAP_CONTEXT(pmap, thread)

#define	pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */