/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.h
 *
 *	Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <platforms.h>
#include <mp_v1_1.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread_act.h>
#include <kern/lock.h>

/*
 *	Define the generic in terms of the specific
 */

#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

typedef unsigned int	pt_entry_t;
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */

#define	INTEL_OFFMASK	0xfff	/* offset within page */
#define	PDESHIFT	22	/* page descriptor shift */
#define	PDEMASK		0x3ff	/* mask for page descriptor index */
#define	PTESHIFT	12	/* page table shift */
#define	PTEMASK		0x3ff	/* mask for page table index */

#define	VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define	VM_WIMG_DEFAULT		VM_MEM_COHERENT
/* ?? intel ?? */
#define	VM_WIMG_IO		(VM_MEM_COHERENT | \
				VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define	VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define	VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
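
/*
 * Illustrative note (not part of the original interface): the WIMG values
 * above are the per-mapping cache-attribute encodings a caller would pick,
 * e.g. VM_WIMG_DEFAULT for ordinary write-back cached RAM and VM_WIMG_IO
 * (coherent, uncached, guarded) for memory-mapped device registers:
 *
 *	unsigned int cache_attr = is_device ? VM_WIMG_IO : VM_WIMG_DEFAULT;
 *
 * "is_device" is a hypothetical flag used only for this sketch.
 */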

/*
 *	Convert kernel virtual address to linear address
 */

#define	kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)

/*
 *	Convert address offset to page descriptor index
 */
#define	pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
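
/*
 * Illustrative sketch (not in the original header): how a 32-bit virtual
 * address decomposes under two-level i386 paging using the constants above,
 * for a hypothetical user pmap "map" and address "va" (the kernel pmap case
 * is first rebased through kvtolinear() inside pdenum()):
 *
 *	unsigned int pde_idx = pdenum(map, va);		// bits 31..22
 *	unsigned int pte_idx = ptenum(va);		// bits 21..12
 *	unsigned int offset  = va & INTEL_OFFMASK;	// bits 11..0
 */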

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define	INTEL_PTE_VALID		0x00000001
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000
#define	INTEL_PTE_PTA		0x00000080

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)
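
/*
 * Illustrative sketch (not in the original header): composing and
 * inspecting a pte with the macros above.  "pa" is a hypothetical
 * page-aligned physical address.
 *
 *	pt_entry_t pte = pa_to_pte(pa)
 *			 | INTEL_PTE_VALID | INTEL_PTE_WRITE;	// present, writable
 *	vm_offset_t frame_pa = pte_to_pa(pte);			// recovers "pa"
 *	pte_increment_pa(pte);					// advance to the next 4K page
 */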

#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8


/*
 *	Convert page table entry to kernel virtual address
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))

#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	vm_offset_t	pdirbase;	/* phys. address of dirbase */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};

/*
 *	Optimization avoiding some TLB flushes when switching to
 *	kernel-loaded threads.  This is effective only for i386:
 *	since user tasks, the kernel task, and kernel-loaded tasks all share
 *	the same virtual space (with appropriate protections), any pmap
 *	can map the kernel and kernel-loaded tasks.
 *
 *	The idea is to avoid switching to another pmap unnecessarily when
 *	switching to a kernel-loaded task, or when switching to the kernel
 *	itself.
 *
 *	We store the pmap we are really using (from which we fetched the
 *	dirbase value) in real_pmap[cpu_number()].
 *
 *	Invariant:
 *	current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */

extern struct pmap	*real_pmap[NCPUS];

#include <i386/proc_reg.h>
/*
 *	If switching to the kernel pmap, don't incur the TLB cost of switching
 *	to its page tables, since all maps include the kernel map as a subset.
 *	Simply record that this CPU is logically on the kernel pmap (see
 *	pmap_destroy).
 *
 *	Similarly, if switching to a pmap (other than kernel_pmap) that is
 *	already in use, don't do anything to the hardware, to avoid a TLB
 *	flush.
 */

#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap,my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap,my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */

#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (struct pmap *) NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}
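
/*
 * Illustrative only: PMAP_ACTIVATE_MAP() below is the normal caller of
 * set_dirbase(); a direct use on the running processor would look like
 *
 *	set_dirbase(vm_map_pmap(map), cpu_number());
 *
 * where "map" is a hypothetical vm_map_t and cpu_number() is assumed to
 * name the current cpu, as it does elsewhere in the pmap code.
 */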

#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;


/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */
extern vm_offset_t	(phystokv)(
				vm_offset_t	pa);

extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern void		pmap_bootstrap(
				vm_offset_t	load_start);

extern boolean_t	pmap_valid_page(
				vm_offset_t	pa);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		flush_tlb(void);
extern void		invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void		flush_dcache(vm_offset_t addr, unsigned count, int phys);


/*
 *	Macros for speed.
 */

#if	NCPUS > 1

#include <kern/spl.h>

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register struct pmap	*tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
		/*							\
		 *	If this is the kernel pmap, switch to its page tables.	\
		 */							\
		set_dirbase(kernel_pmap, my_cpu);			\
	}								\
	else {								\
		/*							\
		 *	Let pmap updates proceed while we wait for this pmap.	\
		 */							\
		i_bit_clear((my_cpu), &cpus_active);			\
									\
		/*							\
		 *	Lock the pmap to put this cpu in its active set.	\
		 *	Wait for updates here.				\
		 */							\
		simple_lock(&tpmap->lock);				\
									\
		/*							\
		 *	No need to invalidate the TLB - the entire user pmap	\
		 *	will be invalidated by reloading dirbase.	\
		 */							\
		set_dirbase(tpmap, my_cpu);				\
									\
		/*							\
		 *	Mark this cpu active - IPL will be lowered by	\
		 *	load_context().					\
		 */							\
		i_bit_set((my_cpu), &cpus_active);			\
									\
		simple_unlock(&tpmap->lock);				\
	}								\
}

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)	{				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu)	{			\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t		spl;						\
									\
	if (old_th->map != new_th->map) {				\
		spl = splhigh();					\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
		splx(spl);						\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}

#if	MP_V1_1
#define	set_led(cpu)
#define	clear_led(cpu)
#endif	/* MP_V1_1 */

#define	MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
	set_led(my_cpu);						\
}

#define	MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splhigh();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
	clear_led(my_cpu);						\
}

#else	/* NCPUS > 1 */

/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu) {					\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)					\
	set_dirbase(vm_map_pmap(map), my_cpu)

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)					\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_DEACTIVATE_USER(th, my_cpu)				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
}

#endif	/* NCPUS > 1 */
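
/*
 * Illustrative only (hypothetical caller, not from this file): a
 * scheduler-side user of these macros would pair them roughly as
 *
 *	PMAP_SWITCH_CONTEXT(old_thread, new_thread, cpu_number());
 *	...
 *	PMAP_SWITCH_USER(thread, new_map, cpu_number());
 *
 * "old_thread", "new_thread", "thread" and "new_map" are placeholder
 * names used only for this sketch.
 */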

#define	PMAP_CONTEXT(pmap, thread)

#define	pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
				(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
				(KERN_INVALID_ADDRESS)

#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */