/*
 * osfmk/i386/pmap.h (Apple xnu-123.5)
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52
53/*
54 * File: pmap.h
55 *
56 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
57 * Date: 1985
58 *
59 * Machine-dependent structures for the physical map module.
60 */
61
62#ifndef _PMAP_MACHINE_
63#define _PMAP_MACHINE_ 1
64
65#ifndef ASSEMBLER
66
67#include <platforms.h>
68#include <mp_v1_1.h>
69
70#include <mach/kern_return.h>
71#include <mach/machine/vm_types.h>
72#include <mach/vm_prot.h>
73#include <mach/vm_statistics.h>
74#include <mach/machine/vm_param.h>
75#include <kern/kern_types.h>
76#include <kern/thread_act.h>
77#include <kern/lock.h>
78
/*
 * Define the generic page-size/rounding macros in terms of the
 * i386-specific ones (this file historically also served the
 * i486 and i860 ports, hence the INTEL_ prefix).
 */

#define INTEL_PGBYTES		I386_PGBYTES	/* bytes per hardware page */
#define INTEL_PGSHIFT		I386_PGSHIFT	/* page shift (log2 of page size) */
#define intel_btop(x)		i386_btop(x)	/* bytes -> page number */
#define intel_ptob(x)		i386_ptob(x)	/* page number -> bytes */
#define intel_round_page(x)	i386_round_page(x)	/* round up to page boundary */
#define intel_trunc_page(x)	i386_trunc_page(x)	/* truncate to page boundary */
#define trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define round_intel_to_vm(x)	round_i386_to_vm(x)
#define vm_to_intel(x)		vm_to_i386(x)
92
/*
 * i386/i486/i860 Page Table Entry: a 32-bit word combining the
 * physical frame number with the hardware attribute bits defined
 * below (INTEL_PTE_*).
 */

typedef unsigned int	pt_entry_t;
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */
101
#define INTEL_OFFMASK	0xfff	/* offset within page (low 12 bits) */
#define PDESHIFT	22	/* page descriptor shift */
#define PDEMASK		0x3ff	/* mask for page descriptor index (10 bits) */
#define PTESHIFT	12	/* page table shift */
#define PTEMASK		0x3ff	/* mask for page table index (10 bits) */
107
/*
 * Convert kernel virtual address to linear address.
 * (The kernel is mapped at a fixed offset within the linear space.)
 */

#define kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)

/*
 * Convert address offset to page descriptor (directory) index.
 * Kernel-pmap addresses are first translated to linear addresses.
 */
#define pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)

/*
 * Convert page descriptor index to user virtual address.
 */
#define pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 * Convert address offset to page table index.
 */
#define ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

/*
 * Entries per table/directory: one page of pt_entry_t-sized entries
 * (the PDE and PTE formats are the same size on this hardware).
 */
#define NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
133
/*
 * Hardware pte bit definitions (to be used directly on the ptes
 * without using the bit fields).
 */

#define INTEL_PTE_VALID		0x00000001	/* present */
#define INTEL_PTE_WRITE		0x00000002	/* writable */
#define INTEL_PTE_USER		0x00000004	/* user-mode access allowed */
#define INTEL_PTE_WTHRU		0x00000008	/* write-through caching */
#define INTEL_PTE_NCACHE	0x00000010	/* cache disabled */
#define INTEL_PTE_REF		0x00000020	/* referenced (set by hardware) */
#define INTEL_PTE_MOD		0x00000040	/* modified/dirty (set by hardware) */
#define INTEL_PTE_WIRED		0x00000200	/* software-defined: wired (hardware-available bit) */
#define INTEL_PTE_PFN		0xfffff000	/* page frame number field */

#define pa_to_pte(a)		((a) & INTEL_PTE_PFN)	/* phys addr -> pte frame field */
#define pte_to_pa(p)		((p) & INTEL_PTE_PFN)	/* pte -> phys page base address */
#define pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)	/* advance pte's frame by one page */

/*
 * Convert page table entry to kernel virtual address
 */
#define ptetokv(a)	(phystokv(pte_to_pa(a)))
157
#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

/*
 * Machine-dependent physical map: one per address space.
 */
struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	vm_offset_t	pdirbase;	/* phys. address of dirbase */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};
170
/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * Since user task, kernel task and kernel loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping kernel and kernel loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in real_pmap[cpu_number()].
 *
 * Invariant:
 * current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */

/* Per-cpu record of the pmap whose page tables are actually loaded. */
extern struct pmap	*real_pmap[NCPUS];

#include <i386/proc_reg.h>
192/*
193 * If switching to the kernel pmap, don't incur the TLB cost of switching
194 * to its page tables, since all maps include the kernel map as a subset.
195 * Simply record that this CPU is logically on the kernel pmap (see
196 * pmap_destroy).
197 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is
 * already in use, don't do anything to the hardware, to avoid a TLB flush.
200 */
201
/*
 * Maintain pmap->cpus_using membership: a real per-cpu bitmap on MP
 * configurations, a simple boolean flag on a uniprocessor.
 */
#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap,my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap,my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */
209
210
/*
 * set_dirbase(mypmap, my_cpu): make this cpu use (mypmap)'s page
 * directory, reloading cr3 (and thereby flushing the TLB) only when
 * strictly necessary:
 *   - first pmap ever loaded on this cpu: record it and load cr3;
 *   - switching to a different user pmap: drop the old pmap's cpu
 *     bit, record the new one, and load cr3;
 *   - switching to kernel_pmap, or to the pmap already loaded: no-op,
 *     since every pmap maps the kernel (see real_pmap comment above).
 *
 * NB: *ppmap is a struct pmap *, so it is compared against a pointer
 * null; the previous (vm_offset_t)NULL cast was a pointer/integer
 * type mismatch.
 */
#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (struct pmap *) NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap ) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}
228
#if	NCPUS > 1
/*
 * List of cpus that are actively using mapped memory.  Any
 * pmap update operation must wait for all cpus in this list.
 * Update operations must still be queued to cpus not in this
 * list.
 */
extern cpu_set		cpus_active;

/*
 * List of cpus that are idle, but still operating, and will want
 * to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;


/*
 * External declarations for PMAP_ACTIVATE.
 */

/* Apply queued pmap updates for this cpu; presumably drains the
 * pending-update queue — confirm against the pmap.c implementation. */
extern void		process_pmap_updates(struct pmap *pmap);
/* Handler invoked on a cross-cpu pmap-update interrupt. */
extern void		pmap_update_interrupt(void);

#endif	/* NCPUS > 1 */
253
/*
 * Machine dependent routines that are used only for i386/i486/i860.
 */

/* Translate a physical address to a kernel virtual address. */
extern vm_offset_t	(phystokv)(
				vm_offset_t	pa);

/* Translate a kernel virtual address to a physical address. */
extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

/* Return a pointer to the pte mapping addr in pmap (PT_ENTRY_NULL
 * presumably when unmapped — confirm against pmap.c). */
extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

/* Map physical range [start, end) at virtual address virt with the
 * given protection; returns the next free virtual address. */
extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

/* Like pmap_map; "bd" variant used for boot/device mappings —
 * TODO confirm exact semantics against pmap.c. */
extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

/* Bootstrap the pmap module at system startup. */
extern void		pmap_bootstrap(
				vm_offset_t	load_start);

/* TRUE if pa is a valid (managed) physical page. */
extern boolean_t	pmap_valid_page(
				vm_offset_t	pa);

/* Fill listp (capacity 'space') with the pmap's resident pages;
 * presumably returns the count — confirm against pmap.c. */
extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		flush_tlb(void);
extern void		invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void		flush_dcache(vm_offset_t addr, unsigned count, int phys);
293
294
/*
 * Macros for speed.
 */

#if	NCPUS > 1

#include <kern/spl.h>

/*
 * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 * fields to control TLB invalidation on other CPUS.
 */

/*
 * Put this cpu into kernel_pmap's active set.  The cpu leaves
 * cpus_active while waiting on the pmap lock so pmap updates are not
 * stalled, and re-enters it before unlocking.
 */
#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}
334
/*
 * Remove this cpu from kernel_pmap's using set (no lock taken).
 */
#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}
342
/*
 * Activate the pmap of (map) on this cpu.  The kernel pmap is loaded
 * directly; a user pmap is loaded under its lock, with this cpu
 * temporarily out of cpus_active so pmap updates are not held up.
 */
#define PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register struct pmap	*tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
	    /*								\
	     *	If this is the kernel pmap, switch to its page tables.	\
	     */								\
	    set_dirbase(kernel_pmap, my_cpu);				\
	}								\
	else {								\
	    /*								\
	     *	Let pmap updates proceed while we wait for this pmap.	\
	     */								\
	    i_bit_clear((my_cpu), &cpus_active);			\
									\
	    /*								\
	     *	Lock the pmap to put this cpu in its active set.	\
	     *	Wait for updates here.					\
	     */								\
	    simple_lock(&tpmap->lock);					\
									\
	    /*								\
	     *	No need to invalidate the TLB - the entire user pmap	\
	     *	will be invalidated by reloading dirbase.		\
	     */								\
	    set_dirbase(tpmap, my_cpu);					\
									\
	    /*								\
	     *	Mark this cpu active - IPL will be lowered by		\
	     *	load_context().						\
	     */								\
	    i_bit_set((my_cpu), &cpus_active);				\
									\
	    simple_unlock(&tpmap->lock);				\
	}								\
}

/*
 * Intentionally empty: nothing to do when a map is deactivated —
 * the actual switch is deferred to the next activate (see the
 * real_pmap comment above).
 */
#define PMAP_DEACTIVATE_MAP(map, my_cpu)
382
/*
 * Activate/deactivate the map of thread (th) on this cpu, with
 * interrupts blocked (splhigh) around the map operation.
 */
#define	PMAP_ACTIVATE_USER(th, my_cpu) {				\
	spl_t	s;							\
									\
	s = splhigh();							\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(s);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu) {				\
	spl_t	s;							\
									\
	s = splhigh();							\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(s);							\
}
398
/*
 * Context-switch address spaces: if the outgoing and incoming
 * threads share the same map nothing happens; otherwise swap the
 * maps at splhigh.
 */
#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t	s;							\
									\
	if (old_th->map != new_th->map) {				\
		s = splhigh();						\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
		splx(s);						\
	}								\
}

/*
 * Replace thread (th)'s map with new_map: deactivate the old map,
 * install the new one, and activate it — all at splhigh.
 */
#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t	s;							\
									\
	s = splhigh();							\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(s);							\
}
419
#if	MP_V1_1
/*
 * set_led/clear_led are no-ops under MP_V1_1.
 * NOTE(review): MARK_CPU_IDLE/MARK_CPU_ACTIVE below use these
 * unconditionally — other MP configurations presumably define them
 * elsewhere; confirm before enabling a non-MP_V1_1 MP build.
 */
#define set_led(cpu)
#define clear_led(cpu)
#endif	/* MP_V1_1 */
424
/*
 * Move this cpu from the active set to the idle set (interrupts
 * blocked while both bitmaps are updated).
 */
#define MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
	set_led(my_cpu);						\
}

/*
 * Move this cpu from the idle set back to the active set.
 */
#define MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splhigh();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
	clear_led(my_cpu);						\
}
462
#else	/* NCPUS > 1 */

/*
 * With only one CPU, we just have to indicate whether the pmap is
 * in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

/* Load the map's page directory; no cross-cpu bookkeeping needed. */
#define	PMAP_ACTIVATE_MAP(map, my_cpu)		\
	set_dirbase(vm_map_pmap(map), my_cpu)

/* Intentionally empty: nothing to do on deactivation. */
#define PMAP_DEACTIVATE_MAP(map, my_cpu)

#define PMAP_ACTIVATE_USER(th, my_cpu)		\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)

#define PMAP_DEACTIVATE_USER(th, my_cpu)	\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)

/* Switch maps only when the two threads differ in address space. */
#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
}

/* Install new_map as th's map and activate it. */
#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
}

#endif	/* NCPUS > 1 */
503
/* No machine-dependent per-thread pmap context on this architecture. */
#define PMAP_CONTEXT(pmap, thread)

/* TRUE iff VA lies within the kernel's virtual address range. */
#define pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))	/* frame number -> phys addr */
#define pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))	/* phys addr -> frame number */
/* pmap-level copy is a no-op here (handled by the machine-independent VM). */
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
/* No page attributes supported: always rejects the request. */
#define pmap_attribute(pmap,addr,size,attr,value) \
	(KERN_INVALID_ADDRESS)
#endif	/* ASSEMBLER */
516
517#endif /* _PMAP_MACHINE_ */