/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License').  You may not use this file except in
 * compliance with the License.  Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.h
 *
 *	Authors:  Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread_act.h>
#include <kern/lock.h>

/*
 *	Define the generic in terms of the specific
 */

#define	INTEL_PGBYTES		I386_PGBYTES
#define	INTEL_PGSHIFT		I386_PGSHIFT
#define	intel_btop(x)		i386_btop(x)
#define	intel_ptob(x)		i386_ptob(x)
#define	intel_round_page(x)	i386_round_page(x)
#define	intel_trunc_page(x)	i386_trunc_page(x)
#define	trunc_intel_to_vm(x)	trunc_i386_to_vm(x)
#define	round_intel_to_vm(x)	round_i386_to_vm(x)
#define	vm_to_intel(x)		vm_to_i386(x)

/*
 *	i386/i486/i860 Page Table Entry
 */

typedef unsigned int	pt_entry_t;
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */

#define	INTEL_OFFMASK	0xfff	/* offset within page */
#define	PDESHIFT	22	/* page descriptor shift */
#define	PDEMASK		0x3ff	/* mask for page descriptor index */
#define	PTESHIFT	12	/* page table shift */
#define	PTEMASK		0x3ff	/* mask for page table index */


#define	VM_WIMG_COPYBACK	VM_MEM_COHERENT
#define	VM_WIMG_DEFAULT		VM_MEM_COHERENT
/* ?? intel ?? */
#define	VM_WIMG_IO		(VM_MEM_COHERENT | \
				 VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define	VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define	VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)

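/*
 * Note: the WIMG names (Write-through, cache-Inhibited, Memory-coherent,
 * Guarded) come from the PowerPC cache-attribute model; the definitions
 * above map each mode onto the nearest x86 caching behavior.
 */
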
/*
 *	Convert kernel virtual address to linear address
 */

#define	kvtolinear(a)	((a)+LINEAR_KERNEL_ADDRESS)

/*
 *	Convert address offset to page descriptor index
 */
#define	pdenum(pmap, a)	(((((pmap) == kernel_pmap) ?	\
			   kvtolinear(a) : (a))		\
			  >> PDESHIFT) & PDEMASK)

/*
 *	Convert page descriptor index to user virtual address
 */
#define	pdetova(a)	((vm_offset_t)(a) << PDESHIFT)

/*
 *	Convert address offset to page table index
 */
#define	ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)

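/*
 * Worked example (hypothetical user address 0x00c01234, non-kernel pmap):
 *
 *	pdenum(pmap, 0x00c01234) = (0x00c01234 >> 22) & 0x3ff = 0x003
 *	ptenum(0x00c01234)       = (0x00c01234 >> 12) & 0x3ff = 0x001
 *	0x00c01234 & INTEL_OFFMASK                            = 0x234
 *
 * For the kernel pmap, pdenum first rebases the address with kvtolinear()
 * before extracting the directory index.
 */
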
#define	NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
#define	NPDES	(intel_ptob(1)/sizeof(pt_entry_t))

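/*
 * With 4096-byte pages and 4-byte entries, NPTES and NPDES both evaluate
 * to 1024: one page holds exactly one page table or page directory.
 */
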
/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define	INTEL_PTE_VALID		0x00000001
#define	INTEL_PTE_WRITE		0x00000002
#define	INTEL_PTE_USER		0x00000004
#define	INTEL_PTE_WTHRU		0x00000008
#define	INTEL_PTE_NCACHE	0x00000010
#define	INTEL_PTE_REF		0x00000020
#define	INTEL_PTE_MOD		0x00000040
#define	INTEL_PTE_WIRED		0x00000200
#define	INTEL_PTE_PFN		0xfffff000
#define	INTEL_PTE_PTA		0x00000080

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

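/*
 * Example (hypothetical physical page 0x00123000): a valid, writable
 * kernel mapping is assembled as
 *
 *	pt_entry_t pte = pa_to_pte(0x00123000)
 *			 | INTEL_PTE_VALID | INTEL_PTE_WRITE;
 *
 * pte_to_pa(pte) then recovers 0x00123000, since the flag bits fall
 * outside INTEL_PTE_PFN.
 */
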
#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8


/*
 *	Convert page table entry to kernel virtual address
 */
#define	ptetokv(a)	(phystokv(pte_to_pa(a)))

#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	vm_offset_t	pdirbase;	/* phys. address of dirbase */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};

/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * since user tasks, the kernel task, and kernel-loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping the kernel and kernel-loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in real_pmap[cpu_number()].
 *
 * Invariant:
 * current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */

extern struct pmap	*real_pmap[NCPUS];

#include <i386/proc_reg.h>
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * pmap_destroy).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is
 * already in use, don't do anything to the hardware, to avoid a TLB flush.
 */

#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu) i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu) i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap,my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap,my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */


#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (struct pmap *)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}

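/*
 * Sketch of intended use: PMAP_ACTIVATE_MAP below reduces (directly, in
 * the uniprocessor case) to
 *
 *	set_dirbase(vm_map_pmap(map), my_cpu);
 *
 * so cr3 is reloaded only when this cpu is not already running on the
 * requested pmap; a switch to kernel_pmap typically leaves cr3 alone,
 * since every pmap maps the kernel.
 */
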
#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;


/*
 *	Quick test for pmap update requests.
 */
extern volatile
boolean_t	cpu_update_needed[NCPUS];

/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);
extern pmap_t		kernel_pmap;

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
 */
extern vm_offset_t	(phystokv)(
				vm_offset_t	pa);

extern vm_offset_t	(kvtophys)(
				vm_offset_t	addr);

extern pt_entry_t	*pmap_pte(
				struct pmap	*pmap,
				vm_offset_t	addr);

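/*
 * Example (sketch): translating a mapped kernel virtual address to its
 * physical address with the routine and macros declared in this file:
 *
 *	pt_entry_t *pte = pmap_pte(kernel_pmap, addr);
 *	if (pte != PT_ENTRY_NULL && (*pte & INTEL_PTE_VALID))
 *		pa = pte_to_pa(*pte) | (addr & INTEL_OFFMASK);
 */
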
extern vm_offset_t	pmap_map(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern vm_offset_t	pmap_map_bd(
				vm_offset_t	virt,
				vm_offset_t	start,
				vm_offset_t	end,
				vm_prot_t	prot);

extern void		pmap_bootstrap(
				vm_offset_t	load_start);

extern boolean_t	pmap_valid_page(
				vm_offset_t	pa);

extern int		pmap_list_resident_pages(
				struct pmap	*pmap,
				vm_offset_t	*listp,
				int		space);

extern void		flush_tlb(void);
extern void		invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void		flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t		pmap_find_phys(pmap_t map, addr64_t va);

/*
 *	Macros for speed.
 */

#if	NCPUS > 1

#include <kern/spl.h>

#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Process invalidate requests for the kernel pmap.	\
	 */								\
	if (cpu_update_needed[(my_cpu)])				\
		process_pmap_updates(kernel_pmap);			\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)	{				\
	register pmap_t		tpmap;					\
									\
	tpmap = vm_map_pmap(map);					\
	if (tpmap == kernel_pmap) {					\
		/*							\
		 *	If this is the kernel pmap, switch to its page tables. \
		 */							\
		set_dirbase(kernel_pmap, my_cpu);			\
	}								\
	else {								\
		/*							\
		 *	Let pmap updates proceed while we wait for this pmap. \
		 */							\
		i_bit_clear((my_cpu), &cpus_active);			\
									\
		/*							\
		 *	Lock the pmap to put this cpu in its active set. \
		 *	Wait for updates here.				\
		 */							\
		simple_lock(&tpmap->lock);				\
									\
		/*							\
		 *	No need to invalidate the TLB - the entire user pmap \
		 *	will be invalidated by reloading dirbase.	\
		 */							\
		set_dirbase(tpmap, my_cpu);				\
									\
		/*							\
		 *	Mark this cpu active - IPL will be lowered by	\
		 *	load_context().					\
		 */							\
		i_bit_set((my_cpu), &cpus_active);			\
									\
		simple_unlock(&tpmap->lock);				\
	}								\
}

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)	{				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_DEACTIVATE_USER(th, my_cpu)	{			\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)				\
	splx(spl);							\
}

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	spl_t		spl;						\
									\
	if (old_th->map != new_th->map) {				\
		spl = splhigh();					\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
		splx(spl);						\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	spl_t		spl;						\
									\
	spl = splhigh();						\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
	splx(spl);							\
}

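/*
 * Usage sketch (names hypothetical): a context-switch path would invoke
 *
 *	PMAP_SWITCH_CONTEXT(old_thread, new_thread, cpu_number());
 *
 * The map comparison skips all pmap and spl work when both threads share
 * an address space, e.g. two threads of the same task.
 */
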
#define	MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	int	s = splhigh();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
	set_led(my_cpu);						\
}

#define	MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	int	s = splhigh();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	if (cpu_update_needed[(my_cpu)])				\
		pmap_update_interrupt();				\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
	clear_led(my_cpu);						\
}

#else	/* NCPUS > 1 */

/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

#define	PMAP_ACTIVATE_MAP(map, my_cpu)					\
	set_dirbase(vm_map_pmap(map), my_cpu)

#define	PMAP_DEACTIVATE_MAP(map, my_cpu)

#define	PMAP_ACTIVATE_USER(th, my_cpu)					\
	PMAP_ACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_DEACTIVATE_USER(th, my_cpu)				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu)

#define	PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {			\
	if (old_th->map != new_th->map) {				\
		PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);		\
		PMAP_ACTIVATE_MAP(new_th->map, my_cpu);			\
	}								\
}

#define	PMAP_SWITCH_USER(th, new_map, my_cpu) {				\
	PMAP_DEACTIVATE_MAP(th->map, my_cpu);				\
	th->map = new_map;						\
	PMAP_ACTIVATE_MAP(th->map, my_cpu);				\
}

#endif	/* NCPUS > 1 */

#define	PMAP_CONTEXT(pmap, thread)

#define	pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_phys_address(frame)	((vm_offset_t) (intel_ptob(frame)))
#define	pmap_phys_to_frame(phys)	((int) (intel_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)
#define	pmap_attribute_cache_sync(addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)

#endif	/* ASSEMBLER */

#endif	/* _PMAP_MACHINE_ */