/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * File: pmap.h
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 * Date: 1985
 *
 * Machine-dependent structures for the physical map module.
 */

#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1

#ifndef ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>
#define PMAP_QUEUE 1
#ifdef PMAP_QUEUE
#include <kern/queue.h>
#endif

/*
 * Define the generic in terms of the specific
 */

#define INTEL_PGBYTES I386_PGBYTES
#define INTEL_PGSHIFT I386_PGSHIFT
#define intel_btop(x) i386_btop(x)
#define intel_ptob(x) i386_ptob(x)
#define intel_round_page(x) i386_round_page(x)
#define intel_trunc_page(x) i386_trunc_page(x)
#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
#define round_intel_to_vm(x) round_i386_to_vm(x)
#define vm_to_intel(x) vm_to_i386(x)

/*
 * i386/i486/i860 Page Table Entry
 */

#ifdef PAE
typedef uint64_t pdpt_entry_t;
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pmap_paddr_t;
#else
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
typedef uint32_t pmap_paddr_t;
#endif

#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define PD_ENTRY_NULL ((pd_entry_t *) 0)

#endif /* ASSEMBLER */

#ifdef PAE
#define NPGPTD 4
#define PDESHIFT 21
#define PTEMASK 0x1ff
#define PTEINDX 3
#else
#define NPGPTD 1
#define PDESHIFT 22
#define PTEMASK 0x3ff
#define PTEINDX 2
#endif
#define PTESHIFT 12

#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
#define PTESIZE sizeof(pt_entry_t) /* for assembly files */

#define INTEL_OFFMASK (I386_PGBYTES - 1)
#define PG_FRAME (~((pmap_paddr_t)PAGE_MASK))
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))

#define NBPTD (NPGPTD << PAGE_SHIFT)
#define NPDEPTD (NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE (1 << PDESHIFT)
#define PDEMASK (NBPDE - 1)
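
/*
 * Worked example (illustrative only): with 4 KB pages, the non-PAE
 * constants above give NPTEPG = 4096/4 = 1024 entries per page table and
 * NBPDE = 1 << 22 = 4 MB mapped per page directory entry; under PAE the
 * entries are 8 bytes, so NPTEPG = 4096/8 = 512 and NBPDE = 1 << 21 = 2 MB.
 */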

#define VM_WIMG_COPYBACK VM_MEM_COHERENT
#define VM_WIMG_DEFAULT VM_MEM_COHERENT
/* ?? intel ?? */
#define VM_WIMG_IO (VM_MEM_COHERENT | \
    VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES 256
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))

#ifndef NKPT
#ifdef PAE
#define NKPT 500 /* actual number of kernel page tables */
#else
#define NKPT 32 /* initial number of kernel page tables */
#endif
#endif
#ifndef NKPDE
#define NKPDE (KVA_PAGES - 1) /* addressable number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 */
#ifdef PAE
#define KPTDI (0x600) /* start of kernel virtual pde's */
#define PTDPTDI (0x7F4) /* ptd entry that points to ptd! */
#define APTDPTDI (0x7F8) /* alt ptd entry that points to APTD */
#define UMAXPTDI (0x5FC) /* ptd entry for user space end */
#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */
#else
#define KPTDI (0x300) /* start of kernel virtual pde's */
#define PTDPTDI (0x3FD) /* ptd entry that points to ptd! */
#define APTDPTDI (0x3FE) /* alt ptd entry that points to APTD */
#define UMAXPTDI (0x2FF) /* ptd entry for user space end */
#define UMAXPTEOFF (NPTEPG) /* pte entry for user space end */
#endif

#define KERNBASE VADDR(KPTDI,0)
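
/*
 * Illustrative arithmetic (not a definition): with the non-PAE constants
 * above, KERNBASE == VADDR(KPTDI, 0) == (0x300 << 22) == 0xC0000000; the
 * PAE constants yield the same base, since (0x600 << 21) == 0xC0000000.
 * The kernel therefore occupies the top 1 GB of the 32-bit address space.
 */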

/*
 * Convert address offset to page descriptor index
 */
#define pdenum(pmap, a) (((a) >> PDESHIFT) & PDEMASK)


/*
 * Convert page descriptor index to user virtual address
 */
#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT)

/*
 * Convert address offset to page table index
 */
#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
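
/*
 * Worked example (illustrative, non-PAE constants): for the virtual
 * address va == 0xC0123456,
 *    pdenum(pmap, va) == (0xC0123456 >> 22) & PDEMASK == 0x300
 *    ptenum(va)       == (0xC0123456 >> 12) & PTEMASK == 0x123
 *    va & INTEL_OFFMASK                               == 0x456
 * and VADDR(0x300, 0x123) + 0x456 reconstructs the original address.
 */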

/*
 * Hardware pte bit definitions (to be used directly on the ptes
 * without using the bit fields).
 */

#define INTEL_PTE_VALID 0x00000001
#define INTEL_PTE_WRITE 0x00000002
#define INTEL_PTE_RW 0x00000002
#define INTEL_PTE_USER 0x00000004
#define INTEL_PTE_WTHRU 0x00000008
#define INTEL_PTE_NCACHE 0x00000010
#define INTEL_PTE_REF 0x00000020
#define INTEL_PTE_MOD 0x00000040
#define INTEL_PTE_PS 0x00000080
#define INTEL_PTE_GLOBAL 0x00000100
#define INTEL_PTE_WIRED 0x00000200
#define INTEL_PTE_PFN /*0xFFFFF000*/ (~0xFFF)
#define INTEL_PTE_PTA 0x00000080

#define pa_to_pte(a) ((a) & INTEL_PTE_PFN) /* XXX */
#define pte_to_pa(p) ((p) & INTEL_PTE_PFN) /* XXX */
#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
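
/*
 * Minimal sketch (assumption: `pa` is a page-aligned physical address, not
 * defined here) of how these bits combine into a pte for a writable,
 * global kernel mapping:
 *
 *    pt_entry_t pte = pa_to_pte((pmap_paddr_t)pa)
 *        | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_GLOBAL;
 *
 * The physical frame can later be recovered with pte_to_pa(pte), and the
 * referenced/modified state read from the INTEL_PTE_REF/INTEL_PTE_MOD bits.
 */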

#define PMAP_DEFAULT_CACHE 0
#define PMAP_INHIBIT_CACHE 1
#define PMAP_GUARDED_CACHE 2
#define PMAP_ACTIVATE_CACHE 4
#define PMAP_NO_GUARD_CACHE 8


#ifndef ASSEMBLER

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */

extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde[], APTDpde[], Upde;

extern pd_entry_t *IdlePTD; /* physical address of "Idle" state directory */
#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va) (PTmap + i386_btop(va))
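
/*
 * Illustrative use of the recursive mapping (a sketch, assuming the page is
 * resident; `va` and `pa` are placeholder locals, not part of this header):
 *
 *    pt_entry_t *ptep = vtopte(va);
 *    if (*ptep & INTEL_PTE_VALID)
 *        pa = pte_to_pa(*ptep) | ((pmap_paddr_t)(va) & INTEL_OFFMASK);
 *
 * Because PTmap is itself reached through a pde that points back at the
 * page directory, vtopte(vtopte(va)) yields the pde covering va, as the
 * comment above notes.
 */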

typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
                               /* changed by other processors */
struct md_page {
    int pv_list_count;
    TAILQ_HEAD(,pv_entry) pv_list;
};

#include <vm/vm_page.h>

/*
 * For each vm_page_t, there is a list of all currently
 * valid virtual mappings of that page.  An entry is
 * a pv_entry_t; the list is the pv_table.
 */

struct pmap {
#ifdef PMAP_QUEUE
    queue_head_t pmap_link;        /* unordered queue of in use pmaps */
#endif
    pd_entry_t *dirbase;           /* page directory pointer register */
    pd_entry_t *pdirbase;          /* phys. address of dirbase */
    vm_object_t pm_obj;            /* object to hold pte's */
    int ref_count;                 /* reference count */
    decl_simple_lock_data(,lock)   /* lock on map */
    struct pmap_statistics stats;  /* map statistics */
    cpu_set cpus_using;            /* bitmap of cpus using pmap */
#ifdef PAE
    vm_offset_t pm_hold;           /* true pdpt zalloc addr */
    pdpt_entry_t *pm_pdpt;         /* KVA of pg dir ptr table */
    vm_offset_t pm_ppdpt;          /* phy addr pdpt
                                      should really be 32/64 bit */
#endif
};

#define PMAP_NWINDOWS 4
typedef struct {
    pt_entry_t *prv_CMAP;
    caddr_t prv_CADDR;
} mapwindow_t;

typedef struct cpu_pmap {
    mapwindow_t mapwindow[PMAP_NWINDOWS];
    struct pmap *real_pmap;
    struct pmap_update_list *update_list;
    volatile boolean_t update_needed;
} cpu_pmap_t;

/*
 * Should be rewritten in asm anyway.
 */
#define CM1 (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CMAP)
#define CM2 (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CMAP)
#define CM3 (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CMAP)
#define CM4 (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CMAP)
#define CA1 (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CADDR)
#define CA2 (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CADDR)
#define CA3 (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CADDR)
#define CA4 (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CADDR)
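
/*
 * Minimal usage sketch for a per-cpu mapping window (assumptions: `pa` is a
 * page-aligned physical address, `buffer` is a caller-supplied destination,
 * preemption stays disabled, and the TLB entry covering CA1 is invalidated
 * between the two steps; none of that is part of this header):
 *
 *    *CM1 = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW;
 *    bcopy((void *)CA1, buffer, I386_PGBYTES);
 *
 * The window's pte (CM1) is pointed at the frame, after which the page can
 * be touched through the window's fixed kernel virtual address (CA1).
 */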

typedef struct pmap_memory_regions {
    ppnum_t base;
    ppnum_t end;
    ppnum_t alloc;
    uint32_t type;
} pmap_memory_region_t;

extern unsigned pmap_memory_region_count;
extern unsigned pmap_memory_region_current;

#define PMAP_MEMORY_REGIONS_SIZE 32

extern pmap_memory_region_t pmap_memory_regions[];

/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * Since user task, kernel task and kernel loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping kernel and kernel loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in current_cpu_datap()->cpu_pmap->real_pmap.
 *
 * Invariant:
 * current_pmap() == current_cpu_datap()->cpu_pmap->real_pmap ||
 * current_pmap() == kernel_pmap.
 */
#define PMAP_REAL(my_cpu) (cpu_datap(my_cpu)->cpu_pmap->real_pmap)
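
/*
 * The invariant above, restated as a check (illustrative only; no such
 * assertion is made by this header):
 *
 *    assert(current_pmap() == PMAP_REAL(cpu_number()) ||
 *           current_pmap() == kernel_pmap);
 */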

#include <i386/proc_reg.h>
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * pmap_destroy).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is already
 * in use, don't do anything to the hardware, to avoid a TLB flush.
 */

#define PMAP_CPU_SET(pmap, my_cpu) i_bit_set(my_cpu, &((pmap)->cpus_using))
#define PMAP_CPU_CLR(pmap, my_cpu) i_bit_clear(my_cpu, &((pmap)->cpus_using))

#ifdef PAE
#define PDIRBASE pm_ppdpt
#else
#define PDIRBASE pdirbase
#endif
#define set_dirbase(mypmap, my_cpu) { \
    struct pmap **ppmap = &PMAP_REAL(my_cpu); \
    pmap_paddr_t pdirbase = (pmap_paddr_t)((mypmap)->PDIRBASE); \
    \
    if (*ppmap == (struct pmap *)NULL) { \
        *ppmap = (mypmap); \
        PMAP_CPU_SET((mypmap), my_cpu); \
        set_cr3(pdirbase); \
    } else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap) { \
        if (*ppmap != kernel_pmap) \
            PMAP_CPU_CLR(*ppmap, my_cpu); \
        *ppmap = (mypmap); \
        PMAP_CPU_SET((mypmap), my_cpu); \
        set_cr3(pdirbase); \
    } \
    assert((mypmap) == *ppmap || (mypmap) == kernel_pmap); \
}

/*
 * List of cpus that are actively using mapped memory.  Any
 * pmap update operation must wait for all cpus in this list.
 * Update operations must still be queued to cpus not in this
 * list.
 */
extern cpu_set cpus_active;

/*
 * List of cpus that are idle, but still operating, and will want
 * to see any kernel pmap updates when they become active.
 */
extern cpu_set cpus_idle;


#define cpu_update_needed(cpu) cpu_datap(cpu)->cpu_pmap->update_needed
#define cpu_update_list(cpu) cpu_datap(cpu)->cpu_pmap->update_list

/*
 * External declarations for PMAP_ACTIVATE.
 */

extern void process_pmap_updates(struct pmap *pmap);
extern void pmap_update_interrupt(void);

/*
 * Machine dependent routines that are used only for i386/i486/i860.
 */

extern vm_offset_t (kvtophys)(
    vm_offset_t addr);

extern pt_entry_t *pmap_pte(
    struct pmap *pmap,
    vm_offset_t addr);

extern vm_offset_t pmap_map(
    vm_offset_t virt,
    vm_offset_t start,
    vm_offset_t end,
    vm_prot_t prot);

extern vm_offset_t pmap_map_bd(
    vm_offset_t virt,
    vm_offset_t start,
    vm_offset_t end,
    vm_prot_t prot);

extern void pmap_bootstrap(
    vm_offset_t load_start);

extern boolean_t pmap_valid_page(
    ppnum_t pn);

extern int pmap_list_resident_pages(
    struct pmap *pmap,
    vm_offset_t *listp,
    int space);

extern void pmap_commpage_init(
    vm_offset_t kernel,
    vm_offset_t user,
    int count);
extern struct cpu_pmap *pmap_cpu_alloc(
    boolean_t is_boot_cpu);
extern void pmap_cpu_free(
    struct cpu_pmap *cp);

extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
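
/*
 * Relationship between the two lookup interfaces above (a sketch; `map` and
 * `va` are placeholders naming a pmap and a mapped virtual address, not
 * defined here): pmap_find_phys() returns the physical page number, which is
 * what pmap_pte()/pte_to_pa() yield after converting bytes to pages:
 *
 *    pt_entry_t *ptep = pmap_pte(map, va);
 *    ppnum_t pn = pmap_find_phys(map, (addr64_t)va);
 *
 * For a valid mapping, pn == (ppnum_t)i386_btop(pte_to_pa(*ptep)).
 */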

/*
 * Macros for speed.
 */


#include <kern/spl.h>

#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif

/*
 * For multiple CPUs, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 * fields to control TLB invalidation on other CPUs.
 */

#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
    \
    /* \
     * Let pmap updates proceed while we wait for this pmap. \
     */ \
    i_bit_clear((my_cpu), &cpus_active); \
    \
    /* \
     * Lock the pmap to put this cpu in its active set. \
     * Wait for updates here. \
     */ \
    simple_lock(&kernel_pmap->lock); \
    \
    /* \
     * Process invalidate requests for the kernel pmap. \
     */ \
    if (cpu_update_needed(my_cpu)) \
        process_pmap_updates(kernel_pmap); \
    \
    /* \
     * Mark that this cpu is using the pmap. \
     */ \
    i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
    \
    /* \
     * Mark this cpu active - IPL will be lowered by \
     * load_context(). \
     */ \
    i_bit_set((my_cpu), &cpus_active); \
    \
    simple_unlock(&kernel_pmap->lock); \
}

#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
    /* \
     * Mark pmap no longer in use by this cpu even if \
     * pmap is locked against updates. \
     */ \
    i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
    i_bit_clear((my_cpu), &cpus_active); \
    PMAP_REAL(my_cpu) = NULL; \
}

#define PMAP_ACTIVATE_MAP(map, my_cpu) { \
    register pmap_t tpmap; \
    \
    tpmap = vm_map_pmap(map); \
    if (tpmap == kernel_pmap) { \
        /* \
         * If this is the kernel pmap, switch to its page tables. \
         */ \
        set_dirbase(kernel_pmap, my_cpu); \
    } \
    else { \
        /* \
         * Let pmap updates proceed while we wait for this pmap. \
         */ \
        i_bit_clear((my_cpu), &cpus_active); \
        \
        /* \
         * Lock the pmap to put this cpu in its active set. \
         * Wait for updates here. \
         */ \
        simple_lock(&tpmap->lock); \
        \
        /* \
         * No need to invalidate the TLB - the entire user pmap \
         * will be invalidated by reloading dirbase. \
         */ \
        set_dirbase(tpmap, my_cpu); \
        \
        /* \
         * Mark this cpu active - IPL will be lowered by \
         * load_context(). \
         */ \
        i_bit_set((my_cpu), &cpus_active); \
        \
        simple_unlock(&tpmap->lock); \
    } \
}

#define PMAP_DEACTIVATE_MAP(map, my_cpu)

#define PMAP_ACTIVATE_USER(th, my_cpu) { \
    spl_t spl; \
    \
    spl = splhigh(); \
    PMAP_ACTIVATE_MAP(th->map, my_cpu) \
    splx(spl); \
}

#define PMAP_DEACTIVATE_USER(th, my_cpu)

#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \
    spl_t spl; \
    \
    if (old_th->map != new_th->map) { \
        spl = splhigh(); \
        PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \
        PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \
        splx(spl); \
    } \
}

#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \
    spl_t spl; \
    \
    spl = splhigh(); \
    PMAP_DEACTIVATE_MAP(th->map, my_cpu); \
    th->map = new_map; \
    PMAP_ACTIVATE_MAP(th->map, my_cpu); \
    splx(spl); \
}

#define MARK_CPU_IDLE(my_cpu) { \
    /* \
     * Mark this cpu idle, and remove it from the active set, \
     * since it is not actively using any pmap.  Signal_cpus \
     * will notice that it is idle, and avoid signaling it, \
     * but will queue the update request for when the cpu \
     * becomes active. \
     */ \
    int s = splhigh(); \
    i_bit_set((my_cpu), &cpus_idle); \
    i_bit_clear((my_cpu), &cpus_active); \
    splx(s); \
    set_led(my_cpu); \
}

#define MARK_CPU_ACTIVE(my_cpu) { \
    \
    int s = splhigh(); \
    /* \
     * If a kernel_pmap update was requested while this cpu \
     * was idle, process it as if we got the interrupt. \
     * Before doing so, remove this cpu from the idle set. \
     * Since we do not grab any pmap locks while we flush \
     * our TLB, another cpu may start an update operation \
     * before we finish.  Removing this cpu from the idle \
     * set assures that we will receive another update \
     * interrupt if this happens. \
     */ \
    i_bit_clear((my_cpu), &cpus_idle); \
    \
    if (cpu_update_needed(my_cpu)) \
        pmap_update_interrupt(); \
    \
    /* \
     * Mark that this cpu is now active. \
     */ \
    i_bit_set((my_cpu), &cpus_active); \
    splx(s); \
    clear_led(my_cpu); \
}

#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA) \
    (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
    (KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
    (KERN_INVALID_ADDRESS)

#endif /* ASSEMBLER */

#endif /* _PMAP_MACHINE_ */