/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * File:        pmap.h
 *
 * Authors:     Avadis Tevanian, Jr., Michael Wayne Young
 * Date:        1985
 *
 * Machine-dependent structures for the physical map module.
 */

#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1

#ifndef ASSEMBLER

#include <platforms.h>

#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/lock.h>
#define PMAP_QUEUE 1
#ifdef PMAP_QUEUE
#include <kern/queue.h>
#endif

/*
 * Define the generic in terms of the specific
 */

#define INTEL_PGBYTES           I386_PGBYTES
#define INTEL_PGSHIFT           I386_PGSHIFT
#define intel_btop(x)           i386_btop(x)
#define intel_ptob(x)           i386_ptob(x)
#define intel_round_page(x)     i386_round_page(x)
#define intel_trunc_page(x)     i386_trunc_page(x)
#define trunc_intel_to_vm(x)    trunc_i386_to_vm(x)
#define round_intel_to_vm(x)    round_i386_to_vm(x)
#define vm_to_intel(x)          vm_to_i386(x)

/*
 * i386/i486/i860 Page Table Entry
 */

#ifdef PAE
typedef uint64_t        pdpt_entry_t;
typedef uint64_t        pt_entry_t;
typedef uint64_t        pd_entry_t;
typedef uint64_t        pmap_paddr_t;
#else
typedef uint32_t        pt_entry_t;
typedef uint32_t        pd_entry_t;
typedef uint32_t        pmap_paddr_t;
#endif

#define PT_ENTRY_NULL   ((pt_entry_t *) 0)
#define PD_ENTRY_NULL   ((pd_entry_t *) 0)

#endif /* ASSEMBLER */

#ifdef PAE
#define NPGPTD          4
#define PDESHIFT        21
#define PTEMASK         0x1ff
#define PTEINDX         3
#else
#define NPGPTD          1
#define PDESHIFT        22
#define PTEMASK         0x3ff
#define PTEINDX         2
#endif
#define PTESHIFT        12

#define PDESIZE         sizeof(pd_entry_t)      /* for assembly files */
#define PTESIZE         sizeof(pt_entry_t)      /* for assembly files */

#define INTEL_OFFMASK   (I386_PGBYTES - 1)
#define PG_FRAME        (~((pmap_paddr_t)PAGE_MASK))
#define NPTEPG          (PAGE_SIZE/(sizeof (pt_entry_t)))

#define NBPTD           (NPGPTD << PAGE_SHIFT)
#define NPDEPTD         (NBPTD / (sizeof (pd_entry_t)))
#define NPDEPG          (PAGE_SIZE/(sizeof (pd_entry_t)))
#define NBPDE           (1 << PDESHIFT)
#define PDEMASK         (NBPDE - 1)
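
/*
 * For reference, the values these expand to under each configuration,
 * assuming the usual 4K PAGE_SIZE (a quick arithmetic check, not an
 * additional definition):
 *
 *              non-PAE                 PAE
 *   NPTEPG     4096/4  = 1024          4096/8  = 512
 *   NPDEPG     4096/4  = 1024          4096/8  = 512
 *   NBPDE      1 << 22 = 4MB           1 << 21 = 2MB
 *   NBPTD      1 page  = 4KB           4 pages = 16KB
 */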

#define VM_WIMG_COPYBACK        VM_MEM_COHERENT
#define VM_WIMG_DEFAULT         VM_MEM_COHERENT
/* ?? intel ?? */
#define VM_WIMG_IO              (VM_MEM_COHERENT | \
                                 VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU           (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB           (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES       256
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDESHIFT)|((pti)<<PTESHIFT)))

#ifndef NKPT
#ifdef PAE
#define NKPT            500     /* actual number of kernel page tables */
#else
#define NKPT            32      /* initial number of kernel page tables */
#endif
#endif
#ifndef NKPDE
#define NKPDE           (KVA_PAGES - 1) /* addressable number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 */
#ifdef PAE
#define KPTDI           (0x600)         /* start of kernel virtual pde's */
#define PTDPTDI         (0x7F4)         /* ptd entry that points to ptd! */
#define APTDPTDI        (0x7F8)         /* alt ptd entry that points to APTD */
#define UMAXPTDI        (0x5FC)         /* ptd entry for user space end */
#define UMAXPTEOFF      (NPTEPG)        /* pte entry for user space end */
#else
#define KPTDI           (0x300)         /* start of kernel virtual pde's */
#define PTDPTDI         (0x3FD)         /* ptd entry that points to ptd! */
#define APTDPTDI        (0x3FE)         /* alt ptd entry that points to APTD */
#define UMAXPTDI        (0x2FF)         /* ptd entry for user space end */
#define UMAXPTEOFF      (NPTEPG)        /* pte entry for user space end */
#endif

#define KERNBASE        VADDR(KPTDI,0)
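
/*
 * Worked example, using the KPTDI/PDESHIFT values above: KERNBASE
 * evaluates to the same address in both configurations,
 *
 *   non-PAE:  0x300 << 22 == 0xC0000000
 *   PAE:      0x600 << 21 == 0xC0000000
 *
 * i.e. the kernel's virtual window begins at 3GB.
 */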

/*
 * Convert address offset to page descriptor index
 */
#define pdenum(pmap, a) (((a) >> PDESHIFT) & PDEMASK)


/*
 * Convert page descriptor index to user virtual address
 */
#define pdetova(a)      ((vm_offset_t)(a) << PDESHIFT)

/*
 * Convert address offset to page table index
 */
#define ptenum(a)       (((a) >> PTESHIFT) & PTEMASK)
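
/*
 * Example decomposition (non-PAE values) of va == 0xC0001234:
 *
 *   pdenum(pmap, va) == (va >> PDESHIFT) & PDEMASK == 0x300
 *   ptenum(va)       == (va >> PTESHIFT) & PTEMASK == 0x001
 *   va & INTEL_OFFMASK                             == 0x234
 */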

/*
 * Hardware pte bit definitions (to be used directly on the ptes
 * without using the bit fields).
 */

#define INTEL_PTE_VALID         0x00000001
#define INTEL_PTE_WRITE         0x00000002
#define INTEL_PTE_RW            0x00000002
#define INTEL_PTE_USER          0x00000004
#define INTEL_PTE_WTHRU         0x00000008
#define INTEL_PTE_NCACHE        0x00000010
#define INTEL_PTE_REF           0x00000020
#define INTEL_PTE_MOD           0x00000040
#define INTEL_PTE_PS            0x00000080
#define INTEL_PTE_GLOBAL        0x00000100
#define INTEL_PTE_WIRED         0x00000200
#define INTEL_PTE_PFN           /*0xFFFFF000*/ (~0xFFF)
#define INTEL_PTE_PTA           0x00000080

#define pa_to_pte(a)            ((a) & INTEL_PTE_PFN) /* XXX */
#define pte_to_pa(p)            ((p) & INTEL_PTE_PFN) /* XXX */
#define pte_increment_pa(p)     ((p) += INTEL_OFFMASK+1)
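
/*
 * Illustrative sketch only (not a definition from this file): a valid,
 * kernel-writable, cached pte for physical address "pa" would normally
 * be assembled as
 *
 *   pt_entry_t pte = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW;
 *
 * with INTEL_PTE_NCACHE or INTEL_PTE_WTHRU or'ed in for uncached or
 * write-through mappings, and INTEL_PTE_USER added for user access.
 */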

#define PMAP_DEFAULT_CACHE      0
#define PMAP_INHIBIT_CACHE      1
#define PMAP_GUARDED_CACHE      2
#define PMAP_ACTIVATE_CACHE     4
#define PMAP_NO_GUARD_CACHE     8


#ifndef ASSEMBLER

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */

extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde[], APTDpde[], Upde;

extern pd_entry_t *IdlePTD;     /* physical address of "Idle" state directory */
#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define vtopte(va)      (PTmap + i386_btop(va))
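
/*
 * Sketch of a lookup through the recursive mapping (hypothetical helper,
 * not declared here; assumes "va" is currently mapped):
 *
 *   static inline pmap_paddr_t
 *   va_to_pa(vm_offset_t va)
 *   {
 *           return pte_to_pa(*vtopte(va)) | (va & INTEL_OFFMASK);
 *   }
 */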


typedef volatile long   cpu_set;        /* set of CPUs - must be <= 32 */
                                        /* changed by other processors */
struct md_page {
        int pv_list_count;
        TAILQ_HEAD(,pv_entry) pv_list;
};

#include <vm/vm_page.h>

/*
 * For each vm_page_t, there is a list of all currently
 * valid virtual mappings of that page.  An entry is
 * a pv_entry_t; the list is the pv_table.
 */

struct pmap {
#ifdef PMAP_QUEUE
        queue_head_t            pmap_link;      /* unordered queue of in use pmaps */
#endif
        pd_entry_t              *dirbase;       /* page directory pointer register */
        pd_entry_t              *pdirbase;      /* phys. address of dirbase */
        vm_object_t             pm_obj;         /* object to hold pte's */
        int                     ref_count;      /* reference count */
        decl_simple_lock_data(,lock)            /* lock on map */
        struct pmap_statistics  stats;          /* map statistics */
        cpu_set                 cpus_using;     /* bitmap of cpus using pmap */
#ifdef PAE
        vm_offset_t             pm_hold;        /* true pdpt zalloc addr */
        pdpt_entry_t            *pm_pdpt;       /* KVA of pg dir ptr table */
        vm_offset_t             pm_ppdpt;       /* phy addr pdpt
                                                   should really be 32/64 bit */
#endif
};

#define PMAP_NWINDOWS   4
typedef struct {
        pt_entry_t      *prv_CMAP;
        caddr_t         prv_CADDR;
} mapwindow_t;

typedef struct cpu_pmap {
        mapwindow_t             mapwindow[PMAP_NWINDOWS];
        struct pmap             *real_pmap;
        struct pmap_update_list *update_list;
        volatile boolean_t      update_needed;
} cpu_pmap_t;

/*
 * Should be rewritten in asm anyway.
 */
#define CM1     (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CMAP)
#define CM2     (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CMAP)
#define CM3     (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CMAP)
#define CM4     (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CMAP)
#define CA1     (current_cpu_datap()->cpu_pmap->mapwindow[0].prv_CADDR)
#define CA2     (current_cpu_datap()->cpu_pmap->mapwindow[1].prv_CADDR)
#define CA3     (current_cpu_datap()->cpu_pmap->mapwindow[2].prv_CADDR)
#define CA4     (current_cpu_datap()->cpu_pmap->mapwindow[3].prv_CADDR)
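
/*
 * A map window is typically used as a short-lived scratch mapping of a
 * single physical page.  An illustrative (not authoritative) use, with
 * the window's TLB entry flushed by machinery outside this header:
 *
 *   *CM1 = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW;
 *   ... flush the TLB entry for CA1 ...
 *   bcopy(CA1, buffer, PAGE_SIZE);     (access the frame through CA1)
 *   *CM1 = 0;
 */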

typedef struct pmap_memory_regions {
        ppnum_t base;
        ppnum_t end;
        ppnum_t alloc;
        uint32_t type;
} pmap_memory_region_t;

extern unsigned pmap_memory_region_count;
extern unsigned pmap_memory_region_current;

#define PMAP_MEMORY_REGIONS_SIZE 32

extern pmap_memory_region_t pmap_memory_regions[];
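
/*
 * Illustrative walk of the boot-time physical memory layout (the loop
 * variables are hypothetical, local to this example):
 *
 *   unsigned i;
 *   pmap_memory_region_t *rp;
 *
 *   for (i = 0, rp = pmap_memory_regions;
 *        i < pmap_memory_region_count;
 *        i++, rp++) {
 *           ... physical pages rp->base through rp->end, with rp->alloc
 *               tracking allocation within the region ...
 *   }
 */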

/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * Since user task, kernel task and kernel loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping kernel and kernel loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in current_cpu_datap()->cpu_pmap.real_pmap.
 *
 * Invariant:
 *   current_pmap() == current_cpu_datap()->cpu_pmap.real_pmap ||
 *   current_pmap() == kernel_pmap.
 */
#define PMAP_REAL(my_cpu)       (cpu_datap(my_cpu)->cpu_pmap->real_pmap)

#include <i386/proc_reg.h>
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * pmap_destroy).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap) that is already
 * in use, don't do anything to the hardware, to avoid a TLB flush.
 */

#define PMAP_CPU_SET(pmap, my_cpu) i_bit_set(my_cpu, &((pmap)->cpus_using))
#define PMAP_CPU_CLR(pmap, my_cpu) i_bit_clear(my_cpu, &((pmap)->cpus_using))

#ifdef PAE
#define PDIRBASE pm_ppdpt
#else
#define PDIRBASE pdirbase
#endif
#define set_dirbase(mypmap, my_cpu) {                                   \
        struct pmap **ppmap = &PMAP_REAL(my_cpu);                       \
        pmap_paddr_t pdirbase = (pmap_paddr_t)((mypmap)->PDIRBASE);     \
                                                                        \
        if (*ppmap == (struct pmap *) NULL) {                           \
                *ppmap = (mypmap);                                      \
                PMAP_CPU_SET((mypmap), my_cpu);                         \
                set_cr3(pdirbase);                                      \
        } else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap ) {    \
                if (*ppmap != kernel_pmap)                              \
                        PMAP_CPU_CLR(*ppmap, my_cpu);                   \
                *ppmap = (mypmap);                                      \
                PMAP_CPU_SET((mypmap), my_cpu);                         \
                set_cr3(pdirbase);                                      \
        }                                                               \
        assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);          \
}

/*
 * List of cpus that are actively using mapped memory.  Any
 * pmap update operation must wait for all cpus in this list.
 * Update operations must still be queued to cpus not in this
 * list.
 */
extern cpu_set          cpus_active;

/*
 * List of cpus that are idle, but still operating, and will want
 * to see any kernel pmap updates when they become active.
 */
extern cpu_set          cpus_idle;


#define cpu_update_needed(cpu)  cpu_datap(cpu)->cpu_pmap->update_needed
#define cpu_update_list(cpu)    cpu_datap(cpu)->cpu_pmap->update_list

/*
 * External declarations for PMAP_ACTIVATE.
 */

extern void process_pmap_updates(struct pmap *pmap);
extern void pmap_update_interrupt(void);

/*
 * Machine dependent routines that are used only for i386/i486/i860.
 */

extern vm_offset_t (kvtophys)(
                        vm_offset_t addr);

extern pt_entry_t *pmap_pte(
                        struct pmap *pmap,
                        vm_offset_t addr);

extern vm_offset_t pmap_map(
                        vm_offset_t virt,
                        vm_offset_t start,
                        vm_offset_t end,
                        vm_prot_t prot);

extern vm_offset_t pmap_map_bd(
                        vm_offset_t virt,
                        vm_offset_t start,
                        vm_offset_t end,
                        vm_prot_t prot);

extern void pmap_bootstrap(
                        vm_offset_t load_start);

extern boolean_t pmap_valid_page(
                        ppnum_t pn);

extern int pmap_list_resident_pages(
                        struct pmap *pmap,
                        vm_offset_t *listp,
                        int space);

extern void pmap_commpage_init(
                        vm_offset_t kernel,
                        vm_offset_t user,
                        int count);
extern struct cpu_pmap *pmap_cpu_alloc(
                        boolean_t is_boot_cpu);
extern void pmap_cpu_free(
                        struct cpu_pmap *cp);

extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys);
extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
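
/*
 * Typical (illustrative) use of the lookup routines above, assuming the
 * virtual address is currently mapped in the kernel pmap:
 *
 *   pt_entry_t *ptep = pmap_pte(kernel_pmap, va);
 *   if (ptep != PT_ENTRY_NULL && (*ptep & INTEL_PTE_VALID)) {
 *           ppnum_t pn = pmap_find_phys(kernel_pmap, (addr64_t)va);
 *           ...
 *   }
 */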

/*
 * Macros for speed.
 */


#include <kern/spl.h>

#if defined(PMAP_ACTIVATE_KERNEL)
#undef PMAP_ACTIVATE_KERNEL
#undef PMAP_DEACTIVATE_KERNEL
#undef PMAP_ACTIVATE_USER
#undef PMAP_DEACTIVATE_USER
#endif

/*
 * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 * fields to control TLB invalidation on other CPUS.
 */

#define PMAP_ACTIVATE_KERNEL(my_cpu) {                                  \
                                                                        \
        /*                                                              \
         * Let pmap updates proceed while we wait for this pmap.       \
         */                                                             \
        i_bit_clear((my_cpu), &cpus_active);                            \
                                                                        \
        /*                                                              \
         * Lock the pmap to put this cpu in its active set.            \
         * Wait for updates here.                                       \
         */                                                             \
        simple_lock(&kernel_pmap->lock);                                \
                                                                        \
        /*                                                              \
         * Process invalidate requests for the kernel pmap.            \
         */                                                             \
        if (cpu_update_needed(my_cpu))                                  \
                process_pmap_updates(kernel_pmap);                      \
                                                                        \
        /*                                                              \
         * Mark that this cpu is using the pmap.                        \
         */                                                             \
        i_bit_set((my_cpu), &kernel_pmap->cpus_using);                  \
                                                                        \
        /*                                                              \
         * Mark this cpu active - IPL will be lowered by                \
         * load_context().                                              \
         */                                                             \
        i_bit_set((my_cpu), &cpus_active);                              \
                                                                        \
        simple_unlock(&kernel_pmap->lock);                              \
}

#define PMAP_DEACTIVATE_KERNEL(my_cpu) {                                \
        /*                                                              \
         * Mark pmap no longer in use by this cpu even if              \
         * pmap is locked against updates.                              \
         */                                                             \
        i_bit_clear((my_cpu), &kernel_pmap->cpus_using);                \
        i_bit_clear((my_cpu), &cpus_active);                            \
        PMAP_REAL(my_cpu) = NULL;                                       \
}

#define PMAP_ACTIVATE_MAP(map, my_cpu) {                                \
        register pmap_t tpmap;                                          \
                                                                        \
        tpmap = vm_map_pmap(map);                                       \
        if (tpmap == kernel_pmap) {                                     \
                /*                                                      \
                 * If this is the kernel pmap, switch to its page tables. \
                 */                                                     \
                set_dirbase(kernel_pmap, my_cpu);                       \
        }                                                               \
        else {                                                          \
                /*                                                      \
                 * Let pmap updates proceed while we wait for this pmap. \
                 */                                                     \
                i_bit_clear((my_cpu), &cpus_active);                    \
                                                                        \
                /*                                                      \
                 * Lock the pmap to put this cpu in its active set.     \
                 * Wait for updates here.                               \
                 */                                                     \
                simple_lock(&tpmap->lock);                              \
                                                                        \
                /*                                                      \
                 * No need to invalidate the TLB - the entire user pmap \
                 * will be invalidated by reloading dirbase.            \
                 */                                                     \
                set_dirbase(tpmap, my_cpu);                             \
                                                                        \
                /*                                                      \
                 * Mark this cpu active - IPL will be lowered by        \
                 * load_context().                                      \
                 */                                                     \
                i_bit_set((my_cpu), &cpus_active);                      \
                                                                        \
                simple_unlock(&tpmap->lock);                            \
        }                                                               \
}

#define PMAP_DEACTIVATE_MAP(map, my_cpu)

#define PMAP_ACTIVATE_USER(th, my_cpu) {                                \
        spl_t spl;                                                      \
                                                                        \
        spl = splhigh();                                                \
        PMAP_ACTIVATE_MAP(th->map, my_cpu)                              \
        splx(spl);                                                      \
}

#define PMAP_DEACTIVATE_USER(th, my_cpu)

#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) {                   \
        spl_t spl;                                                      \
                                                                        \
        if (old_th->map != new_th->map) {                               \
                spl = splhigh();                                        \
                PMAP_DEACTIVATE_MAP(old_th->map, my_cpu);               \
                PMAP_ACTIVATE_MAP(new_th->map, my_cpu);                 \
                splx(spl);                                              \
        }                                                               \
}

#define PMAP_SWITCH_USER(th, new_map, my_cpu) {                         \
        spl_t spl;                                                      \
                                                                        \
        spl = splhigh();                                                \
        PMAP_DEACTIVATE_MAP(th->map, my_cpu);                           \
        th->map = new_map;                                              \
        PMAP_ACTIVATE_MAP(th->map, my_cpu);                             \
        splx(spl);                                                      \
}

#define MARK_CPU_IDLE(my_cpu) {                                         \
        /*                                                              \
         * Mark this cpu idle, and remove it from the active set,       \
         * since it is not actively using any pmap.  Signal_cpus        \
         * will notice that it is idle, and avoid signaling it,         \
         * but will queue the update request for when the cpu           \
         * becomes active.                                              \
         */                                                             \
        int s = splhigh();                                              \
        i_bit_set((my_cpu), &cpus_idle);                                \
        i_bit_clear((my_cpu), &cpus_active);                            \
        splx(s);                                                        \
        set_led(my_cpu);                                                \
}

#define MARK_CPU_ACTIVE(my_cpu) {                                       \
                                                                        \
        int s = splhigh();                                              \
        /*                                                              \
         * If a kernel_pmap update was requested while this cpu         \
         * was idle, process it as if we got the interrupt.             \
         * Before doing so, remove this cpu from the idle set.          \
         * Since we do not grab any pmap locks while we flush           \
         * our TLB, another cpu may start an update operation           \
         * before we finish.  Removing this cpu from the idle           \
         * set assures that we will receive another update              \
         * interrupt if this happens.                                   \
         */                                                             \
        i_bit_clear((my_cpu), &cpus_idle);                              \
                                                                        \
        if (cpu_update_needed(my_cpu))                                  \
                pmap_update_interrupt();                                \
                                                                        \
        /*                                                              \
         * Mark that this cpu is now active.                            \
         */                                                             \
        i_bit_set((my_cpu), &cpus_active);                              \
        splx(s);                                                        \
        clear_led(my_cpu);                                              \
}

#define PMAP_CONTEXT(pmap, thread)

#define pmap_kernel_va(VA)      \
        (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_resident_count(pmap)       ((pmap)->stats.resident_count)
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
                                        (KERN_INVALID_ADDRESS)
#define pmap_attribute_cache_sync(addr,size,attr,value) \
                                        (KERN_INVALID_ADDRESS)

#endif /* ASSEMBLER */

#endif /* _PMAP_MACHINE_ */