/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	(These guys wrote the Vax version)
 *
 *	Physical Map management code for Intel i386, i486, and i860.
 *
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */
#include <mach_ldebug.h>

#include <mach/machine/vm_types.h>

#include <mach/boolean.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <kern/lock.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <mach/machine/vm_param.h>
#include <machine/thread.h>

#include <kern/misc_protos.h>			/* prototyping */
#include <i386/misc_protos.h>

#include <i386/cpuid.h>

#if	MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif	/* MACH_KDB */

#include <kern/xpr.h>

#include <i386/AT386/mp/mp_events.h>
/*
 *	Forward declarations for internal functions.
 */
extern void	pmap_remove_range(
			pmap_t		pmap,
			vm_offset_t	va,
			pt_entry_t	*spte,
			pt_entry_t	*epte);

void		phys_attribute_clear(
			vm_offset_t	phys,
			int		bits);

boolean_t	phys_attribute_test(
			vm_offset_t	phys,
			int		bits);

void		pmap_set_modify(vm_offset_t	phys);

void		phys_attribute_set(
			vm_offset_t	phys,
			int		bits);

#ifndef	set_dirbase
void		set_dirbase(vm_offset_t	dirbase);
#endif	/* set_dirbase */
#define	PA_TO_PTE(pa)	(pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS))
#define	iswired(pte)	((pte) & INTEL_PTE_WIRED)

pmap_t	real_pmap[NCPUS];

#define	WRITE_PTE(pte_p, pte_entry)		*(pte_p) = (pte_entry);
#define	WRITE_PTE_FAST(pte_p, pte_entry)	*(pte_p) = (pte_entry);
/*
 *	Private data structures.
 */

/*
 *	For each vm_page_t, there is a list of all currently
 *	valid virtual mappings of that page.  An entry is
 *	a pv_entry_t; the list is the pv_table.
 */

typedef struct pv_entry {
	struct pv_entry	*next;		/* next pv_entry */
	pmap_t		pmap;		/* pmap where mapping lies */
	vm_offset_t	va;		/* virtual address for mapping */
} *pv_entry_t;

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

pv_entry_t	pv_head_table;		/* array of entries, one per page */
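
/*
 * Illustrative sketch (not part of the original source): counting the
 * current mappings of a managed page by walking its pv list.  The helper
 * name pv_mapping_count() is hypothetical; a real caller is assumed to
 * hold the pmap system lock, which is what makes the unlocked walk safe.
 */
int
pv_mapping_count(
	pv_entry_t	pv_h)		/* head entry, e.g. from pai_to_pvh() */
{
	register pv_entry_t	pv_e;
	register int		count = 0;

	if (pv_h->pmap == PMAP_NULL)
		return(0);		/* page currently has no mappings */
	for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next)
		count++;
	return(count);
}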
/*
 *	pv_list entries are kept on a list that can only be accessed
 *	with the pmap system locked (at SPLVM, not in the cpus_active set).
 *	The list is refilled from the pv_list_zone if it becomes empty.
 */
pv_entry_t	pv_free_list;		/* free list at SPLVM */
decl_simple_lock_data(,pv_free_list_lock)
#define	PV_ALLOC(pv_e) { \
	simple_lock(&pv_free_list_lock); \
	if ((pv_e = pv_free_list) != 0) { \
	    pv_free_list = pv_e->next; \
	} \
	simple_unlock(&pv_free_list_lock); \
}

#define	PV_FREE(pv_e) { \
	simple_lock(&pv_free_list_lock); \
	pv_e->next = pv_free_list; \
	pv_free_list = pv_e; \
	simple_unlock(&pv_free_list_lock); \
}

zone_t		pv_list_zone;		/* zone of pv_entry structures */
/*
 *	Each entry in the pv_head_table is locked by a bit in the
 *	pv_lock_table.  The lock bits are accessed by the physical
 *	address of the page they lock.
 */

char	*pv_lock_table;		/* pointer to array of bits */
#define pv_lock_table_size(n)	(((n)+BYTE_SIZE-1)/BYTE_SIZE)
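
/*
 * Example (illustrative, not in the original source): with BYTE_SIZE == 8,
 * a configuration with 1000 managed pages needs
 * pv_lock_table_size(1000) == (1000 + 7) / 8 == 125 bytes of lock bits.
 */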
/*
 *	First and last physical addresses that we maintain any information
 *	for.  Initialized to zero so that pmap operations done before
 *	pmap_init won't touch any non-existent structures.
 */
vm_offset_t	vm_first_phys = (vm_offset_t) 0;
vm_offset_t	vm_last_phys  = (vm_offset_t) 0;
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
/*
 *	Index into pv_head table, its lock bits, and the modify/reference
 *	bits starting at vm_first_phys.
 */
#define pa_index(pa)	(atop(pa - vm_first_phys))

#define pai_to_pvh(pai)		(&pv_head_table[pai])
#define lock_pvh_pai(pai)	bit_lock(pai, (void *)pv_lock_table)
#define unlock_pvh_pai(pai)	bit_unlock(pai, (void *)pv_lock_table)
/*
 *	Array of physical page attributes for managed pages.
 *	One byte per physical page.
 */
char	*pmap_phys_attributes;

/*
 *	Physical page attributes.  Copy bits from PTE definition.
 */
#define	PHYS_MODIFIED	INTEL_PTE_MOD	/* page modified */
#define	PHYS_REFERENCED	INTEL_PTE_REF	/* page referenced */
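
/*
 * Illustrative sketch (not part of the original source): how a managed
 * physical address indexes this per-page state.  The helper name
 * phys_page_dirty() is hypothetical; it only reports modify bits that
 * have already been collected into pmap_phys_attributes, and assumes the
 * caller has verified the page is managed and holds the pmap system lock.
 */
boolean_t
phys_page_dirty(
	vm_offset_t	phys)
{
	int	pai = pa_index(phys);	/* page index relative to vm_first_phys */

	return((pmap_phys_attributes[pai] & PHYS_MODIFIED) ? TRUE : FALSE);
}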
/*
 *	Amount of virtual memory mapped by one
 *	page-directory entry.
 */
#define	PDE_MAPPED_SIZE		(pdetova(1))
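
/*
 * Illustrative note (not part of the original source): with the usual
 * i386 two-level layout of 4 KB pages and 1024 ptes per page table,
 * PDE_MAPPED_SIZE comes out to pdetova(1) == 4 MB of virtual space per
 * page-directory entry.
 */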
/*
 *	We allocate page table pages directly from the VM system
 *	through this object.  It maps physical memory.
 */
vm_object_t	pmap_object = VM_OBJECT_NULL;
/*
 *	Locking and TLB invalidation
 */

/*
 *	There are two structures in the pmap module that need locking:
 *	the pmaps themselves, and the per-page pv_lists (which are locked
 *	by locking the pv_lock_table entry that corresponds to the pv_head
 *	for the list in question.)  Most routines want to lock a pmap and
 *	then do operations in it that require pv_list locking -- however
 *	pmap_remove_all and pmap_copy_on_write operate on a physical page
 *	basis and want to do the locking in the reverse order, i.e. lock
 *	a pv_list and then go through all the pmaps referenced by that list.
 *	To protect against deadlock between these two cases, the pmap_lock
 *	is used.  There are three different locking protocols as a result:
 *
 *  1.  pmap operations only (pmap_extract, pmap_access, ...)  Lock only
 *		the pmap.
 *
 *  2.  pmap-based operations (pmap_enter, pmap_remove, ...)  Get a read
 *		lock on the pmap_lock (shared read), then lock the pmap
 *		and finally the pv_lists as needed [i.e. pmap lock before
 *		pv_list lock.]
 *
 *  3.  pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
 *		Get a write lock on the pmap_lock (exclusive write); this
 *		also guarantees exclusive access to the pv_lists.  Lock the
 *		pmaps as needed.
 *
 *	At no time may any routine hold more than one pmap lock or more than
 *	one pv_list lock.  Because interrupt level routines can allocate
 *	mbufs and cause pmap_enter's, the pmap_lock and the lock on the
 *	kernel_pmap can only be held at splhigh.
 */
#if	NCPUS > 1
/*
 *	We raise the interrupt level to splhigh, to block interprocessor
 *	interrupts during pmap operations.  We must take the CPU out of
 *	the cpus_active set while interrupts are blocked.
 */
#define SPLVM(spl)	{ \
	(spl) = splhigh(); \
	mp_disable_preemption(); \
	i_bit_clear(cpu_number(), &cpus_active); \
	mp_enable_preemption(); \
}

#define SPLX(spl)	{ \
	mp_disable_preemption(); \
	i_bit_set(cpu_number(), &cpus_active); \
	mp_enable_preemption(); \
	splx(spl); \
}
/*
 *	Lock on pmap system
 */
lock_t	pmap_system_lock;

#define PMAP_READ_LOCK(pmap, spl) {	\
	SPLVM(spl);			\
	lock_read(&pmap_system_lock);	\
	simple_lock(&(pmap)->lock);	\
}

#define PMAP_WRITE_LOCK(spl) {		\
	SPLVM(spl);			\
	lock_write(&pmap_system_lock);	\
}

#define PMAP_READ_UNLOCK(pmap, spl) {		\
	simple_unlock(&(pmap)->lock);		\
	lock_read_done(&pmap_system_lock);	\
	SPLX(spl);				\
}

#define PMAP_WRITE_UNLOCK(spl) {		\
	lock_write_done(&pmap_system_lock);	\
	SPLX(spl);				\
}

#define PMAP_WRITE_TO_READ_LOCK(pmap) {		\
	simple_lock(&(pmap)->lock);		\
	lock_write_to_read(&pmap_system_lock);	\
}

#define LOCK_PVH(index)		lock_pvh_pai(index)

#define UNLOCK_PVH(index)	unlock_pvh_pai(index)
#define PMAP_FLUSH_TLBS() {			\
	flush_tlb();				\
	i386_signal_cpus(MP_TLB_FLUSH);		\
}

#define PMAP_RELOAD_TLBS() {			\
	i386_signal_cpus(MP_TLB_RELOAD);	\
	set_cr3(kernel_pmap->pdirbase);		\
}

#define PMAP_INVALIDATE_PAGE(map, addr) {	\
	if (map == kernel_pmap)			\
		invlpg((vm_offset_t) addr);	\
	else					\
		flush_tlb();			\
	i386_signal_cpus(MP_TLB_FLUSH);		\
}
#else	/* NCPUS > 1 */

#define SPLVM(spl)	{ (spl) = splhigh(); }
#define SPLX(spl)	splx (spl)

#define PMAP_READ_LOCK(pmap, spl)	SPLVM(spl)
#define PMAP_WRITE_LOCK(spl)		SPLVM(spl)
#define PMAP_READ_UNLOCK(pmap, spl)	SPLX(spl)
#define PMAP_WRITE_UNLOCK(spl)		SPLX(spl)
#define PMAP_WRITE_TO_READ_LOCK(pmap)

#if	MACH_RT
#define LOCK_PVH(index)		disable_preemption()
#define UNLOCK_PVH(index)	enable_preemption()
#else	/* MACH_RT */
#define LOCK_PVH(index)
#define UNLOCK_PVH(index)
#endif	/* MACH_RT */

#define PMAP_FLUSH_TLBS()	flush_tlb()
#define PMAP_RELOAD_TLBS()	set_cr3(kernel_pmap->pdirbase)

#define PMAP_INVALIDATE_PAGE(map, addr) {	\
	if (map == kernel_pmap)			\
		invlpg((vm_offset_t) addr);	\
	else					\
		flush_tlb();			\
}

#endif	/* NCPUS > 1 */
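
/*
 * Illustrative sketch (not part of the original source): the shape of a
 * pmap-based operation under locking protocol 2) above -- pmap system
 * lock (shared), then the pmap lock, then the pv list of any managed
 * page touched.  pmap_example_touch() is a hypothetical routine shown
 * only to make the lock ordering concrete.
 */
void
pmap_example_touch(
	register pmap_t		pmap,
	register vm_offset_t	phys)
{
	spl_t	spl;
	int	pai;

	PMAP_READ_LOCK(pmap, spl);	/* system lock (read) + pmap lock */

	pai = pa_index(phys);
	LOCK_PVH(pai);			/* pv list for this physical page */

	/* ... examine or edit the mappings of phys in this pmap ... */

	UNLOCK_PVH(pai);
	PMAP_READ_UNLOCK(pmap, spl);	/* drops the pmap lock, then the system lock */
}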
#define MAX_TBIS_SIZE	32		/* > this -> TBIA */ /* XXX */

#if	NCPUS > 1
/*
 *	Structures to keep track of pending TLB invalidations
 */

volatile boolean_t	cpu_update_needed[NCPUS];

#endif	/* NCPUS > 1 */
/*
 *	Other useful macros.
 */
#define current_pmap()		(vm_map_pmap(current_act()->map))
#define pmap_in_use(pmap, cpu)	(((pmap)->cpus_using & (1 << (cpu))) != 0)

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;
struct zone	*pmap_zone;		/* zone of pmap structures */

int		pmap_debug = 0;		/* flag for debugging prints */
int		ptes_per_vm_page;	/* number of hardware ptes needed
					   to map one VM page. */
unsigned int	inuse_ptepages_count = 0;	/* debugging */
/*
 *	Pmap cache.  Cache is threaded through ref_count field of pmap.
 *	Max will eventually be constant -- variable for experimentation.
 */
int		pmap_cache_max = 32;
int		pmap_alloc_chunk = 8;
pmap_t		pmap_cache_list;
int		pmap_cache_count;
decl_simple_lock_data(,pmap_cache_lock)

extern	vm_offset_t	hole_start, hole_end;
/*
 *	Page directory for kernel.
 */
pt_entry_t	*kpde = 0;	/* set by start.s - keep out of bss */

#if	DEBUG_ALIAS
#define PMAP_ALIAS_MAX 32
struct pmap_alias {
	vm_offset_t	rpc;
	pmap_t		pmap;
	vm_offset_t	va;
	int		cookie;
#define PMAP_ALIAS_COOKIE 0xdeadbeef
} pmap_aliasbuf[PMAP_ALIAS_MAX];
int pmap_alias_index = 0;
extern vm_offset_t get_rpc();

#endif	/* DEBUG_ALIAS */
/*
 *	Given an offset and a map, compute the address of the
 *	pte.  If the address is invalid with respect to the map
 *	then PT_ENTRY_NULL is returned (and the map may need to grow).
 *
 *	This is only used in machine-dependent code.
 */

pt_entry_t *
pmap_pte(
	register pmap_t		pmap,
	register vm_offset_t	addr)
{
	register pt_entry_t	*ptp;
	register pt_entry_t	pte;

	pte = pmap->dirbase[pdenum(pmap, addr)];
	if ((pte & INTEL_PTE_VALID) == 0)
		return(PT_ENTRY_NULL);
	ptp = (pt_entry_t *)ptetokv(pte);
	return(&ptp[ptenum(addr)]);
}

#define	pmap_pde(pmap, addr)	(&(pmap)->dirbase[pdenum(pmap, addr)])
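
/*
 * Illustrative note (not part of the original source): on the i386 a
 * 32-bit virtual address splits 10/10/12.  pdenum() takes the top 10
 * bits to select the page-directory entry, ptenum() the next 10 bits to
 * select the pte within that page table, and the low 12 bits are the
 * byte offset within the 4 KB page.  For example, for va == 0xC0001234,
 * pmap_pte() returns the pte that maps page 0xC0001000 and the physical
 * address is pte_to_pa(*pte) + 0x234.
 */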
503 #define DEBUG_PTE_PAGE 0
510 register pt_entry_t
*pte
, *epte
;
513 /* check the use and wired counts */
514 if (ptep
== PTE_PAGE_NULL
)
516 pte
= pmap_pte(ptep
->pmap
, ptep
->va
);
517 epte
= pte
+ INTEL_PGBYTES
/sizeof(pt_entry_t
);
526 pte
+= ptes_per_vm_page
;
529 if (ctu
!= ptep
->use_count
|| ctw
!= ptep
->wired_count
) {
530 printf("use %d wired %d - actual use %d wired %d\n",
531 ptep
->use_count
, ptep
->wired_count
, ctu
, ctw
);
535 #endif /* DEBUG_PTE_PAGE */
/*
 *	Map memory at initialization.  The physical addresses being
 *	mapped are not managed and are never unmapped.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(
	register vm_offset_t	virt,
	register vm_offset_t	start,
	register vm_offset_t	end,
	register vm_prot_t	prot)
{
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}
/*
 *	Back-door routine for mapping kernel VM at initialization.
 *	Useful for mapping memory outside the range
 *	[vm_first_phys, vm_last_phys) (i.e., devices).
 *	Sets no-cache, A, D.
 *	Otherwise like pmap_map.
 */
vm_offset_t
pmap_map_bd(
	register vm_offset_t	virt,
	register vm_offset_t	start,
	register vm_offset_t	end,
	vm_prot_t		prot)
{
	register pt_entry_t	template;
	register pt_entry_t	*pte;

	template = pa_to_pte(start)
		| INTEL_PTE_NCACHE
		| INTEL_PTE_REF
		| INTEL_PTE_MOD
		| INTEL_PTE_WIRED
		| INTEL_PTE_VALID;
	if (prot & VM_PROT_WRITE)
	    template |= INTEL_PTE_WRITE;

	while (start < end) {
		pte = pmap_pte(kernel_pmap, virt);
		if (pte == PT_ENTRY_NULL)
			panic("pmap_map_bd: Invalid kernel address\n");
		WRITE_PTE_FAST(pte, template)
		pte_increment_pa(template);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}

	flush_tlb();
	return(virt);
}
extern	char		*first_avail;
extern	vm_offset_t	virtual_avail, virtual_end;
extern	vm_offset_t	avail_start, avail_end, avail_next;
/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *	Called with mapping OFF.  Page_size must already be set.
 *
 *	Parameters:
 *	load_start:	PA where kernel was loaded
 *	avail_start	PA of first available physical page -
 *			   after kernel page tables
 *	avail_end	PA of last available physical page
 *	virtual_avail	VA of first available page -
 *			   after kernel page tables
 *	virtual_end	VA of last available page -
 *			   end of kernel address space
 *
 *	&start_text	start of kernel text
 *	&etext		end of kernel text
 */
629 vm_offset_t load_start
)
631 vm_offset_t va
, tva
, paddr
;
633 pt_entry_t
*pde
, *pte
, *ptend
;
634 vm_size_t morevm
; /* VM space for kernel map */
	/*
	 *	Set ptes_per_vm_page for general use.
	 */
	ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;

	/*
	 *	The kernel's pmap is statically allocated so we don't
	 *	have to use pmap_create, which is unlikely to work
	 *	correctly at this part of the boot sequence.
	 */

	kernel_pmap = &kernel_pmap_store;

#if	NCPUS > 1
	lock_init(&pmap_system_lock,
		  FALSE,		/* NOT a sleep lock */
		  ETAP_VM_PMAP_SYS,
		  ETAP_VM_PMAP_SYS_I);
#endif	/* NCPUS > 1 */

	simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
	simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE);

	kernel_pmap->ref_count = 1;
	/*
	 *	The kernel page directory has been allocated;
	 *	its virtual address is in kpde.
	 *
	 *	Enough kernel page table pages have been allocated
	 *	to map low system memory, kernel text, kernel data/bss,
	 *	kdb's symbols, and the page directory and page tables.
	 *
	 *	No other physical memory has been allocated.
	 */

	/*
	 * Start mapping virtual memory to physical memory, 1-1,
	 * at end of mapped memory.
	 */

	virtual_avail = phystokv(avail_start);
	virtual_end = phystokv(avail_end);
681 pde
+= pdenum(kernel_pmap
, virtual_avail
);
683 if (pte_to_pa(*pde
) == 0) {
684 /* This pte has not been allocated */
688 pte
= (pt_entry_t
*)ptetokv(*pde
);
689 /* first pte of page */
690 ptend
= pte
+NPTES
; /* last pte of page */
691 pte
+= ptenum(virtual_avail
); /* point to pte that
692 maps first avail VA */
693 pde
++; /* point pde to first empty slot */
696 template = pa_to_pte(avail_start
)
700 for (va
= virtual_avail
; va
< virtual_end
; va
+= INTEL_PGBYTES
) {
702 pte
= (pt_entry_t
*)phystokv(virtual_avail
);
704 virtual_avail
= (vm_offset_t
)ptend
;
705 if (virtual_avail
== hole_start
)
706 virtual_avail
= hole_end
;
707 *pde
= PA_TO_PTE((vm_offset_t
) pte
)
712 WRITE_PTE_FAST(pte
, template)
714 pte_increment_pa(template);
717 avail_start
= virtual_avail
- VM_MIN_KERNEL_ADDRESS
;
718 avail_next
= avail_start
;
	/*
	 *	Figure out maximum kernel address.
	 *	Kernel virtual space is:
	 *		- at least three times physical memory
	 *		- at least VM_MIN_KERNEL_ADDRESS
	 *		- limited by VM_MAX_KERNEL_ADDRESS
	 */

	morevm = 3*avail_end;
	if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS)
		morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1;
	/*
	 *	startup requires additional virtual memory (for tables, buffers,
	 *	etc.).  The kd driver may also require some of that memory to
	 *	access the graphics board.
	 */
	*(int *)&template = 0;

	/*
	 *	Leave room for kernel-loaded servers, which have been linked at
	 *	addresses from VM_MIN_KERNEL_LOADED_ADDRESS to
	 *	VM_MAX_KERNEL_LOADED_ADDRESS.
	 */
	if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1)
		morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end;

	virtual_end += morevm;
750 for (tva
= va
; tva
< virtual_end
; tva
+= INTEL_PGBYTES
) {
752 pmap_next_page(&paddr
);
753 pte
= (pt_entry_t
*)phystokv(paddr
);
755 *pde
= PA_TO_PTE((vm_offset_t
) pte
)
760 WRITE_PTE_FAST(pte
, template)
766 /* Push the virtual avail address above hole_end */
767 if (virtual_avail
< hole_end
)
768 virtual_avail
= hole_end
;
774 virtual_end
= va
+ morevm
;
779 * invalidate user virtual addresses
783 pdenum(kernel_pmap
,VM_MIN_KERNEL_ADDRESS
)*sizeof(pt_entry_t
));
784 kernel_pmap
->dirbase
= kpde
;
785 printf("Kernel virtual space from 0x%x to 0x%x.\n",
786 VM_MIN_KERNEL_ADDRESS
, virtual_end
);
788 avail_start
= avail_next
;
789 printf("Available physical space from 0x%x to 0x%x\n",
790 avail_start
, avail_end
);
792 kernel_pmap
->pdirbase
= kvtophys((vm_offset_t
)kernel_pmap
->dirbase
);
801 *startp
= virtual_avail
;
/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */

void
pmap_init(void)
{
	register long		npages;
	vm_offset_t		addr;
	register vm_size_t	s;
	/*
	 *	Allocate memory for the pv_head_table and its lock bits,
	 *	the modify bit array, and the pte_page table.
	 */

	npages = atop(avail_end - avail_start);
	s = (vm_size_t) (sizeof(struct pv_entry) * npages
				+ pv_lock_table_size(npages)
				+ npages);

	s = round_page(s);
	if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
		panic("pmap_init");

	memset((char *)addr, 0, s);
	/*
	 *	Allocate the structures first to preserve word-alignment.
	 */
	pv_head_table = (pv_entry_t) addr;
	addr = (vm_offset_t) (pv_head_table + npages);

	pv_lock_table = (char *) addr;
	addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));

	pmap_phys_attributes = (char *) addr;
	/*
	 *	Create the zone of physical maps,
	 *	and of the physical-to-virtual entries.
	 */
	s = (vm_size_t) sizeof(struct pmap);
	pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
	s = (vm_size_t) sizeof(struct pv_entry);
	pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
	/*
	 *	Only now, when all of the data structures are allocated,
	 *	can we set vm_first_phys and vm_last_phys.  If we set them
	 *	too soon, the kmem_alloc_wired above will try to use these
	 *	data structures and blow up.
	 */

	vm_first_phys = avail_start;
	vm_last_phys = avail_end;
	pmap_initialized = TRUE;

	/*
	 *	Initialize pmap cache.
	 */
	pmap_cache_list = PMAP_NULL;
	pmap_cache_count = 0;
	simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE);
}
#define	pmap_valid_page(x)	((avail_start <= x) && (x < avail_end))

#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
888 assert(phys
!= vm_page_fictitious_addr
);
889 if (!pmap_initialized
)
892 if (!pmap_valid_page(phys
))
895 PMAP_WRITE_LOCK(spl
);
897 pai
= pa_index(phys
);
898 pv_h
= pai_to_pvh(pai
);
900 result
= (pv_h
->pmap
== PMAP_NULL
);
901 PMAP_WRITE_UNLOCK(spl
);
/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
923 register pmap_statistics_t stats
;
926 * A software use-only map doesn't even need a map.
934 * Try to get cached pmap, if this fails,
935 * allocate a pmap struct from the pmap_zone. Then allocate
936 * the page descriptor table from the pd_zone.
939 simple_lock(&pmap_cache_lock
);
940 while ((p
= pmap_cache_list
) == PMAP_NULL
) {
942 vm_offset_t dirbases
;
945 simple_unlock(&pmap_cache_lock
);
949 * XXX NEEDS MP DOING ALLOC logic so that if multiple processors
950 * XXX get here, only one allocates a chunk of pmaps.
951 * (for now we'll just let it go - safe but wasteful)
956 * Allocate a chunck of pmaps. Single kmem_alloc_wired
957 * operation reduces kernel map fragmentation.
960 if (kmem_alloc_wired(kernel_map
, &dirbases
,
961 pmap_alloc_chunk
* INTEL_PGBYTES
)
963 panic("pmap_create.1");
965 for (i
= pmap_alloc_chunk
; i
> 0 ; i
--) {
966 p
= (pmap_t
) zalloc(pmap_zone
);
968 panic("pmap_create.2");
971 * Initialize pmap. Don't bother with
972 * ref count as cache list is threaded
973 * through it. It'll be set on cache removal.
975 p
->dirbase
= (pt_entry_t
*) dirbases
;
976 dirbases
+= INTEL_PGBYTES
;
977 memcpy(p
->dirbase
, kpde
, INTEL_PGBYTES
);
978 p
->pdirbase
= kvtophys((vm_offset_t
)p
->dirbase
);
980 simple_lock_init(&p
->lock
, ETAP_VM_PMAP
);
984 * Initialize statistics.
987 stats
->resident_count
= 0;
988 stats
->wired_count
= 0;
993 simple_lock(&pmap_cache_lock
);
994 p
->ref_count
= (int) pmap_cache_list
;
997 simple_unlock(&pmap_cache_lock
);
999 simple_lock(&pmap_cache_lock
);
1002 assert(p
->stats
.resident_count
== 0);
1003 assert(p
->stats
.wired_count
== 0);
1004 p
->stats
.resident_count
= 0;
1005 p
->stats
.wired_count
= 0;
1007 pmap_cache_list
= (pmap_t
) p
->ref_count
;
1010 simple_unlock(&pmap_cache_lock
);
/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
1025 register pt_entry_t
*pdep
;
1026 register vm_offset_t pa
;
1029 register vm_page_t m
;
1035 simple_lock(&p
->lock
);
1038 register int my_cpu
;
1040 mp_disable_preemption();
1041 my_cpu
= cpu_number();
1044 * If some cpu is not using the physical pmap pointer that it
1045 * is supposed to be (see set_dirbase), we might be using the
1046 * pmap that is being destroyed! Make sure we are
1047 * physically on the right pmap:
1051 if (real_pmap
[my_cpu
] == p
) {
1052 PMAP_CPU_CLR(p
, my_cpu
);
1053 real_pmap
[my_cpu
] = kernel_pmap
;
1056 mp_enable_preemption();
1058 simple_unlock(&p
->lock
);
1062 return; /* still in use */
1066 * Free the memory maps, then the
1070 while (pdep
< &p
->dirbase
[pdenum(p
, LINEAR_KERNEL_ADDRESS
)]) {
1071 if (*pdep
& INTEL_PTE_VALID
) {
1072 pa
= pte_to_pa(*pdep
);
1073 vm_object_lock(pmap_object
);
1074 m
= vm_page_lookup(pmap_object
, pa
);
1075 if (m
== VM_PAGE_NULL
)
1076 panic("pmap_destroy: pte page not in object");
1077 vm_page_lock_queues();
1079 inuse_ptepages_count
--;
1080 vm_object_unlock(pmap_object
);
1081 vm_page_unlock_queues();
1084 * Clear pdes, this might be headed for the cache.
1086 c
= ptes_per_vm_page
;
1093 pdep
+= ptes_per_vm_page
;
1097 assert(p
->stats
.resident_count
== 0);
1098 assert(p
->stats
.wired_count
== 0);
1101 * Add to cache if not already full
1103 simple_lock(&pmap_cache_lock
);
1104 if (pmap_cache_count
<= pmap_cache_max
) {
1105 p
->ref_count
= (int) pmap_cache_list
;
1106 pmap_cache_list
= p
;
1108 simple_unlock(&pmap_cache_lock
);
1111 simple_unlock(&pmap_cache_lock
);
1112 kmem_free(kernel_map
, (vm_offset_t
)p
->dirbase
, INTEL_PGBYTES
);
1113 zfree(pmap_zone
, (vm_offset_t
) p
);
/*
 *	Add a reference to the specified pmap.
 */

void
pmap_reference(
	register pmap_t	p)
{
	spl_t	spl;

	if (p != PMAP_NULL) {
		SPLVM(spl);
		simple_lock(&p->lock);
		p->ref_count++;
		simple_unlock(&p->lock);
		SPLX(spl);
	}
}
/*
 *	Remove a range of hardware page-table entries.
 *	The entries given are the first (inclusive)
 *	and last (exclusive) entries for the VM pages.
 *	The virtual address is the va for the first pte.
 *
 *	The pmap must be locked.
 *	If the pmap is not the kernel pmap, the range must lie
 *	entirely within one pte-page.  This is NOT checked.
 *	Assumes that the pte-page exists.
 */
1156 register pt_entry_t
*cpte
;
1157 int num_removed
, num_unwired
;
1162 if (pmap
!= kernel_pmap
)
1163 ptep_check(get_pte_page(spte
));
1164 #endif /* DEBUG_PTE_PAGE */
1168 for (cpte
= spte
; cpte
< epte
;
1169 cpte
+= ptes_per_vm_page
, va
+= PAGE_SIZE
) {
1171 pa
= pte_to_pa(*cpte
);
1179 if (!valid_page(pa
)) {
1182 * Outside range of managed physical memory.
1183 * Just remove the mappings.
1185 register int i
= ptes_per_vm_page
;
1186 register pt_entry_t
*lpte
= cpte
;
1198 * Get the modify and reference bits.
1202 register pt_entry_t
*lpte
;
1204 i
= ptes_per_vm_page
;
1207 pmap_phys_attributes
[pai
] |=
1208 *lpte
& (PHYS_MODIFIED
|PHYS_REFERENCED
);
1215 * Remove the mapping from the pvlist for
1216 * this physical page.
1219 register pv_entry_t pv_h
, prev
, cur
;
1221 pv_h
= pai_to_pvh(pai
);
1222 if (pv_h
->pmap
== PMAP_NULL
) {
1223 panic("pmap_remove: null pv_list!");
1225 if (pv_h
->va
== va
&& pv_h
->pmap
== pmap
) {
1227 * Header is the pv_entry. Copy the next one
1228 * to header and free the next one (we cannot
1232 if (cur
!= PV_ENTRY_NULL
) {
1237 pv_h
->pmap
= PMAP_NULL
;
1244 if ((cur
= prev
->next
) == PV_ENTRY_NULL
) {
1245 panic("pmap-remove: mapping not in pv_list!");
1247 } while (cur
->va
!= va
|| cur
->pmap
!= pmap
);
1248 prev
->next
= cur
->next
;
1258 assert(pmap
->stats
.resident_count
>= num_removed
);
1259 pmap
->stats
.resident_count
-= num_removed
;
1260 assert(pmap
->stats
.wired_count
>= num_unwired
);
1261 pmap
->stats
.wired_count
-= num_unwired
;
/*
 *	Remove the given range of addresses
 *	from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the hardware page size.
 */

void
pmap_remove(
	pmap_t		map,
	vm_offset_t	s,
	vm_offset_t	e)
{
	spl_t			spl;
	register pt_entry_t	*pde;
	register pt_entry_t	*spte, *epte;
	vm_offset_t		l;

	if (map == PMAP_NULL)
		return;

	PMAP_READ_LOCK(map, spl);

	pde = pmap_pde(map, s);

	while (s < e) {
	    l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
	    if (l > e)
		l = e;
	    if (*pde & INTEL_PTE_VALID) {
		spte = (pt_entry_t *)ptetokv(*pde);
		spte = &spte[ptenum(s)];
		epte = &spte[intel_btop(l-s)];
		pmap_remove_range(map, s, spte, epte);
	    }
	    s = l;
	    pde++;
	}

	PMAP_READ_UNLOCK(map, spl);
}
/*
 *	Routine:	pmap_page_protect
 *
 *	Function:
 *		Lower the permission for all mappings to a given
 *		page.
 */
1321 pv_entry_t pv_h
, prev
;
1322 register pv_entry_t pv_e
;
1323 register pt_entry_t
*pte
;
1325 register pmap_t pmap
;
1329 assert(phys
!= vm_page_fictitious_addr
);
1330 if (!valid_page(phys
)) {
1332 * Not a managed page.
1338 * Determine the new protection.
1342 case VM_PROT_READ
|VM_PROT_EXECUTE
:
1346 return; /* nothing to do */
1353 * Lock the pmap system first, since we will be changing
1357 PMAP_WRITE_LOCK(spl
);
1359 pai
= pa_index(phys
);
1360 pv_h
= pai_to_pvh(pai
);
1363 * Walk down PV list, changing or removing all mappings.
1364 * We do not have to lock the pv_list because we have
1365 * the entire pmap system locked.
1367 if (pv_h
->pmap
!= PMAP_NULL
) {
1373 * Lock the pmap to block pmap_extract and similar routines.
1375 simple_lock(&pmap
->lock
);
1378 register vm_offset_t va
;
1381 pte
= pmap_pte(pmap
, va
);
1384 * Consistency checks.
1386 /* assert(*pte & INTEL_PTE_VALID); XXX */
1387 /* assert(pte_to_phys(*pte) == phys); */
1390 * Invalidate TLBs for all CPUs using this mapping.
1392 PMAP_INVALIDATE_PAGE(pmap
, va
);
1396 * Remove the mapping if new protection is NONE
1397 * or if write-protecting a kernel mapping.
1399 if (remove
|| pmap
== kernel_pmap
) {
1401 * Remove the mapping, collecting any modify bits.
1404 panic("pmap_remove_all removing a wired page");
1407 register int i
= ptes_per_vm_page
;
1410 pmap_phys_attributes
[pai
] |=
1411 *pte
& (PHYS_MODIFIED
|PHYS_REFERENCED
);
1416 assert(pmap
->stats
.resident_count
>= 1);
1417 pmap
->stats
.resident_count
--;
1420 * Remove the pv_entry.
1424 * Fix up head later.
1426 pv_h
->pmap
= PMAP_NULL
;
1430 * Delete this entry.
1432 prev
->next
= pv_e
->next
;
1440 register int i
= ptes_per_vm_page
;
1443 *pte
&= ~INTEL_PTE_WRITE
;
1453 simple_unlock(&pmap
->lock
);
1455 } while ((pv_e
= prev
->next
) != PV_ENTRY_NULL
);
1458 * If pv_head mapping was removed, fix it up.
1460 if (pv_h
->pmap
== PMAP_NULL
) {
1462 if (pv_e
!= PV_ENTRY_NULL
) {
1469 PMAP_WRITE_UNLOCK(spl
);
/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 *	Will not increase permissions.
 */
1484 register pt_entry_t
*pde
;
1485 register pt_entry_t
*spte
, *epte
;
1490 if (map
== PMAP_NULL
)
1494 * Determine the new protection.
1498 case VM_PROT_READ
|VM_PROT_EXECUTE
:
1500 case VM_PROT_READ
|VM_PROT_WRITE
:
1502 return; /* nothing to do */
1504 pmap_remove(map
, s
, e
);
	/*
	 *	If write-protecting in the kernel pmap,
	 *	remove the mappings; the i386 ignores
	 *	the write-permission bit in kernel mode.
	 *
	 *	XXX should be #if'd for i386
	 */
1516 if (cpuid_family
== CPUID_FAMILY_386
)
1517 if (map
== kernel_pmap
) {
1518 pmap_remove(map
, s
, e
);
1523 simple_lock(&map
->lock
);
1526 pde
= pmap_pde(map
, s
);
1528 l
= (s
+ PDE_MAPPED_SIZE
) & ~(PDE_MAPPED_SIZE
-1);
1531 if (*pde
& INTEL_PTE_VALID
) {
1532 spte
= (pt_entry_t
*)ptetokv(*pde
);
1533 spte
= &spte
[ptenum(s
)];
1534 epte
= &spte
[intel_btop(l
-s
)];
1536 while (spte
< epte
) {
1537 if (*spte
& INTEL_PTE_VALID
)
1538 *spte
&= ~INTEL_PTE_WRITE
;
1548 simple_unlock(&map
->lock
);
/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte cannot be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
1568 register pmap_t pmap
,
1570 register vm_offset_t pa
,
1574 register pt_entry_t
*pte
;
1575 register pv_entry_t pv_h
;
1576 register int i
, pai
;
1578 pt_entry_t
template;
1582 XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n",
1583 current_thread()->top_act
,
1587 assert(pa
!= vm_page_fictitious_addr
);
1589 printf("pmap(%x, %x)\n", v
, pa
);
1590 if (pmap
== PMAP_NULL
)
1593 if (cpuid_family
== CPUID_FAMILY_386
)
1594 if (pmap
== kernel_pmap
&& (prot
& VM_PROT_WRITE
) == 0
1595 && !wired
/* hack for io_wire */ ) {
	    /*
	     *	Because the 386 ignores write protection in kernel mode,
	     *	we cannot enter a read-only kernel mapping, and must
	     *	remove an existing mapping if changing it.
	     *
	     *	XXX should be #if'd for i386
	     */
1603 PMAP_READ_LOCK(pmap
, spl
);
1605 pte
= pmap_pte(pmap
, v
);
1606 if (pte
!= PT_ENTRY_NULL
&& pte_to_pa(*pte
) != 0) {
1608 * Invalidate the translation buffer,
1609 * then remove the mapping.
1611 PMAP_INVALIDATE_PAGE(pmap
, v
);
1612 pmap_remove_range(pmap
, v
, pte
,
1613 pte
+ ptes_per_vm_page
);
1615 PMAP_READ_UNLOCK(pmap
, spl
);
	/*
	 *	Must allocate a new pvlist entry while we're unlocked;
	 *	zalloc may cause pageout (which will lock the pmap system).
	 *	If we determine we need a pvlist entry, we will unlock
	 *	and allocate one.  Then we will retry, throwing away
	 *	the allocated entry later (if we no longer need it).
	 */
1626 pv_e
= PV_ENTRY_NULL
;
1628 PMAP_READ_LOCK(pmap
, spl
);
	/*
	 *	Expand pmap to include this pte.  Assume that
	 *	pmap is always expanded to include enough hardware
	 *	pages to map one VM page.
	 */
1636 while ((pte
= pmap_pte(pmap
, v
)) == PT_ENTRY_NULL
) {
1638 * Must unlock to expand the pmap.
1640 PMAP_READ_UNLOCK(pmap
, spl
);
1642 pmap_expand(pmap
, v
);
1644 PMAP_READ_LOCK(pmap
, spl
);
	/*
	 *	Special case if the physical page is already mapped
	 *	at this address.
	 */
	old_pa = pte_to_pa(*pte);
1653 * May be changing its wired attribute or protection
1656 template = pa_to_pte(pa
) | INTEL_PTE_VALID
;
1657 if (pmap
!= kernel_pmap
)
1658 template |= INTEL_PTE_USER
;
1659 if (prot
& VM_PROT_WRITE
)
1660 template |= INTEL_PTE_WRITE
;
1662 template |= INTEL_PTE_WIRED
;
1664 pmap
->stats
.wired_count
++;
1667 if (iswired(*pte
)) {
1668 assert(pmap
->stats
.wired_count
>= 1);
1669 pmap
->stats
.wired_count
--;
1673 PMAP_INVALIDATE_PAGE(pmap
, v
);
1675 i
= ptes_per_vm_page
;
1677 if (*pte
& INTEL_PTE_MOD
)
1678 template |= INTEL_PTE_MOD
;
1679 WRITE_PTE(pte
, template)
1681 pte_increment_pa(template);
	/*
	 *	Outline of code from here:
	 *	   1) If va was mapped, update TLBs, remove the mapping
	 *	      and remove old pvlist entry.
	 *	   2) Add pvlist entry for new mapping
	 *	   3) Enter new mapping.
	 *
	 *	SHARING_FAULTS complicates this slightly in that it cannot
	 *	replace the mapping, but must remove it (because adding the
	 *	pvlist entry for the new mapping may remove others), and
	 *	hence always enters the new mapping at step 3)
	 *
	 *	If the old physical page is not managed step 1) is skipped
	 *	(except for updating the TLBs), and the mapping is
	 *	overwritten at step 3).  If the new physical page is not
	 *	managed, step 2) is skipped.
	 */
1705 if (old_pa
!= (vm_offset_t
) 0) {
1707 PMAP_INVALIDATE_PAGE(pmap
, v
);
1710 if (pmap
!= kernel_pmap
)
1711 ptep_check(get_pte_page(pte
));
1712 #endif /* DEBUG_PTE_PAGE */
1715 * Don't do anything to pages outside valid memory here.
1716 * Instead convince the code that enters a new mapping
1717 * to overwrite the old one.
1720 if (valid_page(old_pa
)) {
1722 pai
= pa_index(old_pa
);
1725 assert(pmap
->stats
.resident_count
>= 1);
1726 pmap
->stats
.resident_count
--;
1727 if (iswired(*pte
)) {
1728 assert(pmap
->stats
.wired_count
>= 1);
1729 pmap
->stats
.wired_count
--;
1731 i
= ptes_per_vm_page
;
1733 pmap_phys_attributes
[pai
] |=
1734 *pte
& (PHYS_MODIFIED
|PHYS_REFERENCED
);
1737 pte_increment_pa(template);
1741 * Put pte back to beginning of page since it'll be
1742 * used later to enter the new page.
1744 pte
-= ptes_per_vm_page
;
1747 * Remove the mapping from the pvlist for
1748 * this physical page.
1751 register pv_entry_t prev
, cur
;
1753 pv_h
= pai_to_pvh(pai
);
1754 if (pv_h
->pmap
== PMAP_NULL
) {
1755 panic("pmap_enter: null pv_list!");
1757 if (pv_h
->va
== v
&& pv_h
->pmap
== pmap
) {
1759 * Header is the pv_entry. Copy the next one
1760 * to header and free the next one (we cannot
1764 if (cur
!= PV_ENTRY_NULL
) {
1769 pv_h
->pmap
= PMAP_NULL
;
1776 if ((cur
= prev
->next
) == PV_ENTRY_NULL
) {
1777 panic("pmap_enter: mapping not in pv_list!");
1779 } while (cur
->va
!= v
|| cur
->pmap
!= pmap
);
1780 prev
->next
= cur
->next
;
1789 * old_pa is not managed. Pretend it's zero so code
1790 * at Step 3) will enter new mapping (overwriting old
1791 * one). Do removal part of accounting.
1793 old_pa
= (vm_offset_t
) 0;
1794 assert(pmap
->stats
.resident_count
>= 1);
1795 pmap
->stats
.resident_count
--;
1796 if (iswired(*pte
)) {
1797 assert(pmap
->stats
.wired_count
>= 1);
1798 pmap
->stats
.wired_count
--;
1803 if (valid_page(pa
)) {
1806 * Step 2) Enter the mapping in the PV list for this
1816 * We can return here from the sharing fault code below
1817 * in case we removed the only entry on the pv list and thus
1818 * must enter the new one in the list header.
1820 #endif /* SHARING_FAULTS */
1822 pv_h
= pai_to_pvh(pai
);
1824 if (pv_h
->pmap
== PMAP_NULL
) {
1830 pv_h
->next
= PV_ENTRY_NULL
;
1836 * check that this mapping is not already there
1837 * or there is no alias for this mapping in the same map
1839 pv_entry_t e
= pv_h
;
1840 while (e
!= PV_ENTRY_NULL
) {
1841 if (e
->pmap
== pmap
&& e
->va
== v
)
1842 panic("pmap_enter: already in pv_list");
1850 * do sharing faults.
1851 * if we find an entry on this pv list in the same address
1852 * space, remove it. we know there will not be more
1855 pv_entry_t e
= pv_h
;
1858 while (e
!= PV_ENTRY_NULL
) {
1859 if (e
->pmap
== pmap
) {
1861 * Remove it, drop pv list lock first.
1865 opte
= pmap_pte(pmap
, e
->va
);
1866 assert(opte
!= PT_ENTRY_NULL
);
1868 * Invalidate the translation buffer,
1869 * then remove the mapping.
1871 PMAP_INVALIDATE_PAGE(pmap
, e
->va
);
1872 pmap_remove_range(pmap
, e
->va
, opte
,
1873 opte
+ ptes_per_vm_page
);
1875 * We could have remove the head entry,
1876 * so there could be no more entries
1877 * and so we have to use the pv head entry.
1878 * so, go back to the top and try the entry
1887 * check that this mapping is not already there
1890 while (e
!= PV_ENTRY_NULL
) {
1891 if (e
->pmap
== pmap
)
1892 panic("pmap_enter: alias in pv_list");
1896 #endif /* SHARING_FAULTS */
1900 * check for aliases within the same address space.
1902 pv_entry_t e
= pv_h
;
1903 vm_offset_t rpc
= get_rpc();
1905 while (e
!= PV_ENTRY_NULL
) {
1906 if (e
->pmap
== pmap
) {
1908 * log this entry in the alias ring buffer
1909 * if it's not there already.
1911 struct pmap_alias
*pma
;
1915 for (ii
= 0; ii
< pmap_alias_index
; ii
++) {
1916 if (pmap_aliasbuf
[ii
].rpc
== rpc
) {
1917 /* found it in the log already */
1923 pma
= &pmap_aliasbuf
[pmap_alias_index
];
1927 pma
->cookie
= PMAP_ALIAS_COOKIE
;
1928 if (++pmap_alias_index
>= PMAP_ALIAS_MAX
)
1929 panic("pmap_enter: exhausted alias log");
1935 #endif /* DEBUG_ALIAS */
1937 * Add new pv_entry after header.
1939 if (pv_e
== PV_ENTRY_NULL
) {
1941 if (pv_e
== PV_ENTRY_NULL
) {
1943 PMAP_READ_UNLOCK(pmap
, spl
);
1948 pv_e
= (pv_entry_t
) zalloc(pv_list_zone
);
1954 pv_e
->next
= pv_h
->next
;
1957 * Remember that we used the pvlist entry.
1959 pv_e
= PV_ENTRY_NULL
;
1965 * Step 3) Enter and count the mapping.
1968 pmap
->stats
.resident_count
++;
1971 * Build a template to speed up entering -
1972 * only the pfn changes.
1974 template = pa_to_pte(pa
) | INTEL_PTE_VALID
;
1975 if (pmap
!= kernel_pmap
)
1976 template |= INTEL_PTE_USER
;
1977 if (prot
& VM_PROT_WRITE
)
1978 template |= INTEL_PTE_WRITE
;
1980 template |= INTEL_PTE_WIRED
;
1981 pmap
->stats
.wired_count
++;
1983 i
= ptes_per_vm_page
;
1985 WRITE_PTE(pte
, template)
1987 pte_increment_pa(template);
1990 if (pv_e
!= PV_ENTRY_NULL
) {
1994 PMAP_READ_UNLOCK(pmap
, spl
);
/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *		The mapping must already exist in the pmap.
 */
2006 register pmap_t map
,
2010 register pt_entry_t
*pte
;
2015 * We must grab the pmap system lock because we may
2016 * change a pte_page queue.
2018 PMAP_READ_LOCK(map
, spl
);
2020 if ((pte
= pmap_pte(map
, v
)) == PT_ENTRY_NULL
)
2021 panic("pmap_change_wiring: pte missing");
2023 if (wired
&& !iswired(*pte
)) {
2025 * wiring down mapping
2027 map
->stats
.wired_count
++;
2028 i
= ptes_per_vm_page
;
2030 *pte
++ |= INTEL_PTE_WIRED
;
2033 else if (!wired
&& iswired(*pte
)) {
2037 assert(map
->stats
.wired_count
>= 1);
2038 map
->stats
.wired_count
--;
2039 i
= ptes_per_vm_page
;
2041 *pte
++ &= ~INTEL_PTE_WIRED
;
2045 PMAP_READ_UNLOCK(map
, spl
);
/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(
	register pmap_t		pmap,
	vm_offset_t		va)
{
	register pt_entry_t	*pte;
	register vm_offset_t	pa;
	spl_t			spl;

	SPLVM(spl);
	simple_lock(&pmap->lock);
	if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
	    pa = (vm_offset_t) 0;
	else if (!(*pte & INTEL_PTE_VALID))
	    pa = (vm_offset_t) 0;
	else
	    pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
	simple_unlock(&pmap->lock);
	SPLX(spl);
	return(pa);
}
/*
 *	Routine:	pmap_expand
 *
 *	Expands a pmap to be able to map the specified virtual address.
 *
 *	Allocates new virtual memory for the P0 or P1 portion of the
 *	pmap, then re-maps the physical pages that were in the old
 *	pmap to be in the new pmap.
 *
 *	Must be called with the pmap system and the pmap unlocked,
 *	since these must be unlocked to use vm_allocate or vm_deallocate.
 *	Thus it must be called in a loop that checks whether the map
 *	has been expanded enough.
 *	(We won't loop forever, since page tables aren't shrunk.)
 */
2094 register pmap_t map
,
2095 register vm_offset_t v
)
2098 register vm_page_t m
;
2099 register vm_offset_t pa
;
2103 if (map
== kernel_pmap
)
2104 panic("pmap_expand");
2107 * We cannot allocate the pmap_object in pmap_init,
2108 * because it is called before the zone package is up.
2109 * Allocate it now if it is missing.
2111 if (pmap_object
== VM_OBJECT_NULL
)
2112 pmap_object
= vm_object_allocate(avail_end
);
2115 * Allocate a VM page for the level 2 page table entries.
2117 while ((m
= vm_page_grab()) == VM_PAGE_NULL
)
2121 * Map the page to its physical address so that it
2122 * can be found later.
2125 vm_object_lock(pmap_object
);
2126 vm_page_insert(m
, pmap_object
, pa
);
2127 vm_page_lock_queues();
2129 inuse_ptepages_count
++;
2130 vm_object_unlock(pmap_object
);
2131 vm_page_unlock_queues();
2136 memset((void *)phystokv(pa
), 0, PAGE_SIZE
);
2138 PMAP_READ_LOCK(map
, spl
);
2140 * See if someone else expanded us first
2142 if (pmap_pte(map
, v
) != PT_ENTRY_NULL
) {
2143 PMAP_READ_UNLOCK(map
, spl
);
2144 vm_object_lock(pmap_object
);
2145 vm_page_lock_queues();
2147 inuse_ptepages_count
--;
2148 vm_page_unlock_queues();
2149 vm_object_unlock(pmap_object
);
2154 * Set the page directory entry for this page table.
2155 * If we have allocated more than one hardware page,
2156 * set several page directory entries.
2159 i
= ptes_per_vm_page
;
2160 pdp
= &map
->dirbase
[pdenum(map
, v
) & ~(i
-1)];
2162 *pdp
= pa_to_pte(pa
)
2167 pa
+= INTEL_PGBYTES
;
2170 PMAP_READ_UNLOCK(map
, spl
);
/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(
	pmap_t		dst_pmap,
	pmap_t		src_pmap,
	vm_offset_t	dst_addr,
	vm_size_t	len,
	vm_offset_t	src_addr)
{
#ifdef	lint
	dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
#endif	/* lint */
}
/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *
 *		Called by the pageout daemon when pages are scarce.
 */
2214 register pt_entry_t
*pdp
, *ptp
;
2223 if (p
== kernel_pmap
)
2227 * Garbage collect map.
2229 PMAP_READ_LOCK(p
, spl
);
2232 for (pdp
= p
->dirbase
;
2233 pdp
< &p
->dirbase
[pdenum(p
, LINEAR_KERNEL_ADDRESS
)];
2234 pdp
+= ptes_per_vm_page
)
2236 if (*pdp
& INTEL_PTE_VALID
)
2237 if(*pdp
& INTEL_PTE_REF
) {
2238 *pdp
&= ~INTEL_PTE_REF
;
2242 pa
= pte_to_pa(*pdp
);
2243 ptp
= (pt_entry_t
*)phystokv(pa
);
2244 eptp
= ptp
+ NPTES
*ptes_per_vm_page
;
2247 * If the pte page has any wired mappings, we cannot
2252 register pt_entry_t
*ptep
;
2253 for (ptep
= ptp
; ptep
< eptp
; ptep
++) {
2254 if (iswired(*ptep
)) {
2262 * Remove the virtual addresses mapped by this pte page.
2264 pmap_remove_range(p
,
2265 pdetova(pdp
- p
->dirbase
),
2270 * Invalidate the page directory pointer.
2273 register int i
= ptes_per_vm_page
;
2274 register pt_entry_t
*pdep
= pdp
;
2280 PMAP_READ_UNLOCK(p
, spl
);
2283 * And free the pte page itself.
2286 register vm_page_t m
;
2288 vm_object_lock(pmap_object
);
2289 m
= vm_page_lookup(pmap_object
, pa
);
2290 if (m
== VM_PAGE_NULL
)
2291 panic("pmap_collect: pte page not in object");
2292 vm_page_lock_queues();
2294 inuse_ptepages_count
--;
2295 vm_page_unlock_queues();
2296 vm_object_unlock(pmap_object
);
2299 PMAP_READ_LOCK(p
, spl
);
2303 PMAP_READ_UNLOCK(p
, spl
);
/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel(void)
{
	return (kernel_pmap);
}
/*
 *	pmap_zero_page zeros the specified (machine independent) page.
 *	See machine/phys.c or machine/phys.s for implementation.
 */
void
pmap_zero_page(
	register vm_offset_t	phys)
{
	register int	i;

	assert(phys != vm_page_fictitious_addr);
	i = PAGE_SIZE / INTEL_PGBYTES;
	phys = intel_pfn(phys);

	while (i--)
		zero_phys(phys++);
}
/*
 *	pmap_copy_page copies the specified (machine independent) page.
 *	See machine/phys.c or machine/phys.s for implementation.
 */
void
pmap_copy_page(
	vm_offset_t	src,
	vm_offset_t	dst)
{
	register int	i;

	assert(src != vm_page_fictitious_addr);
	assert(dst != vm_page_fictitious_addr);
	i = PAGE_SIZE / INTEL_PGBYTES;

	while (i--) {
		copy_phys(intel_pfn(src), intel_pfn(dst));
		src += INTEL_PGBYTES;
		dst += INTEL_PGBYTES;
	}
}
/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(
	pmap_t		pmap,
	vm_offset_t	start,
	vm_offset_t	end,
	boolean_t	pageable)
{
#ifdef	lint
	pmap++; start++; end++; pageable++;
#endif	/* lint */
}
/*
 *	Clear specified attribute bits.
 */
void
phys_attribute_clear(
	vm_offset_t	phys,
	int		bits)
{
	pv_entry_t		pv_h;
	register pv_entry_t	pv_e;
	register pt_entry_t	*pte;
	int			pai;
	register pmap_t		pmap;
	spl_t			spl;
2406 assert(phys
!= vm_page_fictitious_addr
);
2407 if (!valid_page(phys
)) {
2409 * Not a managed page.
2415 * Lock the pmap system first, since we will be changing
2419 PMAP_WRITE_LOCK(spl
);
2421 pai
= pa_index(phys
);
2422 pv_h
= pai_to_pvh(pai
);
2425 * Walk down PV list, clearing all modify or reference bits.
2426 * We do not have to lock the pv_list because we have
2427 * the entire pmap system locked.
2429 if (pv_h
->pmap
!= PMAP_NULL
) {
2431 * There are some mappings.
2433 for (pv_e
= pv_h
; pv_e
!= PV_ENTRY_NULL
; pv_e
= pv_e
->next
) {
2437 * Lock the pmap to block pmap_extract and similar routines.
2439 simple_lock(&pmap
->lock
);
2442 register vm_offset_t va
;
2445 pte
= pmap_pte(pmap
, va
);
2449 * Consistency checks.
2451 assert(*pte
& INTEL_PTE_VALID
);
2452 /* assert(pte_to_phys(*pte) == phys); */
2456 * Invalidate TLBs for all CPUs using this mapping.
2458 PMAP_INVALIDATE_PAGE(pmap
, va
);
2462 * Clear modify or reference bits.
2465 register int i
= ptes_per_vm_page
;
2470 simple_unlock(&pmap
->lock
);
2474 pmap_phys_attributes
[pai
] &= ~bits
;
2476 PMAP_WRITE_UNLOCK(spl
);
2480 * Check specified attribute bits.
2483 phys_attribute_test(
2488 register pv_entry_t pv_e
;
2489 register pt_entry_t
*pte
;
2491 register pmap_t pmap
;
2494 assert(phys
!= vm_page_fictitious_addr
);
2495 if (!valid_page(phys
)) {
2497 * Not a managed page.
2503 * Lock the pmap system first, since we will be checking
2507 PMAP_WRITE_LOCK(spl
);
2509 pai
= pa_index(phys
);
2510 pv_h
= pai_to_pvh(pai
);
2512 if (pmap_phys_attributes
[pai
] & bits
) {
2513 PMAP_WRITE_UNLOCK(spl
);
2518 * Walk down PV list, checking all mappings.
2519 * We do not have to lock the pv_list because we have
2520 * the entire pmap system locked.
2522 if (pv_h
->pmap
!= PMAP_NULL
) {
2524 * There are some mappings.
2526 for (pv_e
= pv_h
; pv_e
!= PV_ENTRY_NULL
; pv_e
= pv_e
->next
) {
2530 * Lock the pmap to block pmap_extract and similar routines.
2532 simple_lock(&pmap
->lock
);
2535 register vm_offset_t va
;
2538 pte
= pmap_pte(pmap
, va
);
2542 * Consistency checks.
2544 assert(*pte
& INTEL_PTE_VALID
);
2545 /* assert(pte_to_phys(*pte) == phys); */
2550 * Check modify or reference bits.
2553 register int i
= ptes_per_vm_page
;
2556 if (*pte
++ & bits
) {
2557 simple_unlock(&pmap
->lock
);
2558 PMAP_WRITE_UNLOCK(spl
);
2563 simple_unlock(&pmap
->lock
);
2566 PMAP_WRITE_UNLOCK(spl
);
2571 * Set specified attribute bits.
2580 assert(phys
!= vm_page_fictitious_addr
);
2581 if (!valid_page(phys
)) {
2583 * Not a managed page.
2589 * Lock the pmap system and set the requested bits in
2590 * the phys attributes array. Don't need to bother with
2591 * ptes because the test routine looks here first.
2594 PMAP_WRITE_LOCK(spl
);
2595 pmap_phys_attributes
[pa_index(phys
)] |= bits
;
2596 PMAP_WRITE_UNLOCK(spl
);
/*
 *	Set the modify bit on the specified physical page.
 */
void pmap_set_modify(
	register vm_offset_t	phys)
{
	phys_attribute_set(phys, PHYS_MODIFIED);
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(
	register vm_offset_t	phys)
{
	phys_attribute_clear(phys, PHYS_MODIFIED);
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(
	register vm_offset_t	phys)
{
	return (phys_attribute_test(phys, PHYS_MODIFIED));
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(
	vm_offset_t	phys)
{
	phys_attribute_clear(phys, PHYS_REFERENCED);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(
	vm_offset_t	phys)
{
	return (phys_attribute_test(phys, PHYS_REFERENCED));
}
/*
 *	Set the modify bit on the specified range
 *	of this map as requested.
 *
 *	This optimization stands only if each time the dirty bit
 *	in vm_page_t is tested, it is also tested in the pmap.
 */
2675 register pt_entry_t
*pde
;
2676 register pt_entry_t
*spte
, *epte
;
2679 if (map
== PMAP_NULL
)
2682 PMAP_READ_LOCK(map
, spl
);
2684 pde
= pmap_pde(map
, s
);
2685 while (s
&& s
< e
) {
2686 l
= (s
+ PDE_MAPPED_SIZE
) & ~(PDE_MAPPED_SIZE
-1);
2689 if (*pde
& INTEL_PTE_VALID
) {
2690 spte
= (pt_entry_t
*)ptetokv(*pde
);
2692 spte
= &spte
[ptenum(s
)];
2693 epte
= &spte
[intel_btop(l
-s
)];
2695 epte
= &spte
[intel_btop(PDE_MAPPED_SIZE
)];
2696 spte
= &spte
[ptenum(s
)];
2698 while (spte
< epte
) {
2699 if (*spte
& INTEL_PTE_VALID
) {
2700 *spte
|= (INTEL_PTE_MOD
| INTEL_PTE_WRITE
);
2709 PMAP_READ_UNLOCK(map
, spl
);
2714 invalidate_icache(vm_offset_t addr
, unsigned cnt
, int phys
)
2719 flush_dcache(vm_offset_t addr
, unsigned count
, int phys
)
2727 pmap_wait_for_clear()
2729 register int my_cpu
;
2731 register pmap_t my_pmap
;
2733 mp_disable_preemption();
2734 my_cpu
= cpu_number();
2737 my_pmap
= real_pmap
[my_cpu
];
2739 if (!(my_pmap
&& pmap_in_use(my_pmap
, my_cpu
)))
2740 my_pmap
= kernel_pmap
;
2743 * Raise spl to splhigh (above splip) to block out pmap_extract
2744 * from IO code (which would put this cpu back in the active
2750 * Wait for any pmap updates in progress, on either user
2753 while (*(volatile hw_lock_t
)&my_pmap
->lock
.interlock
||
2754 *(volatile hw_lock_t
)&kernel_pmap
->lock
.interlock
) {
2759 mp_enable_preemption();
void
pmap_flush_tlb_interrupt(void) {
	pmap_wait_for_clear();

	flush_tlb();
}

void
pmap_reload_tlb_interrupt(void) {
	pmap_wait_for_clear();

	set_cr3(kernel_pmap->pdirbase);
}

#endif	/* NCPUS > 1 */
2781 /* show phys page mappings and attributes */
2783 extern void db_show_page(vm_offset_t pa
);
2786 db_show_page(vm_offset_t pa
)
2793 pv_h
= pai_to_pvh(pai
);
2795 attr
= pmap_phys_attributes
[pai
];
2796 printf("phys page %x ", pa
);
2797 if (attr
& PHYS_MODIFIED
)
2798 printf("modified, ");
2799 if (attr
& PHYS_REFERENCED
)
2800 printf("referenced, ");
2801 if (pv_h
->pmap
|| pv_h
->next
)
2802 printf(" mapped at\n");
2804 printf(" not mapped\n");
2805 for (; pv_h
; pv_h
= pv_h
->next
)
2807 printf("%x in pmap %x\n", pv_h
->va
, pv_h
->pmap
);
2810 #endif /* MACH_KDB */
2813 void db_kvtophys(vm_offset_t
);
2814 void db_show_vaddrs(pt_entry_t
*);
2817 * print out the results of kvtophys(arg)
2823 db_printf("0x%x", kvtophys(vaddr
));
2827 * Walk the pages tables.
2831 pt_entry_t
*dirbase
)
2833 pt_entry_t
*ptep
, *pdep
, tmp
;
2834 int x
, y
, pdecnt
, ptecnt
;
2837 dirbase
= kernel_pmap
->dirbase
;
2840 db_printf("need a dirbase...\n");
2843 dirbase
= (pt_entry_t
*) ((unsigned long) dirbase
& ~INTEL_OFFMASK
);
2845 db_printf("dirbase: 0x%x\n", dirbase
);
2847 pdecnt
= ptecnt
= 0;
2849 for (y
= 0; y
< NPDES
; y
++, pdep
++) {
2850 if (((tmp
= *pdep
) & INTEL_PTE_VALID
) == 0) {
2854 ptep
= (pt_entry_t
*) ((*pdep
) & ~INTEL_OFFMASK
);
2855 db_printf("dir[%4d]: 0x%x\n", y
, *pdep
);
2856 for (x
= 0; x
< NPTES
; x
++, ptep
++) {
2857 if (((tmp
= *ptep
) & INTEL_PTE_VALID
) == 0) {
2861 db_printf(" tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n",
2864 (y
<< 22) | (x
<< 12),
2865 *ptep
& ~INTEL_OFFMASK
);
2869 db_printf("total: %d tables, %d page table entries.\n", pdecnt
, ptecnt
);
2872 #endif /* MACH_KDB */
2874 #include <mach_vm_debug.h>
2876 #include <vm/vm_debug.h>
2879 pmap_list_resident_pages(
2880 register pmap_t pmap
,
2881 register vm_offset_t
*listp
,
2886 #endif /* MACH_VM_DEBUG */
/*
 *	BSD support routine to reassign virtual addresses.
 */
void
pmap_movepage(unsigned long from, unsigned long to, vm_size_t size)
{
	spl_t		spl;
	pt_entry_t	*pte, saved_pte;

	/* Lock the kernel map */
	PMAP_READ_LOCK(kernel_pmap, spl);
	pte = pmap_pte(kernel_pmap, from);
2907 panic("pmap_pagemove from pte NULL");
2909 PMAP_READ_UNLOCK(kernel_pmap
, spl
);
2911 pmap_enter(kernel_pmap
, to
, i386_trunc_page(*pte
),
2912 VM_PROT_READ
|VM_PROT_WRITE
, *pte
& INTEL_PTE_WIRED
);
2914 pmap_remove(kernel_pmap
, from
, from
+PAGE_SIZE
);
2916 PMAP_READ_LOCK(kernel_pmap
, spl
);
2917 pte
= pmap_pte(kernel_pmap
, to
);
2919 panic("pmap_pagemove 'to' pte NULL");
2922 PMAP_READ_UNLOCK(kernel_pmap
, spl
);
2929 /* Get the processors to update the TLBs */
kern_return_t bmapvideo(vm_offset_t *info);
kern_return_t bmapvideo(vm_offset_t *info) {

	extern struct vc_info	vinfo;

#ifdef NOTIMPLEMENTED
	(void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info));	/* Copy out the video info */
#endif
	return KERN_SUCCESS;
}

kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);
kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) {

#ifdef NOTIMPLEMENTED
	pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr);	/* Map it in */
#endif
	return KERN_SUCCESS;
}

kern_return_t bmapmapr(vm_offset_t va);
kern_return_t bmapmapr(vm_offset_t va) {

#ifdef NOTIMPLEMENTED
	mapping_remove(current_act()->task->map->pmap, va);	/* Remove map */
#endif
	return KERN_SUCCESS;
}