/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
#include <string.h>
-#include <norma_vm.h>
#include <mach_kdb.h>
#include <mach_ldebug.h>
+#include <libkern/OSAtomic.h>
+
#include <mach/machine/vm_types.h>
#include <mach/boolean.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
+#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/machine_cpu.h>
-#include <i386/mp_slave_boot.h>
#include <i386/seg.h>
+#include <i386/serial_io.h>
#include <i386/cpu_capabilities.h>
+#include <i386/machine_routines.h>
+#include <i386/proc_reg.h>
+#include <i386/tsc.h>
+#include <i386/acpi.h>
+#include <i386/pmap_internal.h>
#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */
-#include <kern/xpr.h>
-
#include <vm/vm_protos.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
+#include <i386/i386_lowmem.h>
+#include <i386/lowglobals.h>
-#include <sys/kdebug.h>
+
+/* #define DEBUGINTERRUPTS 1 uncomment to ensure pmap callers have interrupts enabled */
+#ifdef DEBUGINTERRUPTS
+#define pmap_intr_assert() {if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);}
+#else
+#define pmap_intr_assert()
+#endif
#ifdef IWANTTODEBUG
#undef DEBUG
#include <i386/postcode.h>
#endif /* IWANTTODEBUG */
-/*
- * Forward declarations for internal functions.
- */
-void pmap_expand_pml4(
- pmap_t map,
- vm_map_offset_t v);
-
-void pmap_expand_pdpt(
- pmap_t map,
- vm_map_offset_t v);
-
-void pmap_expand(
- pmap_t map,
- vm_map_offset_t v);
-
-static void pmap_remove_range(
- pmap_t pmap,
- vm_map_offset_t va,
- pt_entry_t *spte,
- pt_entry_t *epte);
-
-void phys_attribute_clear(
- ppnum_t phys,
- int bits);
-
-boolean_t phys_attribute_test(
- ppnum_t phys,
- int bits);
-
-void phys_attribute_set(
- ppnum_t phys,
- int bits);
-
-void pmap_set_reference(
- ppnum_t pn);
-
-void pmap_movepage(
- unsigned long from,
- unsigned long to,
- vm_size_t size);
-
-boolean_t phys_page_exists(
- ppnum_t pn);
-
#ifdef PMAP_DEBUG
void dump_pmap(pmap_t);
void dump_4GB_pdpt(pmap_t p);
void dump_4GB_pdpt_thread(thread_t tp);
#endif
-#define iswired(pte) ((pte) & INTEL_PTE_WIRED)
-
int nx_enabled = 1; /* enable no-execute protection */
+#ifdef CONFIG_EMBEDDED
+int allow_data_exec = 0; /* no exec from data, embedded is hardcore like that */
+#else
+int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
+#endif
+int allow_stack_exec = 0; /* No apps may execute from the stack by default */
-int cpu_64bit = 0;
-
-
-/*
- * Private data structures.
- */
-
-/*
- * For each vm_page_t, there is a list of all currently
- * valid virtual mappings of that page. An entry is
- * a pv_entry_t; the list is the pv_table.
- */
+#if CONFIG_YONAH
+boolean_t cpu_64bit = FALSE;
+#else
+const boolean_t cpu_64bit = TRUE;
+#endif
+boolean_t pmap_trace = FALSE;
-typedef struct pv_entry {
- struct pv_entry *next; /* next pv_entry */
- pmap_t pmap; /* pmap where mapping lies */
- vm_map_offset_t va; /* virtual address for mapping */
-} *pv_entry_t;
+uint64_t max_preemption_latency_tsc = 0;
-#define PV_ENTRY_NULL ((pv_entry_t) 0)
+pv_hashed_entry_t *pv_hash_table; /* hash lists */
-pv_entry_t pv_head_table; /* array of entries, one per page */
+uint32_t npvhash = 0;
/*
* pv_list entries are kept on a list that can only be accessed
* with the pmap system locked (at SPLVM, not in the cpus_active set).
- * The list is refilled from the pv_list_zone if it becomes empty.
+ * The list is refilled from the pv_hashed_list_zone if it becomes empty.
*/
-pv_entry_t pv_free_list; /* free list at SPLVM */
-decl_simple_lock_data(,pv_free_list_lock)
-int pv_free_count = 0;
-#define PV_LOW_WATER_MARK 5000
-#define PV_ALLOC_CHUNK 2000
-thread_call_t mapping_adjust_call;
-static thread_call_data_t mapping_adjust_call_data;
-int mappingrecurse = 0;
-
-#define PV_ALLOC(pv_e) { \
- simple_lock(&pv_free_list_lock); \
- if ((pv_e = pv_free_list) != 0) { \
- pv_free_list = pv_e->next; \
- pv_free_count--; \
- if (pv_free_count < PV_LOW_WATER_MARK) \
- if (hw_compare_and_store(0,1,(u_int *)&mappingrecurse)) \
- thread_call_enter(mapping_adjust_call); \
- } \
- simple_unlock(&pv_free_list_lock); \
-}
-
-#define PV_FREE(pv_e) { \
- simple_lock(&pv_free_list_lock); \
- pv_e->next = pv_free_list; \
- pv_free_list = pv_e; \
- pv_free_count++; \
- simple_unlock(&pv_free_list_lock); \
-}
+pv_rooted_entry_t pv_free_list = PV_ROOTED_ENTRY_NULL; /* free list at SPLVM */
+pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
+pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
+decl_simple_lock_data(,pv_hashed_free_list_lock)
+decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
+decl_simple_lock_data(,pv_hash_table_lock)
-zone_t pv_list_zone; /* zone of pv_entry structures */
+zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */
static zone_t pdpt_zone;
-/*
- * Each entry in the pv_head_table is locked by a bit in the
- * pv_lock_table. The lock bits are accessed by the physical
- * address of the page they lock.
- */
-
-char *pv_lock_table; /* pointer to array of bits */
-#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
-
/*
* First and last physical addresses that we maintain any information
* for. Initialized to zero so that pmap operations done before
* pmap_init won't touch any non-existent structures.
*/
-pmap_paddr_t vm_first_phys = (pmap_paddr_t) 0;
-pmap_paddr_t vm_last_phys = (pmap_paddr_t) 0;
boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
static struct vm_object kptobj_object_store;
static vm_object_t kptobj;
/*
- * Index into pv_head table, its lock bits, and the modify/reference
- * bits starting at vm_first_phys.
+ * Index into pv_head table, its lock bits, and the modify/reference and managed bits
*/
-#define pa_index(pa) (i386_btop(pa - vm_first_phys))
-
-#define pai_to_pvh(pai) (&pv_head_table[pai])
-#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table)
-#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table)
-
/*
* Array of physical page attributes for managed pages.
* One byte per physical page.
*/
char *pmap_phys_attributes;
+unsigned int last_managed_page = 0;
-/*
- * Physical page attributes. Copy bits from PTE definition.
- */
-#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
-#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
-#define PHYS_NCACHE INTEL_PTE_NCACHE
-
-/*
- * Amount of virtual memory mapped by one
- * page-directory entry.
- */
-#define PDE_MAPPED_SIZE (pdetova(1))
uint64_t pde_mapped_size;
/*
*/
/*
- * Locking Protocols:
+ * Locking Protocols: (changed 2/2007 JK)
*
* There are two structures in the pmap module that need locking:
* the pmaps themselves, and the per-page pv_lists (which are locked
* pmap_remove_all and pmap_copy_on_write operate on a physical page
* basis and want to do the locking in the reverse order, i.e. lock
* a pv_list and then go through all the pmaps referenced by that list.
- * To protect against deadlock between these two cases, the pmap_lock
- * is used. There are three different locking protocols as a result:
- *
- * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
- * the pmap.
*
- * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
- * lock on the pmap_lock (shared read), then lock the pmap
- * and finally the pv_lists as needed [i.e. pmap lock before
- * pv_list lock.]
- *
- * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
- * Get a write lock on the pmap_lock (exclusive write); this
- * also guaranteees exclusive access to the pv_lists. Lock the
- * pmaps as needed.
- *
- * At no time may any routine hold more than one pmap lock or more than
- * one pv_list lock. Because interrupt level routines can allocate
- * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
- * kernel_pmap can only be held at splhigh.
+ *	The system-wide pmap lock has been removed. Now, paths take a lock
+ *	on the pmap before changing its 'shape', and the reverse-order lockers
+ *	(coming in by phys ppn) take a lock on the corresponding pv, then
+ *	retest to be sure nothing changed during the window before they locked,
+ *	and can then run up/down the pv lists holding the list lock. This also
+ *	lets the pmap layer run (nearly completely) with interrupts enabled,
+ *	unlike previously.
*/
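+
+#if 0
+/*
+ * Illustrative only (never compiled): the two lock orders described above,
+ * using this file's macros.  The pte retest and the pv-list walk are
+ * elided/simplified.
+ */
+static void
+pmap_lock_order_example(pmap_t pmap, ppnum_t pn)
+{
+	int			pai = ppn_to_pai(pn);
+	pv_rooted_entry_t	pv_h = pai_to_pvh(pai);
+
+	/* pmap-side path (pmap_enter/pmap_remove style): pmap lock, then pv */
+	PMAP_LOCK(pmap);
+	LOCK_PVH(pai);
+	/* ... edit the pte and the pv list ... */
+	UNLOCK_PVH(pai);
+	PMAP_UNLOCK(pmap);
+
+	/* phys-side path (pmap_page_protect style): pv lock first, then retest */
+	LOCK_PVH(pai);
+	if (pv_h->pmap != PMAP_NULL) {
+		/*
+		 * Retest that the rooted mapping still covers this ppn; it may
+		 * have changed in the window before the pv lock was taken.
+		 * Only then walk up/down the pv list under the list lock.
+		 */
+	}
+	UNLOCK_PVH(pai);
+}
+#endif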
-/*
- * We raise the interrupt level to splvm, to block interprocessor
- * interrupts during pmap operations. We mark the cpu's cr3 inactive
- * while interrupts are blocked.
- */
-#define SPLVM(spl) { \
- spl = splhigh(); \
- CPU_CR3_MARK_INACTIVE(); \
-}
-#define SPLX(spl) { \
- if (current_cpu_datap()->cpu_tlb_invalid) \
- process_pmap_updates(); \
- CPU_CR3_MARK_ACTIVE(); \
- splx(spl); \
-}
-
/*
- * Lock on pmap system
+ * PV locking
*/
-lock_t pmap_system_lock;
-
-#define PMAP_READ_LOCK(pmap, spl) { \
- SPLVM(spl); \
- lock_read(&pmap_system_lock); \
- simple_lock(&(pmap)->lock); \
-}
-#define PMAP_WRITE_LOCK(spl) { \
- SPLVM(spl); \
- lock_write(&pmap_system_lock); \
+#define LOCK_PVH(index) { \
+ mp_disable_preemption(); \
+ lock_pvh_pai(index); \
}
-#define PMAP_READ_UNLOCK(pmap, spl) { \
- simple_unlock(&(pmap)->lock); \
- lock_read_done(&pmap_system_lock); \
- SPLX(spl); \
+#define UNLOCK_PVH(index) { \
+ unlock_pvh_pai(index); \
+ mp_enable_preemption(); \
}
-#define PMAP_WRITE_UNLOCK(spl) { \
- lock_write_done(&pmap_system_lock); \
- SPLX(spl); \
-}
-
-#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
- simple_lock(&(pmap)->lock); \
- lock_write_to_read(&pmap_system_lock); \
-}
+/*
+ * PV hash locking
+ */
-#define LOCK_PVH(index) lock_pvh_pai(index)
+#define LOCK_PV_HASH(hash) lock_hash_hash(hash)
-#define UNLOCK_PVH(index) unlock_pvh_pai(index)
+#define UNLOCK_PV_HASH(hash) unlock_hash_hash(hash)
#if USLOCK_DEBUG
extern int max_lock_loops;
-extern int disableSerialOuput;
#define LOOP_VAR \
unsigned int loop_count; \
- loop_count = disableSerialOuput ? max_lock_loops \
+ loop_count = disable_serial_output ? max_lock_loops \
: max_lock_loops*100
#define LOOP_CHECK(msg, pmap) \
if (--loop_count == 0) { \
#define LOOP_CHECK(msg, pmap)
#endif /* USLOCK_DEBUG */
-
-static void pmap_flush_tlbs(pmap_t pmap);
-
-#define PMAP_UPDATE_TLBS(pmap, s, e) \
- pmap_flush_tlbs(pmap)
-
-
-#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
-
+unsigned pmap_memory_region_count;
+unsigned pmap_memory_region_current;
pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
int pmap_debug = 0; /* flag for debugging prints */
-unsigned int inuse_ptepages_count = 0; /* debugging */
+unsigned int inuse_ptepages_count = 0;
+long long alloc_ptepages_count __attribute__((aligned(8))) = 0LL; /* aligned for atomic access */
+unsigned int bootstrap_wired_pages = 0;
+int pt_fake_zone_index = -1;
+
+extern long NMIPI_acks;
+
+static inline void
+PMAP_ZINFO_SALLOC(vm_size_t bytes)
+{
+ current_thread()->tkm_shared.alloc += bytes;
+}
+
+static inline void
+PMAP_ZINFO_SFREE(vm_size_t bytes)
+{
+ current_thread()->tkm_shared.free += (bytes);
+}
addr64_t kernel64_cr3;
boolean_t no_shared_cr3 = FALSE; /* -no_shared_cr3 boot arg */
-/*
- * Pmap cache. Cache is threaded through ref_count field of pmap.
- * Max will eventually be constant -- variable for experimentation.
- */
-int pmap_cache_max = 32;
-int pmap_alloc_chunk = 8;
-pmap_t pmap_cache_list;
-int pmap_cache_count;
-decl_simple_lock_data(,pmap_cache_lock)
+boolean_t kernel_text_ps_4K = TRUE;
+boolean_t wpkernel = TRUE;
extern char end;
-
static int nkpt;
-extern uint32_t lowGlo;
-extern void *version;
pt_entry_t *DMAP1, *DMAP2;
caddr_t DADDR1;
caddr_t DADDR2;
-#if DEBUG_ALIAS
-#define PMAP_ALIAS_MAX 32
-struct pmap_alias {
- vm_offset_t rpc;
- pmap_t pmap;
- vm_map_offset_t va;
- int cookie;
-#define PMAP_ALIAS_COOKIE 0xdeadbeef
-} pmap_aliasbuf[PMAP_ALIAS_MAX];
-int pmap_alias_index = 0;
-extern vm_offset_t get_rpc();
-
-#endif /* DEBUG_ALIAS */
-
/*
* for legacy, returns the address of the pde entry.
* for 64 bit, causes the pdpt page containing the pde entry to be mapped,
return pde;
}
-
/*
* the single pml4 page per pmap is allocated at pmap create time and exists
* for the duration of the pmap. we allocate this page in kernel vm (to save us one
((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1))));
}
- return (0);
+ return (NULL);
}
/*
((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1))));
}
- return (0);
+ return (NULL);
}
+/*
+ * Because the page tables (top 3 levels) are mapped into per cpu windows,
+ * callers must either disable interrupts or disable preemption before calling
+ * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr
+ * is in one of those mapped windows and that cannot be allowed to change until
+ * the caller is done using the returned pte pointer. When done, the caller
+ * restores interrupts or preemption to its previous state, after which point the
+ * vaddr for the returned pte can no longer be used.
+ */
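+
+#if 0
+/*
+ * Illustrative only (never compiled): typical caller discipline for
+ * pmap_pte(), mirroring the splhigh()/splx() brackets used by
+ * pmap_high_shared_remap() and pmap_lowmem_finalize() in this file.
+ */
+static void
+pmap_pte_window_example(vm_map_offset_t va)
+{
+	spl_t		s;
+	pt_entry_t	*ptep;
+
+	s = splhigh();			/* pin the per-cpu mapping window */
+	ptep = pmap_pte(kernel_pmap, va);
+	if (ptep != PT_ENTRY_NULL)
+		pmap_store_pte(ptep, *ptep & ~INTEL_PTE_RW);
+	splx(s);			/* the returned vaddr is now stale */
+}
+#endif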
/*
pde = pmap_pde(pmap,vaddr);
if (pde && ((*pde & INTEL_PTE_VALID))) {
- if (pmap == kernel_pmap) {
- return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
- }
-
+ if (*pde & INTEL_PTE_PS)
+ return pde;
+ if (pmap == kernel_pmap)
+ return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
+#if TESTING
+ if (ml_get_interrupts_enabled() && get_preemption_level() == 0)
+ panic("pmap_pte: unsafe call");
+#endif
assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
newpf = *pde & PG_FRAME;
((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1)));
}
- return(0);
+ return(NULL);
}
-
+
/*
* Map memory at initialization. The physical addresses being
return(virt);
}
-/*
- * Back-door routine for mapping kernel VM at initialization.
- * Useful for mapping memory outside the range
- * Sets no-cache, A, D.
- * [vm_first_phys, vm_last_phys) (i.e., devices).
- * Otherwise like pmap_map.
- */
-vm_offset_t
-pmap_map_bd(
- vm_offset_t virt,
- vm_map_offset_t start_addr,
- vm_map_offset_t end_addr,
- vm_prot_t prot,
- unsigned int flags)
-{
- pt_entry_t template;
- pt_entry_t *pte;
-
- template = pa_to_pte(start_addr)
- | INTEL_PTE_REF
- | INTEL_PTE_MOD
- | INTEL_PTE_WIRED
- | INTEL_PTE_VALID;
-
- if(flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) {
- template |= INTEL_PTE_NCACHE;
- if(!(flags & (VM_MEM_GUARDED | VM_WIMG_USE_DEFAULT)))
- template |= INTEL_PTE_PTA;
- }
+extern pmap_paddr_t first_avail;
+extern vm_offset_t virtual_avail, virtual_end;
+extern pmap_paddr_t avail_start, avail_end;
+extern vm_offset_t sHIB;
+extern vm_offset_t eHIB;
+extern vm_offset_t stext;
+extern vm_offset_t etext;
+extern vm_offset_t sdata;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
+extern void *KPTphys;
- while (start_addr < end_addr) {
- pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
- if (pte == PT_ENTRY_NULL) {
- panic("pmap_map_bd: Invalid kernel address\n");
- }
- pmap_store_pte(pte, template);
- pte_increment_pa(template);
- virt += PAGE_SIZE;
- start_addr += PAGE_SIZE;
- }
+void
+pmap_cpu_init(void)
+{
+ /*
+	 * Called early in the life of a processor (from cpu_mode_init()).
+ */
- flush_tlb();
- return(virt);
+ /*
+ * Initialize the per-cpu, TLB-related fields.
+ */
+ current_cpu_datap()->cpu_active_cr3 = kernel_pmap->pm_cr3;
+ current_cpu_datap()->cpu_tlb_invalid = FALSE;
}
-extern char *first_avail;
-extern vm_offset_t virtual_avail, virtual_end;
-extern pmap_paddr_t avail_start, avail_end;
-extern vm_offset_t etext;
-extern void *sectHIBB;
-extern int sectSizeHIB;
-
-
vm_offset_t
pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz)
{
pt_entry_t *ptep;
pmap_paddr_t pa;
int i;
+ spl_t s;
assert(0 == (va & PAGE_MASK)); /* expecting page aligned */
+ s = splhigh();
ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ve);
for (i=0; i< sz; i++) {
va+= PAGE_SIZE;
ptep++;
}
+ splx(s);
return ve;
}
{
vm_offset_t haddr;
- struct __gdt_desc_struct gdt_desc = {0,0,0};
- struct __idt_desc_struct idt_desc = {0,0,0};
+ spl_t s;
#if MACH_KDB
struct i386_tss *ttss;
#endif
+ cpu_desc_index_t * cdi = &cpu_data_master.cpu_desc_index;
+
kprintf("HIGH_MEM_BASE 0x%x fixed per-cpu begin 0x%x\n",
HIGH_MEM_BASE,pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+ s = splhigh();
pte_unique_base = pmap_pte(kernel_pmap, (vm_map_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN));
+ splx(s);
if (i386_btop(&hi_remap_etext - &hi_remap_text + 1) >
HIGH_FIXED_TRAMPS_END - HIGH_FIXED_TRAMPS + 1)
haddr = pmap_high_shared_remap(HIGH_FIXED_TRAMPS,
(vm_offset_t) &hi_remap_text, 3);
kprintf("tramp: 0x%x, ",haddr);
- printf("hi mem tramps at 0x%x\n",haddr);
/* map gdt up high and update ptr for reload */
haddr = pmap_high_shared_remap(HIGH_FIXED_GDT,
(vm_offset_t) master_gdt, 1);
- __asm__ __volatile__("sgdt %0": "=m" (gdt_desc): :"memory");
- gdt_desc.address = haddr;
+ cdi->cdi_gdt.ptr = (void *)haddr;
kprintf("GDT: 0x%x, ",haddr);
/* map ldt up high */
haddr = pmap_high_shared_remap(HIGH_FIXED_LDT_BEGIN,
(vm_offset_t) master_ldt,
HIGH_FIXED_LDT_END - HIGH_FIXED_LDT_BEGIN + 1);
+ cdi->cdi_ldt = (struct fake_descriptor *)haddr;
kprintf("LDT: 0x%x, ",haddr);
/* put new ldt addr into gdt */
- master_gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
- master_gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) haddr;
- fix_desc(&master_gdt[sel_idx(KERNEL_LDT)], 1);
- master_gdt[sel_idx(USER_LDT)] = ldt_desc_pattern;
- master_gdt[sel_idx(USER_LDT)].offset = (vm_offset_t) haddr;
- fix_desc(&master_gdt[sel_idx(USER_LDT)], 1);
+ struct fake_descriptor temp_fake_desc;
+ temp_fake_desc = ldt_desc_pattern;
+ temp_fake_desc.offset = (vm_offset_t) haddr;
+ fix_desc(&temp_fake_desc, 1);
+
+ *(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_LDT)] = temp_fake_desc;
+ *(struct fake_descriptor *) &master_gdt[sel_idx(USER_LDT)] = temp_fake_desc;
/* map idt up high */
haddr = pmap_high_shared_remap(HIGH_FIXED_IDT,
(vm_offset_t) master_idt, 1);
- __asm__ __volatile__("sidt %0" : "=m" (idt_desc));
- idt_desc.address = haddr;
+ cdi->cdi_idt.ptr = (void *)haddr;
kprintf("IDT: 0x%x, ", haddr);
/* remap ktss up high and put new high addr into gdt */
haddr = pmap_high_shared_remap(HIGH_FIXED_KTSS,
(vm_offset_t) &master_ktss, 1);
- master_gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
- master_gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) haddr;
- fix_desc(&master_gdt[sel_idx(KERNEL_TSS)], 1);
+
+ temp_fake_desc = tss_desc_pattern;
+ temp_fake_desc.offset = (vm_offset_t) haddr;
+ fix_desc(&temp_fake_desc, 1);
+ *(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_TSS)] = temp_fake_desc;
kprintf("KTSS: 0x%x, ",haddr);
#if MACH_KDB
/* remap dbtss up high and put new high addr into gdt */
haddr = pmap_high_shared_remap(HIGH_FIXED_DBTSS,
(vm_offset_t) &master_dbtss, 1);
- master_gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
- master_gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) haddr;
- fix_desc(&master_gdt[sel_idx(DEBUG_TSS)], 1);
+ temp_fake_desc = tss_desc_pattern;
+ temp_fake_desc.offset = (vm_offset_t) haddr;
+ fix_desc(&temp_fake_desc, 1);
+ *(struct fake_descriptor *)&master_gdt[sel_idx(DEBUG_TSS)] = temp_fake_desc;
ttss = (struct i386_tss *)haddr;
kprintf("DBTSS: 0x%x, ",haddr);
#endif /* MACH_KDB */
/* remap dftss up high and put new high addr into gdt */
haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS,
(vm_offset_t) &master_dftss, 1);
- master_gdt[sel_idx(DF_TSS)] = tss_desc_pattern;
- master_gdt[sel_idx(DF_TSS)].offset = (vm_offset_t) haddr;
- fix_desc(&master_gdt[sel_idx(DF_TSS)], 1);
+ temp_fake_desc = tss_desc_pattern;
+ temp_fake_desc.offset = (vm_offset_t) haddr;
+ fix_desc(&temp_fake_desc, 1);
+ *(struct fake_descriptor *) &master_gdt[sel_idx(DF_TSS)] = temp_fake_desc;
kprintf("DFTSS: 0x%x\n",haddr);
/* remap mctss up high and put new high addr into gdt */
haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS,
(vm_offset_t) &master_mctss, 1);
- master_gdt[sel_idx(MC_TSS)] = tss_desc_pattern;
- master_gdt[sel_idx(MC_TSS)].offset = (vm_offset_t) haddr;
- fix_desc(&master_gdt[sel_idx(MC_TSS)], 1);
+ temp_fake_desc = tss_desc_pattern;
+ temp_fake_desc.offset = (vm_offset_t) haddr;
+ fix_desc(&temp_fake_desc, 1);
+ *(struct fake_descriptor *) &master_gdt[sel_idx(MC_TSS)] = temp_fake_desc;
kprintf("MCTSS: 0x%x\n",haddr);
- __asm__ __volatile__("lgdt %0": "=m" (gdt_desc));
- __asm__ __volatile__("lidt %0": "=m" (idt_desc));
- kprintf("gdt/idt reloaded, ");
- set_tr(KERNEL_TSS);
- kprintf("tr reset to KERNEL_TSS\n");
+ cpu_desc_load(&cpu_data_master);
}
* Bootstrap the system enough to run with virtual memory.
* Map the kernel's code and data, and allocate the system page table.
* Called with mapping OFF. Page_size must already be set.
- *
- * Parameters:
- * load_start: PA where kernel was loaded
- * avail_start PA of first available physical page -
- * after kernel page tables
- * avail_end PA of last available physical page
- * virtual_avail VA of first available page -
- * after kernel page tables
- * virtual_end VA of last available page -
- * end of kernel address space
- *
- * &start_text start of kernel text
- * &etext end of kernel text
*/
void
boolean_t IA32e)
{
vm_offset_t va;
- pt_entry_t *pte;
int i;
- int wpkernel, boot_arg;
pdpt_entry_t *pdpt;
+ spl_t s;
vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address
* known to VM */
kernel_pmap = &kernel_pmap_store;
kernel_pmap->ref_count = 1;
kernel_pmap->nx_enabled = FALSE;
- kernel_pmap->pm_64bit = 0;
+ kernel_pmap->pm_task_map = TASK_MAP_32BIT;
kernel_pmap->pm_obj = (vm_object_t) NULL;
kernel_pmap->dirbase = (pd_entry_t *)((unsigned int)IdlePTD | KERNBASE);
kernel_pmap->pdirbase = (pmap_paddr_t)((int)IdlePTD);
kernel_pmap->pm_pdpt = pdpt;
kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePDPT);
+
va = (vm_offset_t)kernel_pmap->dirbase;
/* setup self referential mapping(s) */
for (i = 0; i< NPGPTD; i++, pdpt++) {
pmap_paddr_t pa;
- pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
+ pa = (pmap_paddr_t) kvtophys((vm_offset_t)(va + i386_ptob(i)));
pmap_store_pte(
(pd_entry_t *) (kernel_pmap->dirbase + PTDPTDI + i),
(pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_REF |
pmap_store_pte(pdpt, pa | INTEL_PTE_VALID);
}
+#if CONFIG_YONAH
+ /* 32-bit and legacy support depends on IA32e mode being disabled */
cpu_64bit = IA32e;
+#endif
lo_kernel_cr3 = kernel_pmap->pm_cr3;
current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
high_shared_pde = *pmap_pde(kernel_pmap, HIGH_MEM_BASE);
/* make sure G bit is on for high shared pde entry */
high_shared_pde |= INTEL_PTE_GLOBAL;
+ s = splhigh();
pmap_store_pte(pmap_pde(kernel_pmap, HIGH_MEM_BASE), high_shared_pde);
+ splx(s);
nkpt = NKPT;
- inuse_ptepages_count += NKPT;
+ OSAddAtomic(NKPT, &inuse_ptepages_count);
+ OSAddAtomic64(NKPT, &alloc_ptepages_count);
+ bootstrap_wired_pages = NKPT;
virtual_avail = (vm_offset_t)VADDR(KPTDI,0) + (vm_offset_t)first_avail;
virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
* Reserve some special page table entries/VA space for temporary
* mapping of pages.
*/
-#define SYSMAP(c, p, v, n) \
- v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
-
va = virtual_avail;
+ pt_entry_t *pte;
pte = vtopte(va);
+#define SYSMAP(c, p, v, n) \
+ v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
for (i=0; i<PMAP_NWINDOWS; i++) {
SYSMAP(caddr_t,
SYSMAP(caddr_t, DMAP1, DADDR1, 1);
SYSMAP(caddr_t, DMAP2, DADDR2, 1); /* XXX temporary - can remove */
-
- lock_init(&pmap_system_lock,
- FALSE, /* NOT a sleep lock */
- 0, 0);
-
virtual_avail = va;
- wpkernel = 1;
- if (PE_parse_boot_arg("wpkernel", &boot_arg)) {
- if (boot_arg == 0)
- wpkernel = 0;
- }
-
- /* Remap kernel text readonly unless the "wpkernel" boot-arg is present
- * and set to 0.
- */
- if (wpkernel)
- {
- vm_offset_t myva;
- pt_entry_t *ptep;
-
- for (myva = i386_round_page(MP_BOOT + MP_BOOTSTACK); myva < etext; myva += PAGE_SIZE) {
- if (myva >= (vm_offset_t)sectHIBB && myva < ((vm_offset_t)sectHIBB + sectSizeHIB))
- continue;
- ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
- if (ptep)
- pmap_store_pte(ptep, *ptep & ~INTEL_PTE_RW);
- }
+ if (PE_parse_boot_argn("npvhash", &npvhash, sizeof (npvhash))) {
+ if (0 != ((npvhash+1) & npvhash)) {
+ kprintf("invalid hash %d, must be ((2^N)-1), using default %d\n",npvhash,NPVHASH);
+ npvhash = NPVHASH;
+ }
+ } else {
+ npvhash = NPVHASH;
}
-
- /* no matter what, kernel page zero is not accessible */
- pte = pmap_pte(kernel_pmap, 0);
- pmap_store_pte(pte, INTEL_PTE_INVALID);
-
- /* map lowmem global page into fixed addr 0x2000 */
- if (0 == (pte = pmap_pte(kernel_pmap,0x2000))) panic("lowmem pte");
-
- pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)|INTEL_PTE_VALID|INTEL_PTE_REF|INTEL_PTE_MOD|INTEL_PTE_WIRED|INTEL_PTE_RW);
- flush_tlb();
+ printf("npvhash=%d\n",npvhash);
simple_lock_init(&kernel_pmap->lock, 0);
- simple_lock_init(&pv_free_list_lock, 0);
+ simple_lock_init(&pv_hashed_free_list_lock, 0);
+ simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
+ simple_lock_init(&pv_hash_table_lock,0);
- pmap_init_high_shared();
+ pmap_init_high_shared();
pde_mapped_size = PDE_MAPPED_SIZE;
if (cpu_64bit) {
- pdpt_entry_t *ppdpt = (pdpt_entry_t *)IdlePDPT;
+ pdpt_entry_t *ppdpt = IdlePDPT;
pdpt_entry_t *ppdpt64 = (pdpt_entry_t *)IdlePDPT64;
pdpt_entry_t *ppml4 = (pdpt_entry_t *)IdlePML4;
int istate = ml_set_interrupts_enabled(FALSE);
pmap_store_pte((ppml4+KERNEL_UBER_PML4_INDEX), *(ppml4+0));
kernel64_cr3 = (addr64_t) kernel_pmap->pm_cr3;
- cpu_IA32e_enable(current_cpu_datap());
- current_cpu_datap()->cpu_is64bit = TRUE;
- /* welcome to a 64 bit world */
- /* Re-initialize and load descriptors */
- cpu_desc_init64(&cpu_data_master, TRUE);
- cpu_desc_load64(&cpu_data_master);
- fast_syscall_init64();
+ /* Re-initialize descriptors and prepare to switch modes */
+ cpu_desc_init64(&cpu_data_master);
+ current_cpu_datap()->cpu_is64bit = TRUE;
+ current_cpu_datap()->cpu_active_cr3 = kernel64_cr3;
pde_mapped_size = 512*4096 ;
ml_set_interrupts_enabled(istate);
-
}
+
+ /* Sets 64-bit mode if required. */
+ cpu_mode_init(&cpu_data_master);
+ /* Update in-kernel CPUID information if we're now in 64-bit mode */
+ if (IA32e)
+ cpuid_set_info();
+
kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4;
kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
* By default for 64-bit users loaded at 4GB, share kernel mapping.
* But this may be overridden by the -no_shared_cr3 boot-arg.
*/
- if (PE_parse_boot_arg("-no_shared_cr3", &no_shared_cr3)) {
+ if (PE_parse_boot_argn("-no_shared_cr3", &no_shared_cr3, sizeof (no_shared_cr3))) {
kprintf("Shared kernel address space disabled\n");
- }
+ }
+
+#ifdef PMAP_TRACES
+ if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
+ kprintf("Kernel traces for pmap operations enabled\n");
+ }
+#endif /* PMAP_TRACES */
}
void
void
pmap_init(void)
{
- register long npages;
- vm_offset_t addr;
- register vm_size_t s;
- vm_map_offset_t vaddr;
- ppnum_t ppn;
+ long npages;
+ vm_map_offset_t vaddr;
+ vm_offset_t addr;
+ vm_size_t s, vsize;
+ ppnum_t ppn;
/*
* Allocate memory for the pv_head_table and its lock bits,
* the modify bit array, and the pte_page table.
*/
- /* zero bias all these arrays now instead of off avail_start
- so we cover all memory */
- npages = i386_btop(avail_end);
- s = (vm_size_t) (sizeof(struct pv_entry) * npages
- + pv_lock_table_size(npages)
+ /*
+ * zero bias all these arrays now instead of off avail_start
+ * so we cover all memory
+ */
+
+ npages = (long)i386_btop(avail_end);
+ s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
+ + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
+ + pv_lock_table_size(npages)
+ + pv_hash_lock_table_size((npvhash+1))
+ npages);
s = round_page(s);
- if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
+ if (kernel_memory_allocate(kernel_map, &addr, s, 0,
+ KMA_KOBJECT | KMA_PERMANENT)
+ != KERN_SUCCESS)
panic("pmap_init");
memset((char *)addr, 0, s);
+ vaddr = addr;
+ vsize = s;
+
+#if PV_DEBUG
+ if (0 == npvhash) panic("npvhash not initialized");
+#endif
+
/*
* Allocate the structures first to preserve word-alignment.
*/
- pv_head_table = (pv_entry_t) addr;
+ pv_head_table = (pv_rooted_entry_t) addr;
addr = (vm_offset_t) (pv_head_table + npages);
+ pv_hash_table = (pv_hashed_entry_t *)addr;
+ addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
+
pv_lock_table = (char *) addr;
addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
+ pv_hash_lock_table = (char *) addr;
+ addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
+
pmap_phys_attributes = (char *) addr;
+ {
+ unsigned int i;
+ unsigned int pn;
+ ppnum_t last_pn;
+ pmap_memory_region_t *pmptr = pmap_memory_regions;
+
+ last_pn = (ppnum_t)i386_btop(avail_end);
+
+ for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
+ if (pmptr->type == kEfiConventionalMemory) {
+
+ for (pn = pmptr->base; pn <= pmptr->end; pn++) {
+ if (pn < last_pn) {
+ pmap_phys_attributes[pn] |= PHYS_MANAGED;
+
+ if (pn > last_managed_page)
+ last_managed_page = pn;
+
+ if (pn >= lowest_hi && pn <= highest_hi)
+ pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
+ }
+ }
+ }
+ }
+ }
+ while (vsize) {
+ ppn = pmap_find_phys(kernel_pmap, vaddr);
+
+ pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;
+ vaddr += PAGE_SIZE;
+ vsize -= PAGE_SIZE;
+ }
/*
* Create the zone of physical maps,
* and of the physical-to-virtual entries.
*/
s = (vm_size_t) sizeof(struct pmap);
pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
- s = (vm_size_t) sizeof(struct pv_entry);
- pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */
- s = 63;
- pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */
+ zone_change(pmap_zone, Z_NOENCRYPT, TRUE);
- /*
- * Only now, when all of the data structures are allocated,
- * can we set vm_first_phys and vm_last_phys. If we set them
- * too soon, the kmem_alloc_wired above will try to use these
- * data structures and blow up.
- */
+ s = (vm_size_t) sizeof(struct pv_hashed_entry);
+ pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
+ 4096 * 4 /* LCM i386 */, "pv_list");
+ zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
- /* zero bias this now so we cover all memory */
- vm_first_phys = 0;
- vm_last_phys = avail_end;
+ s = 63;
+ pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */
+ zone_change(pdpt_zone, Z_NOENCRYPT, TRUE);
kptobj = &kptobj_object_store;
- _vm_object_allocate((vm_object_size_t)NKPDE, kptobj);
+ _vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG), kptobj);
kernel_pmap->pm_obj = kptobj;
/* create pv entries for kernel pages mapped by low level
vaddr = (vm_map_offset_t)0;
for (ppn = 0; ppn < i386_btop(avail_start) ; ppn++ ) {
- pv_entry_t pv_e;
+ pv_rooted_entry_t pv_e;
pv_e = pai_to_pvh(ppn);
pv_e->va = vaddr;
vaddr += PAGE_SIZE;
- kernel_pmap->stats.resident_count++;
pv_e->pmap = kernel_pmap;
- pv_e->next = PV_ENTRY_NULL;
+ queue_init(&pv_e->qlink);
}
pmap_initialized = TRUE;
- /*
- * Initializie pmap cache.
- */
- pmap_cache_list = PMAP_NULL;
- pmap_cache_count = 0;
- simple_lock_init(&pmap_cache_lock, 0);
+ max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
+
}
+#ifdef PMAP_DEBUG
+#define DBG(x...) kprintf("DBG: " x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ * Called once VM is fully initialized so that we can release unused
+ * sections of low memory to the general pool.
+ * Also complete the set-up of identity-mapped sections of the kernel:
+ * 1) write-protect kernel text
+ * 2) map kernel text using large pages if possible
+ * 3) read and write-protect page zero (for K32)
+ * 4) map the global page at the appropriate virtual address.
+ *
+ * Use of large pages
+ * ------------------
+ * To effectively map and write-protect all kernel text pages, the text
+ * must be 2M-aligned at the base, and the data section above must also be
+ * 2M-aligned. That is, there's padding below and above. This is achieved
+ * through linker directives. Large pages are used only if this alignment
+ * exists (and is not overridden by the -kernel_text_ps_4K boot-arg). The
+ * memory layout is:
+ *
+ * : :
+ * | __DATA |
+ * sdata: ================== 2Meg
+ * | |
+ * | zero-padding |
+ * | |
+ * etext: ------------------
+ * | |
+ * : :
+ * | |
+ * | __TEXT |
+ * | |
+ * : :
+ * | |
+ * stext: ================== 2Meg
+ * | |
+ * | zero-padding |
+ * | |
+ * eHIB: ------------------
+ * | __HIB |
+ * : :
+ *
+ * Prior to changing the mapping from 4K to 2M, the zero-padding pages
+ * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
+ * 4K pages covering [stext,etext] are coalesced as 2M large pages.
+ * The now unused level-1 PTE pages are also freed.
+ */
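+
+/*
+ * Illustrative summary of the coalescing loop in pmap_lowmem_finalize()
+ * below: for each 2MB slice of [stext,sdata), the mapping
+ *
+ *	pde -> page table of 512 4K ptes -> physical text pages
+ *
+ * is replaced by a single large-page pde,
+ *
+ *	pde = (*pdep & PTMASK)		-- attributes from the old pde
+ *	    | INTEL_PTE_PS		-- mark it a 2M mapping
+ *	    | (*ptep & PG_FRAME);	-- frame taken from the first 4K pte
+ *
+ * after which the now-unreferenced 4K page table is released with
+ * ml_static_mfree().  With wpkernel set, INTEL_PTE_RW is also cleared.
+ */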
+extern uint32_t pmap_reserved_ranges;
void
-x86_lowmem_free(void)
+pmap_lowmem_finalize(void)
{
- /* free lowmem pages back to the vm system. we had to defer doing this
- until the vm system was fully up.
- the actual pages that are released are determined by which
- pages the memory sizing code puts into the region table */
+ spl_t spl;
+ int i;
- ml_static_mfree((vm_offset_t) i386_ptob(pmap_memory_regions[0].base),
- (vm_size_t) i386_ptob(pmap_memory_regions[0].end - pmap_memory_regions[0].base));
-}
+ /* Check the kernel is linked at the expected base address */
+ if (i386_btop(kvtophys((vm_offset_t) &IdlePML4)) !=
+ I386_KERNEL_IMAGE_BASE_PAGE)
+ panic("pmap_lowmem_finalize() unexpected kernel base address");
+
+ /*
+ * Update wired memory statistics for early boot pages
+ */
+ PMAP_ZINFO_PALLOC(bootstrap_wired_pages * PAGE_SIZE);
+
+ /*
+ * Free all pages in pmap regions below the base:
+ * rdar://6332712
+ * We can't free all the pages to VM that EFI reports available.
+ * Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
+ * There's also a size miscalculation here: pend is one page less
+ * than it should be but this is not fixed to be backwards
+ * compatible.
+ * Due to this current EFI limitation, we take only the first
+ * entry in the memory region table. However, the loop is retained
+ * (with the intended termination criteria commented out) in the
+ * hope that some day we can free all low-memory ranges.
+ */
+ for (i = 0;
+// pmap_memory_regions[i].end <= I386_KERNEL_IMAGE_BASE_PAGE;
+ i < 1 && (pmap_reserved_ranges == 0);
+ i++) {
+ vm_offset_t pbase = (vm_offset_t)i386_ptob(pmap_memory_regions[i].base);
+ vm_offset_t pend = (vm_offset_t)i386_ptob(pmap_memory_regions[i].end);
+// vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1);
+
+ DBG("ml_static_mfree(%p,%p) for pmap region %d\n",
+ (void *) ml_static_ptovirt(pbase),
+ (void *) (pend - pbase), i);
+ ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
+ }
+
+ /*
+ * If text and data are both 2MB-aligned,
+ * we can map text with large-pages,
+ * unless the -kernel_text_ps_4K boot-arg overrides.
+ */
+ if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
+ kprintf("Kernel text is 2MB aligned");
+ kernel_text_ps_4K = FALSE;
+ if (PE_parse_boot_argn("-kernel_text_ps_4K",
+ &kernel_text_ps_4K,
+ sizeof (kernel_text_ps_4K)))
+ kprintf(" but will be mapped with 4K pages\n");
+ else
+ kprintf(" and will be mapped with 2M pages\n");
+ }
+
+ (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
+ if (wpkernel)
+ kprintf("Kernel text %p-%p to be write-protected\n",
+ (void *) stext, (void *) etext);
+
+ spl = splhigh();
+
+ /*
+ * Scan over text if mappings are to be changed:
+ * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
+	 * - Change to large-pages if possible and not overridden.
+ */
+ if (kernel_text_ps_4K && wpkernel) {
+ vm_offset_t myva;
+ for (myva = stext; myva < etext; myva += PAGE_SIZE) {
+ pt_entry_t *ptep;
+
+ ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+ if (ptep)
+ pmap_store_pte(ptep, *ptep & ~INTEL_PTE_RW);
+ }
+ }
+
+ if (!kernel_text_ps_4K) {
+ vm_offset_t myva;
+ /*
+ * Release zero-filled page padding used for 2M-alignment.
+ */
+ DBG("ml_static_mfree(%p,%p) for padding below text\n",
+ (void *) eHIB, (void *) (stext - eHIB));
+ ml_static_mfree(eHIB, stext - eHIB);
+ DBG("ml_static_mfree(%p,%p) for padding above text\n",
+ (void *) etext, (void *) (sdata - etext));
+ ml_static_mfree(etext, sdata - etext);
+
+ /*
+ * Coalesce text pages into large pages.
+ */
+ for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
+ pt_entry_t *ptep;
+ vm_offset_t pte_phys;
+ pt_entry_t *pdep;
+ pt_entry_t pde;
+
+ pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
+ ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+ DBG("myva: %p pdep: %p ptep: %p\n",
+ (void *) myva, (void *) pdep, (void *) ptep);
+ if ((*ptep & INTEL_PTE_VALID) == 0)
+ continue;
+ pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
+ pde = *pdep & PTMASK; /* page attributes from pde */
+ pde |= INTEL_PTE_PS; /* make it a 2M entry */
+ pde |= pte_phys; /* take page frame from pte */
+
+ if (wpkernel)
+ pde &= ~INTEL_PTE_RW;
+ DBG("pmap_store_pte(%p,0x%llx)\n",
+ (void *)pdep, pde);
+ pmap_store_pte(pdep, pde);
+
+ /*
+ * Free the now-unused level-1 pte.
+ * Note: ptep is a virtual address to the pte in the
+ * recursive map. We can't use this address to free
+ * the page. Instead we need to compute its address
+ * in the Idle PTEs in "low memory".
+ */
+ vm_offset_t vm_ptep = (vm_offset_t) KPTphys
+ + (pte_phys >> PTPGSHIFT);
+ DBG("ml_static_mfree(%p,0x%x) for pte\n",
+ (void *) vm_ptep, PAGE_SIZE);
+ ml_static_mfree(vm_ptep, PAGE_SIZE);
+ }
+
+ /* Change variable read by sysctl machdep.pmap */
+ pmap_kernel_text_ps = I386_LPGBYTES;
+ }
+
+ /* no matter what, kernel page zero is not accessible */
+ pmap_store_pte(pmap_pte(kernel_pmap, 0), INTEL_PTE_INVALID);
+
+ /* map lowmem global page into fixed addr */
+ pt_entry_t *pte = NULL;
+ if (0 == (pte = pmap_pte(kernel_pmap,
+ VM_MIN_KERNEL_LOADED_ADDRESS + 0x2000)))
+ panic("lowmem pte");
+ /* make sure it is defined on page boundary */
+ assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
+ pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
+ | INTEL_PTE_REF
+ | INTEL_PTE_MOD
+ | INTEL_PTE_WIRED
+ | INTEL_PTE_VALID
+ | INTEL_PTE_RW);
+ splx(spl);
+ flush_tlb();
+}
-#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
+#define managed_page(x) ( (unsigned int)x <= last_managed_page && (pmap_phys_attributes[x] & PHYS_MANAGED) )
+/*
+ * This function is only used for debugging from the vm layer.
+ */
boolean_t
pmap_verify_free(
ppnum_t pn)
{
- pmap_paddr_t phys;
- pv_entry_t pv_h;
+ pv_rooted_entry_t pv_h;
int pai;
- spl_t spl;
boolean_t result;
assert(pn != vm_page_fictitious_addr);
- phys = (pmap_paddr_t)i386_ptob(pn);
+
if (!pmap_initialized)
return(TRUE);
- if (!pmap_valid_page(pn))
- return(FALSE);
-
- PMAP_WRITE_LOCK(spl);
-
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
+ if (pn == vm_page_guard_addr)
+ return TRUE;
+ pai = ppn_to_pai(pn);
+ if (!managed_page(pai))
+ return(FALSE);
+ pv_h = pai_to_pvh(pn);
result = (pv_h->pmap == PMAP_NULL);
- PMAP_WRITE_UNLOCK(spl);
-
return(result);
}
+boolean_t
+pmap_is_empty(
+ pmap_t pmap,
+ vm_map_offset_t va_start,
+ vm_map_offset_t va_end)
+{
+ vm_map_offset_t offset;
+ ppnum_t phys_page;
+
+ if (pmap == PMAP_NULL) {
+ return TRUE;
+ }
+
+ /*
+ * Check the resident page count
+ * - if it's zero, the pmap is completely empty.
+ * This short-circuit test prevents a virtual address scan which is
+ * painfully slow for 64-bit spaces.
+	 * This assumes the count is correct;
+	 * the debug kernel ought to check it, perhaps by a page-table walk.
+ */
+ if (pmap->stats.resident_count == 0)
+ return TRUE;
+
+ for (offset = va_start;
+ offset < va_end;
+ offset += PAGE_SIZE_64) {
+ phys_page = pmap_find_phys(pmap, offset);
+ if (phys_page) {
+ if (pmap != kernel_pmap &&
+ pmap->pm_task_map == TASK_MAP_32BIT &&
+ offset >= HIGH_MEM_BASE) {
+ /*
+ * The "high_shared_pde" is used to share
+ * the entire top-most 2MB of address space
+ * between the kernel and all 32-bit tasks.
+ * So none of this can be removed from 32-bit
+ * tasks.
+ * Let's pretend there's nothing up
+ * there...
+ */
+ return TRUE;
+ }
+ kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
+ "page %d at 0x%llx\n",
+ pmap, va_start, va_end, phys_page, offset);
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
/*
* Create and return a physical map.
*
pmap_t
pmap_create(
vm_map_size_t sz,
- boolean_t is_64bit)
+ boolean_t is_64bit)
{
- register pmap_t p;
+ pmap_t p;
int i;
vm_offset_t va;
vm_size_t size;
pdpt_entry_t *pdpt;
pml4_entry_t *pml4p;
- int template;
pd_entry_t *pdp;
+ int template;
spl_t s;
+ PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
+ (int) (sz>>32), (int) sz, (int) is_64bit, 0, 0);
+
size = (vm_size_t) sz;
/*
p = (pmap_t) zalloc(pmap_zone);
if (PMAP_NULL == p)
- panic("pmap_create zalloc");
+ panic("pmap_create zalloc");
/* init counts now since we'll be bumping some */
simple_lock_init(&p->lock, 0);
p->stats.resident_count = 0;
+ p->stats.resident_max = 0;
p->stats.wired_count = 0;
p->ref_count = 1;
p->nx_enabled = 1;
- p->pm_64bit = is_64bit;
- p->pm_kernel_cr3 = FALSE;
p->pm_shared = FALSE;
+ assert(!is_64bit || cpu_64bit);
+	p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
+
if (!cpu_64bit) {
- /* legacy 32 bit setup */
- /* in the legacy case the pdpt layer is hardwired to 4 entries and each
- * entry covers 1GB of addr space */
- if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
- panic("pmap_create kmem_alloc_wired");
- p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
- if ((vm_offset_t)NULL == p->pm_hold) {
- panic("pdpt zalloc");
- }
- pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
- p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt);
- if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG))))
- panic("pmap_create vm_object_allocate");
+ /* legacy 32 bit setup */
+ /* in the legacy case the pdpt layer is hardwired to 4 entries and each
+ * entry covers 1GB of addr space */
+ if (KERN_SUCCESS != kmem_alloc_kobject(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD))
+ panic("pmap_create kmem_alloc_kobject");
+ p->pm_hold = (vm_offset_t)zalloc(pdpt_zone);
+ if ((vm_offset_t)NULL == p->pm_hold) {
+ panic("pdpt zalloc");
+ }
+ pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31);
+ p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt);
+ if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG))))
+ panic("pmap_create vm_object_allocate");
- memset((char *)p->dirbase, 0, NBPTD);
+ memset((char *)p->dirbase, 0, NBPTD);
- va = (vm_offset_t)p->dirbase;
- p->pdirbase = kvtophys(va);
+ va = (vm_offset_t)p->dirbase;
+ p->pdirbase = kvtophys(va);
- template = cpu_64bit ? INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF : INTEL_PTE_VALID;
- for (i = 0; i< NPGPTD; i++, pdpt++) {
- pmap_paddr_t pa;
- pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i));
- pmap_store_pte(pdpt, pa | template);
- }
+ PMAP_ZINFO_SALLOC(NBPTD);
+
+ template = INTEL_PTE_VALID;
+ for (i = 0; i< NPGPTD; i++, pdpt++ ) {
+ pmap_paddr_t pa;
+ pa = (pmap_paddr_t) kvtophys((vm_offset_t)(va + i386_ptob(i)));
+ pmap_store_pte(pdpt, pa | template);
+ }
- /* map the high shared pde */
- pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde);
+ /* map the high shared pde */
+ s = splhigh();
+ pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde);
+ splx(s);
} else {
+ /* 64 bit setup */
- /* 64 bit setup */
+ /* alloc the pml4 page in kernel vm */
+ if (KERN_SUCCESS != kmem_alloc_kobject(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE))
+ panic("pmap_create kmem_alloc_kobject pml4");
- /* alloc the pml4 page in kernel vm */
- if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE))
- panic("pmap_create kmem_alloc_wired pml4");
+ memset((char *)p->pm_hold, 0, PAGE_SIZE);
+ p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold);
- memset((char *)p->pm_hold, 0, PAGE_SIZE);
- p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold);
+ OSAddAtomic(1, &inuse_ptepages_count);
+ OSAddAtomic64(1, &alloc_ptepages_count);
+ PMAP_ZINFO_SALLOC(PAGE_SIZE);
- inuse_ptepages_count++;
- p->stats.resident_count++;
- p->stats.wired_count++;
+ /* allocate the vm_objs to hold the pdpt, pde and pte pages */
- /* allocate the vm_objs to hold the pdpt, pde and pte pages */
+ if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS))))
+ panic("pmap_create pdpt obj");
- if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS))))
- panic("pmap_create pdpt obj");
+ if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS))))
+ panic("pmap_create pdpt obj");
- if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS))))
- panic("pmap_create pdpt obj");
+ if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS))))
+ panic("pmap_create pte obj");
- if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS))))
- panic("pmap_create pte obj");
+ /* uber space points to uber mapped kernel */
+ s = splhigh();
+ pml4p = pmap64_pml4(p, 0ULL);
+ pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX),*kernel_pmap->pm_pml4);
- /* uber space points to uber mapped kernel */
- s = splhigh();
- pml4p = pmap64_pml4(p, 0ULL);
- pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX),*kernel_pmap->pm_pml4);
- if (!is_64bit) {
- while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) {
- splx(s);
- pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */
- s = splhigh();
- }
- pmap_store_pte(pdp, high_shared_pde);
- }
- splx(s);
+ if (!is_64bit) {
+ while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) {
+ splx(s);
+ pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */
+ s = splhigh();
+ }
+ pmap_store_pte(pdp, high_shared_pde);
+ }
+ splx(s);
}
+	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
+ (int) p, is_64bit, 0, 0, 0);
+
return(p);
}
+/*
+ * The following routines implement the shared address optimization for 64-bit
+ * users with a 4GB page zero.
+ *
+ * pmap_set_4GB_pagezero()
+ * is called in the exec and fork paths to mirror the kernel's
+ * mapping in the bottom 4G of the user's pmap. The task mapping changes
+ * from TASK_MAP_64BIT to TASK_MAP_64BIT_SHARED. This routine returns
+ * without doing anything if the -no_shared_cr3 boot-arg is set.
+ *
+ * pmap_clear_4GB_pagezero()
+ * is called in the exec/exit paths to undo this mirror. The task mapping
+ * reverts to TASK_MAP_64BIT. In addition, we switch to the kernel's
+ * CR3 by calling pmap_load_kernel_cr3().
+ *
+ * pmap_load_kernel_cr3()
+ * loads cr3 with the kernel's page table. In addition to being called
+ * by pmap_clear_4GB_pagezero(), it is used both prior to teardown and
+ * when we go idle in the context of a shared map.
+ *
+ * Further notes on per-cpu data used:
+ *
+ * cpu_kernel_cr3 is the cr3 for the kernel's pmap.
+ * This is loaded in a trampoline on entering the kernel
+ * from a 32-bit user (or non-shared-cr3 64-bit user).
+ * cpu_task_cr3 is the cr3 for the current thread.
+ * This is loaded in a trampoline as we exit the kernel.
+ * cpu_active_cr3 reflects the cr3 currently loaded.
+ * However, the low order bit is set when the
+ * processor is idle or interrupts are disabled
+ * while the system pmap lock is held. It is used by
+ * tlb shoot-down.
+ * cpu_task_map indicates whether the task cr3 belongs to
+ * a 32-bit, a 64-bit or a 64-bit shared map.
+ * The latter allows the avoidance of the cr3 load
+ * on kernel entry and exit.
+ * cpu_tlb_invalid set TRUE when a tlb flush is requested.
+ * If the cr3 is "inactive" (the cpu is idle or the
+ *			system-wide pmap lock is held) this is not serviced by
+ *			an IPI but at the time when the cr3 becomes "active".
+ */
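+
+#if 0
+/*
+ * Illustrative only (never compiled): how the per-cpu fields above are
+ * intended to be consulted on kernel exit.  The real logic lives in the
+ * low-level trampolines; field names follow the comment above.
+ */
+static void
+pmap_exit_kernel_cr3_example(void)
+{
+	if (current_cpu_datap()->cpu_task_map == TASK_MAP_64BIT_SHARED) {
+		/* the kernel's cr3 already maps the task: skip the cr3 load */
+	} else {
+		set64_cr3(current_cpu_datap()->cpu_task_cr3);
+		current_cpu_datap()->cpu_active_cr3 =
+			current_cpu_datap()->cpu_task_cr3;
+	}
+}
+#endif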
+
void
pmap_set_4GB_pagezero(pmap_t p)
{
- int spl;
pdpt_entry_t *user_pdptp;
pdpt_entry_t *kern_pdptp;
- assert(p->pm_64bit);
+ assert(p->pm_task_map != TASK_MAP_32BIT);
/* Kernel-shared cr3 may be disabled by boot arg. */
if (no_shared_cr3)
/*
* Set the bottom 4 3rd-level pte's to be the kernel's.
*/
- spl = splhigh();
+ PMAP_LOCK(p);
while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) {
- splx(spl);
+ PMAP_UNLOCK(p);
pmap_expand_pml4(p, 0x0);
- spl = splhigh();
+ PMAP_LOCK(p);
}
kern_pdptp = kernel_pmap->pm_pdpt;
pmap_store_pte(user_pdptp+0, *(kern_pdptp+0));
pmap_store_pte(user_pdptp+1, *(kern_pdptp+1));
pmap_store_pte(user_pdptp+2, *(kern_pdptp+2));
pmap_store_pte(user_pdptp+3, *(kern_pdptp+3));
-
- p->pm_kernel_cr3 = TRUE;
-
- splx(spl);
-
-}
-
-void
-pmap_load_kernel_cr3(void)
-{
- uint32_t kernel_cr3;
-
- assert(!ml_get_interrupts_enabled());
-
- /*
- * Reload cr3 with the true kernel cr3.
- * Note: kernel's pml4 resides below 4GB physical.
- */
- kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3;
- set_cr3(kernel_cr3);
- current_cpu_datap()->cpu_active_cr3 = kernel_cr3;
- current_cpu_datap()->cpu_task_map = TASK_MAP_32BIT;
- current_cpu_datap()->cpu_tlb_invalid = FALSE;
- __asm__ volatile("mfence");
+ p->pm_task_map = TASK_MAP_64BIT_SHARED;
+ PMAP_UNLOCK(p);
}
void
pmap_clear_4GB_pagezero(pmap_t p)
{
- int spl;
pdpt_entry_t *user_pdptp;
+ boolean_t istate;
- if (!p->pm_kernel_cr3)
+ if (p->pm_task_map != TASK_MAP_64BIT_SHARED)
return;
- spl = splhigh();
+ PMAP_LOCK(p);
+
+ p->pm_task_map = TASK_MAP_64BIT;
+
+ istate = ml_set_interrupts_enabled(FALSE);
+ if (current_cpu_datap()->cpu_task_map == TASK_MAP_64BIT_SHARED)
+ current_cpu_datap()->cpu_task_map = TASK_MAP_64BIT;
+ pmap_load_kernel_cr3();
+
user_pdptp = pmap64_pdpt(p, 0x0);
pmap_store_pte(user_pdptp+0, 0);
pmap_store_pte(user_pdptp+1, 0);
pmap_store_pte(user_pdptp+2, 0);
pmap_store_pte(user_pdptp+3, 0);
- p->pm_kernel_cr3 = FALSE;
+ ml_set_interrupts_enabled(istate);
- pmap_load_kernel_cr3();
+ PMAP_UNLOCK(p);
+}
- splx(spl);
+void
+pmap_load_kernel_cr3(void)
+{
+ uint64_t kernel_cr3;
+
+ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+ /*
+ * Reload cr3 with the true kernel cr3.
+ */
+ kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3;
+ set64_cr3(kernel_cr3);
+ current_cpu_datap()->cpu_active_cr3 = kernel_cr3;
+ current_cpu_datap()->cpu_tlb_invalid = FALSE;
+ __asm__ volatile("mfence");
}
/*
register pmap_t p)
{
register int c;
- spl_t s;
-#if 0
- register pt_entry_t *pdep;
- register vm_page_t m;
-#endif
if (p == PMAP_NULL)
return;
- SPLVM(s);
- simple_lock(&p->lock);
+
+ PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
+ (int) p, 0, 0, 0, 0);
+
+ PMAP_LOCK(p);
+
c = --p->ref_count;
+
if (c == 0) {
/*
* If some cpu is not using the physical pmap pointer that it
* physically on the right pmap:
*/
PMAP_UPDATE_TLBS(p,
- VM_MIN_ADDRESS,
- VM_MAX_KERNEL_ADDRESS);
-
+ 0x0ULL,
+ 0xFFFFFFFFFFFFF000ULL);
}
- simple_unlock(&p->lock);
- SPLX(s);
+
+ PMAP_UNLOCK(p);
if (c != 0) {
- return; /* still in use */
+ PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
+ (int) p, 1, 0, 0, 0);
+ return; /* still in use */
}
/*
* Free the memory maps, then the
* pmap structure.
*/
-
if (!cpu_64bit) {
-#if 0
- pdep = (pt_entry_t *)p->dirbase;
-
- while (pdep < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]) {
- int ind;
+ OSAddAtomic(-p->pm_obj->resident_page_count, &inuse_ptepages_count);
+ PMAP_ZINFO_PFREE(p->pm_obj->resident_page_count * PAGE_SIZE);
- if (*pdep & INTEL_PTE_VALID) {
- ind = pdep - (pt_entry_t *)&p->dirbase[0];
+ kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
+ PMAP_ZINFO_SFREE(NBPTD);
- vm_object_lock(p->pm_obj);
- m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)ind);
- if (m == VM_PAGE_NULL) {
- panic("pmap_destroy: pte page not in object");
- }
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
-
- vm_object_unlock(p->pm_obj);
- vm_page_unlock_queues();
+ zfree(pdpt_zone, (void *)p->pm_hold);
- /*
- * Clear pdes, this might be headed for the cache.
- */
- pmap_store_pte(pdep, 0);
- pdep++;
- }
- else {
- pmap_store_pte(pdep, 0);
- pdep++;
- }
-
- }
-#else
- inuse_ptepages_count -= p->pm_obj->resident_page_count;
-#endif
- vm_object_deallocate(p->pm_obj);
- kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD);
- zfree(pdpt_zone, (void *)p->pm_hold);
+ vm_object_deallocate(p->pm_obj);
} else {
+ /* 64 bit */
+ int inuse_ptepages = 0;
- /* 64 bit */
+ /* free 64 bit mode structs */
+ kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE);
+ PMAP_ZINFO_SFREE(PAGE_SIZE);
- pmap_unmap_sharedpage(p);
+ inuse_ptepages += p->pm_obj_pml4->resident_page_count;
+ vm_object_deallocate(p->pm_obj_pml4);
- /* free 64 bit mode structs */
- inuse_ptepages_count--;
- kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE);
+ inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
+ vm_object_deallocate(p->pm_obj_pdpt);
- inuse_ptepages_count -= p->pm_obj_pml4->resident_page_count;
- vm_object_deallocate(p->pm_obj_pml4);
-
- inuse_ptepages_count -= p->pm_obj_pdpt->resident_page_count;
- vm_object_deallocate(p->pm_obj_pdpt);
-
- inuse_ptepages_count -= p->pm_obj->resident_page_count;
- vm_object_deallocate(p->pm_obj);
+ inuse_ptepages += p->pm_obj->resident_page_count;
+ vm_object_deallocate(p->pm_obj);
+ OSAddAtomic(-(inuse_ptepages+1), &inuse_ptepages_count);
+ PMAP_ZINFO_PFREE(inuse_ptepages * PAGE_SIZE);
}
zfree(pmap_zone, p);
+
+ PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
+ 0, 0, 0, 0, 0);
+
}
/*
pmap_reference(
register pmap_t p)
{
- spl_t s;
if (p != PMAP_NULL) {
- SPLVM(s);
- simple_lock(&p->lock);
+ PMAP_LOCK(p);
p->ref_count++;
- simple_unlock(&p->lock);
- SPLX(s);
+		PMAP_UNLOCK(p);
}
}
-/*
- * Remove a range of hardware page-table entries.
- * The entries given are the first (inclusive)
- * and last (exclusive) entries for the VM pages.
- * The virtual address is the va for the first pte.
- *
- * The pmap must be locked.
- * If the pmap is not the kernel pmap, the range must lie
- * entirely within one pte-page. This is NOT checked.
- * Assumes that the pte-page exists.
- */
-
-static void
-pmap_remove_range(
- pmap_t pmap,
- vm_map_offset_t vaddr,
- pt_entry_t *spte,
- pt_entry_t *epte)
-{
- register pt_entry_t *cpte;
- int num_removed, num_unwired;
- int pai;
- pmap_paddr_t pa;
-
- num_removed = 0;
- num_unwired = 0;
-
- for (cpte = spte; cpte < epte;
- cpte++, vaddr += PAGE_SIZE) {
-
- pa = pte_to_pa(*cpte);
- if (pa == 0)
- continue;
-
- if (iswired(*cpte))
- num_unwired++;
-
- if (!valid_page(i386_btop(pa))) {
-
- /*
- * Outside range of managed physical memory.
- * Just remove the mappings.
- */
- register pt_entry_t *lpte = cpte;
-
- pmap_store_pte(lpte, 0);
- continue;
- }
- num_removed++;
-
- pai = pa_index(pa);
- LOCK_PVH(pai);
-
- /*
- * Get the modify and reference bits.
- */
- {
- register pt_entry_t *lpte;
-
- lpte = cpte;
- pmap_phys_attributes[pai] |=
- *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
- pmap_store_pte(lpte, 0);
-
- }
-
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- */
- {
- register pv_entry_t pv_h, prev, cur;
-
- pv_h = pai_to_pvh(pai);
- if (pv_h->pmap == PMAP_NULL) {
- panic("pmap_remove: null pv_list!");
- }
- if (pv_h->va == vaddr && pv_h->pmap == pmap) {
- /*
- * Header is the pv_entry. Copy the next one
- * to header and free the next one (we cannot
- * free the header)
- */
- cur = pv_h->next;
- if (cur != PV_ENTRY_NULL) {
- *pv_h = *cur;
- PV_FREE(cur);
- }
- else {
- pv_h->pmap = PMAP_NULL;
- }
- }
- else {
- cur = pv_h;
- do {
- prev = cur;
- if ((cur = prev->next) == PV_ENTRY_NULL) {
- panic("pmap-remove: mapping not in pv_list!");
- }
- } while (cur->va != vaddr || cur->pmap != pmap);
- prev->next = cur->next;
- PV_FREE(cur);
- }
- UNLOCK_PVH(pai);
- }
- }
-
- /*
- * Update the counts
- */
- assert(pmap->stats.resident_count >= num_removed);
- pmap->stats.resident_count -= num_removed;
- assert(pmap->stats.wired_count >= num_unwired);
- pmap->stats.wired_count -= num_unwired;
-}
-
/*
* Remove phys addr if mapped in specified map
*
}
-/*
- * Remove the given range of addresses
- * from the specified map.
- *
- * It is assumed that the start and end are properly
- * rounded to the hardware page size.
- */
-
-
-void
-pmap_remove(
- pmap_t map,
- addr64_t s64,
- addr64_t e64)
-{
- spl_t spl;
- register pt_entry_t *pde;
- register pt_entry_t *spte, *epte;
- addr64_t l64;
- addr64_t orig_s64;
-
- if (map == PMAP_NULL || s64 == e64)
- return;
-
- PMAP_READ_LOCK(map, spl);
-
- orig_s64 = s64;
-
- while (s64 < e64) {
- l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size-1);
- if (l64 > e64)
- l64 = e64;
- pde = pmap_pde(map, s64);
- if (pde && (*pde & INTEL_PTE_VALID)) {
- spte = (pt_entry_t *)pmap_pte(map, (s64 & ~(pde_mapped_size-1)));
- spte = &spte[ptenum(s64)];
- epte = &spte[intel_btop(l64-s64)];
- pmap_remove_range(map, s64, spte, epte);
- }
- s64 = l64;
- pde++;
- }
- PMAP_UPDATE_TLBS(map, orig_s64, e64);
-
- PMAP_READ_UNLOCK(map, spl);
-}
-
-/*
- * Routine: pmap_page_protect
- *
- * Function:
- * Lower the permission for all mappings to a given
- * page.
- */
-void
-pmap_page_protect(
- ppnum_t pn,
- vm_prot_t prot)
-{
- pv_entry_t pv_h, prev;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- int pai;
- register pmap_t pmap;
- spl_t spl;
- boolean_t remove;
- pmap_paddr_t phys;
-
- assert(pn != vm_page_fictitious_addr);
-
- if (!valid_page(pn)) {
- /*
- * Not a managed page.
- */
- return;
- }
-
- /*
- * Determine the new protection.
- */
- switch (prot) {
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
- remove = FALSE;
- break;
- case VM_PROT_ALL:
- return; /* nothing to do */
- default:
- remove = TRUE;
- break;
- }
- phys = (pmap_paddr_t)i386_ptob(pn);
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
-
-
- /*
- * Lock the pmap system first, since we will be changing
- * several pmaps.
- */
- PMAP_WRITE_LOCK(spl);
-
- /*
- * Walk down PV list, changing or removing all mappings.
- * We do not have to lock the pv_list because we have
- * the entire pmap system locked.
- */
- if (pv_h->pmap != PMAP_NULL) {
-
- prev = pv_e = pv_h;
-
- do {
- register vm_map_offset_t vaddr;
-
- pmap = pv_e->pmap;
- /*
- * Lock the pmap to block pmap_extract and similar routines.
- */
- simple_lock(&pmap->lock);
-
- vaddr = pv_e->va;
- pte = pmap_pte(pmap, vaddr);
- if(0 == pte) {
- kprintf("pmap_page_protect pmap 0x%x pn 0x%x vaddr 0x%llx\n",pmap, pn, vaddr);
- panic("pmap_page_protect");
- }
- /*
- * Consistency checks.
- */
- /* assert(*pte & INTEL_PTE_VALID); XXX */
- /* assert(pte_to_phys(*pte) == phys); */
-
-
- /*
- * Remove the mapping if new protection is NONE
- * or if write-protecting a kernel mapping.
- */
- if (remove || pmap == kernel_pmap) {
- /*
- * Remove the mapping, collecting any modify bits.
- */
- pmap_store_pte(pte, *pte & ~INTEL_PTE_VALID);
-
- PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-
- pmap_phys_attributes[pai] |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-
- pmap_store_pte(pte, 0);
-
-
- //XXX breaks DEBUG build assert(pmap->stats.resident_count >= 1);
- pmap->stats.resident_count--;
-
- /*
- * Remove the pv_entry.
- */
- if (pv_e == pv_h) {
- /*
- * Fix up head later.
- */
- pv_h->pmap = PMAP_NULL;
- }
- else {
- /*
- * Delete this entry.
- */
- prev->next = pv_e->next;
- PV_FREE(pv_e);
- }
- } else {
- /*
- * Write-protect.
- */
- pmap_store_pte(pte, *pte & ~INTEL_PTE_WRITE);
-
- PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
- /*
- * Advance prev.
- */
- prev = pv_e;
- }
-
- simple_unlock(&pmap->lock);
-
- } while ((pv_e = prev->next) != PV_ENTRY_NULL);
-
- /*
- * If pv_head mapping was removed, fix it up.
- */
- if (pv_h->pmap == PMAP_NULL) {
- pv_e = pv_h->next;
-
- if (pv_e != PV_ENTRY_NULL) {
- *pv_h = *pv_e;
- PV_FREE(pv_e);
- }
- }
- }
- PMAP_WRITE_UNLOCK(spl);
-}
-
-/*
- * Routine:
- * pmap_disconnect
- *
- * Function:
- * Disconnect all mappings for this page and return reference and change status
- * in generic format.
- *
- */
-unsigned int pmap_disconnect(
- ppnum_t pa)
-{
- pmap_page_protect(pa, 0); /* disconnect the page */
- return (pmap_get_refmod(pa)); /* return ref/chg status */
-}
-
/*
* Set the physical protection on the
* specified range of this map as requested.
register pt_entry_t *spte, *epte;
vm_map_offset_t lva;
vm_map_offset_t orig_sva;
- spl_t spl;
boolean_t set_NX;
+ int num_found = 0;
+
+ pmap_intr_assert();
if (map == PMAP_NULL)
return;
return;
}
+ PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
+ (int) map,
+ (int) (sva>>32), (int) sva,
+ (int) (eva>>32), (int) eva);
+
if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled )
set_NX = FALSE;
else
set_NX = TRUE;
- SPLVM(spl);
- simple_lock(&map->lock);
+ PMAP_LOCK(map);
orig_sva = sva;
while (sva < eva) {
epte = &spte[intel_btop(lva-sva)];
while (spte < epte) {
+
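+ /* pmap_update_pte is assumed to update the PTE with an atomic compare-and-exchange, so reference/modify bits set concurrently by hardware are not lost (assumption, not verified here) */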
if (*spte & INTEL_PTE_VALID) {
if (prot & VM_PROT_WRITE)
- pmap_store_pte(spte, *spte | INTEL_PTE_WRITE);
- else
- pmap_store_pte(spte, *spte & ~INTEL_PTE_WRITE);
-
- if (set_NX == TRUE)
- pmap_store_pte(spte, *spte | INTEL_PTE_NX);
+ pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE));
else
- pmap_store_pte(spte, *spte & ~INTEL_PTE_NX);
-
- }
- spte++;
- }
- }
- sva = lva;
- pde++;
- }
- PMAP_UPDATE_TLBS(map, orig_sva, eva);
-
- simple_unlock(&map->lock);
- SPLX(spl);
-}
-
-/* Map a (possibly) autogenned block */
-void
-pmap_map_block(
- pmap_t pmap,
- addr64_t va,
- ppnum_t pa,
- uint32_t size,
- vm_prot_t prot,
- int attr,
- __unused unsigned int flags)
-{
- uint32_t page;
-
- for (page = 0; page < size; page++) {
- pmap_enter(pmap, va, pa, prot, attr, TRUE);
- va += PAGE_SIZE;
- pa++;
- }
-}
-
-
-/*
- * Insert the given physical page (p) at
- * the specified virtual address (v) in the
- * target physical map with the protection requested.
- *
- * If specified, the page will be wired down, meaning
- * that the related pte cannot be reclaimed.
- *
- * NB: This is the only routine which MAY NOT lazy-evaluate
- * or lose information. That is, this routine must actually
- * insert this page into the given map NOW.
- */
-void
-pmap_enter(
- register pmap_t pmap,
- vm_map_offset_t vaddr,
- ppnum_t pn,
- vm_prot_t prot,
- unsigned int flags,
- boolean_t wired)
-{
- register pt_entry_t *pte;
- register pv_entry_t pv_h;
- register int pai;
- pv_entry_t pv_e;
- pt_entry_t template;
- spl_t spl;
- pmap_paddr_t old_pa;
- pmap_paddr_t pa = (pmap_paddr_t)i386_ptob(pn);
- boolean_t need_tlbflush = FALSE;
- boolean_t set_NX;
-
- XPR(0x80000000, "%x/%x: pmap_enter %x/%qx/%x\n",
- current_thread(),
- current_thread(),
- pmap, vaddr, pn);
-
- assert(pn != vm_page_fictitious_addr);
- if (pmap_debug)
- printf("pmap(%qx, %x)\n", vaddr, pn);
- if (pmap == PMAP_NULL)
- return;
-
- if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled )
- set_NX = FALSE;
- else
- set_NX = TRUE;
-
- /*
- * Must allocate a new pvlist entry while we're unlocked;
- * zalloc may cause pageout (which will lock the pmap system).
- * If we determine we need a pvlist entry, we will unlock
- * and allocate one. Then we will retry, throughing away
- * the allocated entry later (if we no longer need it).
- */
- pv_e = PV_ENTRY_NULL;
-
- PMAP_READ_LOCK(pmap, spl);
-
- /*
- * Expand pmap to include this pte. Assume that
- * pmap is always expanded to include enough hardware
- * pages to map one VM page.
- */
-
- while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
- /*
- * Must unlock to expand the pmap.
- */
- PMAP_READ_UNLOCK(pmap, spl);
-
- pmap_expand(pmap, vaddr); /* going to grow pde level page(s) */
-
- PMAP_READ_LOCK(pmap, spl);
- }
- /*
- * Special case if the physical page is already mapped
- * at this address.
- */
- old_pa = pte_to_pa(*pte);
- if (old_pa == pa) {
- /*
- * May be changing its wired attribute or protection
- */
-
- template = pa_to_pte(pa) | INTEL_PTE_VALID;
+ pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE));
- if(VM_MEM_NOT_CACHEABLE == (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
- if(!(flags & VM_MEM_GUARDED))
- template |= INTEL_PTE_PTA;
- template |= INTEL_PTE_NCACHE;
- }
-
- if (pmap != kernel_pmap)
- template |= INTEL_PTE_USER;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
-
- if (set_NX == TRUE)
- template |= INTEL_PTE_NX;
-
- if (wired) {
- template |= INTEL_PTE_WIRED;
- if (!iswired(*pte))
- pmap->stats.wired_count++;
- }
- else {
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- pmap->stats.wired_count--;
- }
- }
-
- if (*pte & INTEL_PTE_MOD)
- template |= INTEL_PTE_MOD;
-
- pmap_store_pte(pte, template);
- pte++;
-
- need_tlbflush = TRUE;
- goto Done;
- }
-
- /*
- * Outline of code from here:
- * 1) If va was mapped, update TLBs, remove the mapping
- * and remove old pvlist entry.
- * 2) Add pvlist entry for new mapping
- * 3) Enter new mapping.
- *
- * SHARING FAULTS IS HORRIBLY BROKEN
- * SHARING_FAULTS complicates this slightly in that it cannot
- * replace the mapping, but must remove it (because adding the
- * pvlist entry for the new mapping may remove others), and
- * hence always enters the new mapping at step 3)
- *
- * If the old physical page is not managed step 1) is skipped
- * (except for updating the TLBs), and the mapping is
- * overwritten at step 3). If the new physical page is not
- * managed, step 2) is skipped.
- */
-
- if (old_pa != (pmap_paddr_t) 0) {
-
- /*
- * Don't do anything to pages outside valid memory here.
- * Instead convince the code that enters a new mapping
- * to overwrite the old one.
- */
-
- if (valid_page(i386_btop(old_pa))) {
-
- pai = pa_index(old_pa);
- LOCK_PVH(pai);
-
- assert(pmap->stats.resident_count >= 1);
- pmap->stats.resident_count--;
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- pmap->stats.wired_count--;
- }
-
- pmap_phys_attributes[pai] |=
- *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-
- pmap_store_pte(pte, 0);
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- */
- {
- register pv_entry_t prev, cur;
-
- pv_h = pai_to_pvh(pai);
- if (pv_h->pmap == PMAP_NULL) {
- panic("pmap_enter: null pv_list!");
- }
-
- if (pv_h->va == vaddr && pv_h->pmap == pmap) {
- /*
- * Header is the pv_entry. Copy the next one
- * to header and free the next one (we cannot
- * free the header)
- */
- cur = pv_h->next;
- if (cur != PV_ENTRY_NULL) {
- *pv_h = *cur;
- pv_e = cur;
- }
- else {
- pv_h->pmap = PMAP_NULL;
- }
- }
- else {
- cur = pv_h;
- do {
- prev = cur;
- if ((cur = prev->next) == PV_ENTRY_NULL) {
- panic("pmap_enter: mapping not in pv_list!");
- }
- } while (cur->va != vaddr || cur->pmap != pmap);
- prev->next = cur->next;
- pv_e = cur;
- }
- }
- UNLOCK_PVH(pai);
- }
- else {
-
- /*
- * old_pa is not managed. Pretend it's zero so code
- * at Step 3) will enter new mapping (overwriting old
- * one). Do removal part of accounting.
- */
- old_pa = (pmap_paddr_t) 0;
-
- if (iswired(*pte)) {
- assert(pmap->stats.wired_count >= 1);
- pmap->stats.wired_count--;
- }
- }
- need_tlbflush = TRUE;
-
- }
-
- if (valid_page(i386_btop(pa))) {
-
- /*
- * Step 2) Enter the mapping in the PV list for this
- * physical page.
- */
-
- pai = pa_index(pa);
-
-
-#if SHARING_FAULTS /* this is horribly broken , do not enable */
-RetryPvList:
- /*
- * We can return here from the sharing fault code below
- * in case we removed the only entry on the pv list and thus
- * must enter the new one in the list header.
- */
-#endif /* SHARING_FAULTS */
- LOCK_PVH(pai);
- pv_h = pai_to_pvh(pai);
-
- if (pv_h->pmap == PMAP_NULL) {
- /*
- * No mappings yet
- */
- pv_h->va = vaddr;
- pv_h->pmap = pmap;
- pv_h->next = PV_ENTRY_NULL;
- }
- else {
-#if DEBUG
- {
- /*
- * check that this mapping is not already there
- * or there is no alias for this mapping in the same map
- */
- pv_entry_t e = pv_h;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap && e->va == vaddr)
- panic("pmap_enter: already in pv_list");
- e = e->next;
- }
- }
-#endif /* DEBUG */
-#if SHARING_FAULTS /* broken, do not enable */
- {
- /*
- * do sharing faults.
- * if we find an entry on this pv list in the same address
- * space, remove it. we know there will not be more
- * than one.
- */
- pv_entry_t e = pv_h;
- pt_entry_t *opte;
-
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap) {
- /*
- * Remove it, drop pv list lock first.
- */
- UNLOCK_PVH(pai);
-
- opte = pmap_pte(pmap, e->va);
- assert(opte != PT_ENTRY_NULL);
- /*
- * Invalidate the translation buffer,
- * then remove the mapping.
- */
- pmap_remove_range(pmap, e->va, opte,
- opte + 1);
-
- PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE);
-
- /*
- * We could have remove the head entry,
- * so there could be no more entries
- * and so we have to use the pv head entry.
- * so, go back to the top and try the entry
- * again.
- */
- goto RetryPvList;
- }
- e = e->next;
- }
-
- /*
- * check that this mapping is not already there
- */
- e = pv_h;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap)
- panic("pmap_enter: alias in pv_list");
- e = e->next;
- }
- }
-#endif /* SHARING_FAULTS */
-#if DEBUG_ALIAS
- {
- /*
- * check for aliases within the same address space.
- */
- pv_entry_t e = pv_h;
- vm_offset_t rpc = get_rpc();
-
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap) {
- /*
- * log this entry in the alias ring buffer
- * if it's not there already.
- */
- struct pmap_alias *pma;
- int ii, logit;
-
- logit = TRUE;
- for (ii = 0; ii < pmap_alias_index; ii++) {
- if (pmap_aliasbuf[ii].rpc == rpc) {
- /* found it in the log already */
- logit = FALSE;
- break;
- }
- }
- if (logit) {
- pma = &pmap_aliasbuf[pmap_alias_index];
- pma->pmap = pmap;
- pma->va = vaddr;
- pma->rpc = rpc;
- pma->cookie = PMAP_ALIAS_COOKIE;
- if (++pmap_alias_index >= PMAP_ALIAS_MAX)
- panic("pmap_enter: exhausted alias log");
- }
- }
- e = e->next;
- }
- }
-#endif /* DEBUG_ALIAS */
- /*
- * Add new pv_entry after header.
- */
- if (pv_e == PV_ENTRY_NULL) {
- PV_ALLOC(pv_e);
- if (pv_e == PV_ENTRY_NULL) {
- panic("pmap no pv_e's");
- }
- }
- pv_e->va = vaddr;
- pv_e->pmap = pmap;
- pv_e->next = pv_h->next;
- pv_h->next = pv_e;
- /*
- * Remember that we used the pvlist entry.
- */
- pv_e = PV_ENTRY_NULL;
- }
- UNLOCK_PVH(pai);
-
- /*
- * only count the mapping
- * for 'managed memory'
- */
- pmap->stats.resident_count++;
- }
-
- /*
- * Step 3) Enter the mapping.
- */
-
-
- /*
- * Build a template to speed up entering -
- * only the pfn changes.
- */
- template = pa_to_pte(pa) | INTEL_PTE_VALID;
-
- if(flags & VM_MEM_NOT_CACHEABLE) {
- if(!(flags & VM_MEM_GUARDED))
- template |= INTEL_PTE_PTA;
- template |= INTEL_PTE_NCACHE;
- }
-
- if (pmap != kernel_pmap)
- template |= INTEL_PTE_USER;
- if (prot & VM_PROT_WRITE)
- template |= INTEL_PTE_WRITE;
-
- if (set_NX == TRUE)
- template |= INTEL_PTE_NX;
-
- if (wired) {
- template |= INTEL_PTE_WIRED;
- pmap->stats.wired_count++;
- }
- pmap_store_pte(pte, template);
-
-Done:
- if (need_tlbflush == TRUE)
- PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
-
- if (pv_e != PV_ENTRY_NULL) {
- PV_FREE(pv_e);
- }
-
- PMAP_READ_UNLOCK(pmap, spl);
-}
-
-/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
- */
-void
-pmap_change_wiring(
- register pmap_t map,
- vm_map_offset_t vaddr,
- boolean_t wired)
-{
- register pt_entry_t *pte;
- spl_t spl;
+ if (set_NX == TRUE)
+ pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX));
+ else
+ pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX));
-#if 1
- /*
- * We must grab the pmap system lock because we may
- * change a pte_page queue.
- */
- PMAP_READ_LOCK(map, spl);
-
- if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
- panic("pmap_change_wiring: pte missing");
-
- if (wired && !iswired(*pte)) {
- /*
- * wiring down mapping
- */
- map->stats.wired_count++;
- pmap_store_pte(pte, *pte | INTEL_PTE_WIRED);
- pte++;
+ num_found++;
+ }
+ spte++;
+ }
+ }
+ sva = lva;
}
- else if (!wired && iswired(*pte)) {
- /*
- * unwiring mapping
- */
- assert(map->stats.wired_count >= 1);
- map->stats.wired_count--;
- pmap_store_pte(pte, *pte & ~INTEL_PTE_WIRED);
- pte++;
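+ /* Only issue the TLB shootdown if at least one valid PTE was actually modified */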
+ if (num_found)
+ {
+ PMAP_UPDATE_TLBS(map, orig_sva, eva);
}
- PMAP_READ_UNLOCK(map, spl);
+ PMAP_UNLOCK(map);
-#else
- return;
-#endif
+ PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
+ 0, 0, 0, 0, 0);
}
-ppnum_t
-pmap_find_phys(pmap_t pmap, addr64_t va)
+/* Map a (possibly) autogenned block */
+void
+pmap_map_block(
+ pmap_t pmap,
+ addr64_t va,
+ ppnum_t pa,
+ uint32_t size,
+ vm_prot_t prot,
+ int attr,
+ __unused unsigned int flags)
{
- pt_entry_t *ptp;
- ppnum_t ppn;
-
- mp_disable_preemption();
+ uint32_t page;
- ptp = pmap_pte(pmap, va);
- if (PT_ENTRY_NULL == ptp) {
- ppn = 0;
- } else {
- ppn = (ppnum_t) i386_btop(pte_to_pa(*ptp));
- }
- mp_enable_preemption();
-
- return ppn;
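+ /* Enter the pages one at a time; the TRUE argument wires each mapping so it cannot be reclaimed */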
+ for (page = 0; page < size; page++) {
+ pmap_enter(pmap, va, pa, prot, attr, TRUE);
+ va += PAGE_SIZE;
+ pa++;
+ }
}
/*
paddr = (vm_offset_t)0;
ppn = pmap_find_phys(pmap, vaddr);
+
if (ppn) {
- paddr = ((vm_offset_t)i386_ptob(ppn)) | (vaddr & INTEL_OFFMASK);
+ paddr = ((vm_offset_t)i386_ptob(ppn)) | ((vm_offset_t)vaddr & INTEL_OFFMASK);
}
return (paddr);
}
if (kernel_pmap == map) panic("expand kernel pml4");
spl = splhigh();
- pml4p = pmap64_pml4(map, vaddr);
- splx(spl);
- if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p");
+ pml4p = pmap64_pml4(map, vaddr);
+ splx(spl);
+ if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p");
/*
* Allocate a VM page for the pml4 page
pa = i386_ptob(pn);
i = pml4idx(map, vaddr);
- vm_object_lock(map->pm_obj_pml4);
-#if 0 /* DEBUG */
- if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
- kprintf("pmap_expand_pml4: obj_pml4 not empty, pmap 0x%x pm_obj_pml4 0x%x vaddr 0x%llx i 0x%llx\n",
- map, map->pm_obj_pml4, vaddr, i);
- }
-#endif
- vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
-
- vm_page_lock_queues();
- vm_page_wire(m);
-
- vm_page_unlock_queues();
- vm_object_unlock(map->pm_obj_pml4);
- inuse_ptepages_count++;
- map->stats.resident_count++;
- map->stats.wired_count++;
-
/*
* Zero the page.
*/
pmap_zero_page(pn);
- PMAP_READ_LOCK(map, spl);
+ vm_page_lockspin_queues();
+ vm_page_wire(m);
+ vm_page_unlock_queues();
+
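+ /* Account for the new page-table page: global in-use and lifetime counters, plus (presumably) zone-info byte accounting via PMAP_ZINFO_PALLOC */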
+ OSAddAtomic(1, &inuse_ptepages_count);
+ OSAddAtomic64(1, &alloc_ptepages_count);
+ PMAP_ZINFO_PALLOC(PAGE_SIZE);
+
+ /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+ vm_object_lock(map->pm_obj_pml4);
+
+ PMAP_LOCK(map);
/*
* See if someone else expanded us first
*/
if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
- PMAP_READ_UNLOCK(map, spl);
- vm_object_lock(map->pm_obj_pml4);
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
- map->stats.resident_count--;
- map->stats.wired_count--;
-
- vm_page_unlock_queues();
+ PMAP_UNLOCK(map);
vm_object_unlock(map->pm_obj_pml4);
+
+ VM_PAGE_FREE(m);
+
+ OSAddAtomic(-1, &inuse_ptepages_count);
+ PMAP_ZINFO_PFREE(PAGE_SIZE);
return;
}
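+ /* pmap_set_noencrypt presumably marks the page so hibernation need not encrypt it (inferred from the name) */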
+ pmap_set_noencrypt(pn);
+
+#if 0 /* DEBUG */
+ if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
+ panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+ map, map->pm_obj_pml4, vaddr, i);
+ }
+#endif
+ vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
+ vm_object_unlock(map->pm_obj_pml4);
/*
* Set the page directory entry for this page table.
- * If we have allocated more than one hardware page,
- * set several page directory entries.
*/
-
pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
pmap_store_pte(pml4p, pa_to_pte(pa)
| INTEL_PTE_USER
| INTEL_PTE_WRITE);
- PMAP_READ_UNLOCK(map, spl);
+ PMAP_UNLOCK(map);
return;
if (kernel_pmap == map) panic("expand kernel pdpt");
spl = splhigh();
- while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
- splx(spl);
- pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */
- spl = splhigh();
- }
- splx(spl);
-
+ while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
+ splx(spl);
+ pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */
+ spl = splhigh();
+ }
+ splx(spl);
/*
* Allocate a VM page for the pdpt page
pa = i386_ptob(pn);
i = pdptidx(map, vaddr);
- vm_object_lock(map->pm_obj_pdpt);
-#if 0 /* DEBUG */
- if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
- kprintf("pmap_expand_pdpt: obj_pdpt not empty, pmap 0x%x pm_obj_pdpt 0x%x vaddr 0x%llx i 0x%llx\n",
- map, map->pm_obj_pdpt, vaddr, i);
- }
-#endif
- vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
-
- vm_page_lock_queues();
- vm_page_wire(m);
-
- vm_page_unlock_queues();
- vm_object_unlock(map->pm_obj_pdpt);
- inuse_ptepages_count++;
- map->stats.resident_count++;
- map->stats.wired_count++;
-
/*
* Zero the page.
*/
pmap_zero_page(pn);
- PMAP_READ_LOCK(map, spl);
+ vm_page_lockspin_queues();
+ vm_page_wire(m);
+ vm_page_unlock_queues();
+
+ OSAddAtomic(1, &inuse_ptepages_count);
+ OSAddAtomic64(1, &alloc_ptepages_count);
+ PMAP_ZINFO_PALLOC(PAGE_SIZE);
+
+ /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+ vm_object_lock(map->pm_obj_pdpt);
+
+ PMAP_LOCK(map);
/*
* See if someone else expanded us first
*/
if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
- PMAP_READ_UNLOCK(map, spl);
- vm_object_lock(map->pm_obj_pdpt);
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
- map->stats.resident_count--;
- map->stats.wired_count--;
-
- vm_page_unlock_queues();
+ PMAP_UNLOCK(map);
vm_object_unlock(map->pm_obj_pdpt);
+
+ VM_PAGE_FREE(m);
+
+ OSAddAtomic(-1, &inuse_ptepages_count);
+ PMAP_ZINFO_PFREE(PAGE_SIZE);
return;
}
+ pmap_set_noencrypt(pn);
+
+#if 0 /* DEBUG */
+ if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
+ panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
+ map, map->pm_obj_pdpt, vaddr, i);
+ }
+#endif
+ vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
+ vm_object_unlock(map->pm_obj_pdpt);
/*
* Set the page directory entry for this page table.
- * If we have allocated more than one hardware page,
- * set several page directory entries.
*/
-
pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
pmap_store_pte(pdptp, pa_to_pte(pa)
| INTEL_PTE_USER
| INTEL_PTE_WRITE);
- PMAP_READ_UNLOCK(map, spl);
+ PMAP_UNLOCK(map);
return;
*/
if (cpu_64bit && (map != kernel_pmap)) {
- spl = splhigh();
- while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
- splx(spl);
- pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */
- spl = splhigh();
- }
- splx(spl);
- } else {
- pdp = pmap_pde(map, vaddr);
+ spl = splhigh();
+ while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
+ splx(spl);
+ pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */
+ spl = splhigh();
+ }
+ splx(spl);
}
-
/*
* Allocate a VM page for the pde entries.
*/
pa = i386_ptob(pn);
i = pdeidx(map, vaddr);
- vm_object_lock(map->pm_obj);
-#if 0 /* DEBUG */
- if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
- kprintf("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
- map, map->pm_obj, vaddr, i);
- }
-#endif
- vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
-
- vm_page_lock_queues();
- vm_page_wire(m);
- inuse_ptepages_count++;
-
- vm_page_unlock_queues();
- vm_object_unlock(map->pm_obj);
-
/*
* Zero the page.
*/
pmap_zero_page(pn);
- PMAP_READ_LOCK(map, spl);
+ vm_page_lockspin_queues();
+ vm_page_wire(m);
+ vm_page_unlock_queues();
+
+ OSAddAtomic(1, &inuse_ptepages_count);
+ OSAddAtomic64(1, &alloc_ptepages_count);
+ PMAP_ZINFO_PALLOC(PAGE_SIZE);
+
+ /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
+ vm_object_lock(map->pm_obj);
+
+ PMAP_LOCK(map);
/*
* See if someone else expanded us first
*/
+
if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
- PMAP_READ_UNLOCK(map, spl);
- vm_object_lock(map->pm_obj);
+ PMAP_UNLOCK(map);
+ vm_object_unlock(map->pm_obj);
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
+ VM_PAGE_FREE(m);
- vm_page_unlock_queues();
- vm_object_unlock(map->pm_obj);
+ OSAddAtomic(-1, &inuse_ptepages_count);
+ PMAP_ZINFO_PFREE(PAGE_SIZE);
return;
}
+ pmap_set_noencrypt(pn);
+
+#if 0 /* DEBUG */
+ if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
+ panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
+ map, map->pm_obj, vaddr, i);
+ }
+#endif
+ vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
+ vm_object_unlock(map->pm_obj);
+
+ /*
+ * refetch while locked
+ */
- pdp = pmap_pde(map, vaddr); /* refetch while locked */
+ pdp = pmap_pde(map, vaddr);
/*
* Set the page directory entry for this page table.
- * If we have allocated more than one hardware page,
- * set several page directory entries.
*/
-
pmap_store_pte(pdp, pa_to_pte(pa)
| INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE);
-
- PMAP_READ_UNLOCK(map, spl);
+ PMAP_UNLOCK(map);
return;
}
cache_flush_page_phys(pa);
}
+
+
+#ifdef CURRENTLY_UNUSED_AND_UNTESTED
+
int collect_ref;
int collect_unref;
register pt_entry_t *pdp, *ptp;
pt_entry_t *eptp;
int wired;
- spl_t spl;
if (p == PMAP_NULL)
return;
/*
* Garbage collect map.
*/
- PMAP_READ_LOCK(p, spl);
+ PMAP_LOCK(p);
for (pdp = (pt_entry_t *)p->dirbase;
pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
* Invalidate the page directory pointer.
*/
pmap_store_pte(pdp, 0x0);
-
- PMAP_READ_UNLOCK(p, spl);
-
- /*
- * And free the pte page itself.
- */
- {
- register vm_page_t m;
-
- vm_object_lock(p->pm_obj);
- m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
- if (m == VM_PAGE_NULL)
- panic("pmap_collect: pte page not in object");
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
- vm_page_unlock_queues();
- vm_object_unlock(p->pm_obj);
- }
-
- PMAP_READ_LOCK(p, spl);
- }
- }
- }
- }
- PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
-
- PMAP_READ_UNLOCK(p, spl);
- return;
-
-}
-
-
-void
-pmap_copy_page(src, dst)
- ppnum_t src;
- ppnum_t dst;
-{
- bcopy_phys((addr64_t)i386_ptob(src),
- (addr64_t)i386_ptob(dst),
- PAGE_SIZE);
-}
-
-
-/*
- * Routine: pmap_pageable
- * Function:
- * Make the specified pages (by pmap, offset)
- * pageable (or not) as requested.
- *
- * A page which is not pageable may not take
- * a fault; therefore, its page table entry
- * must remain valid for the duration.
- *
- * This routine is merely advisory; pmap_enter
- * will specify that these pages are to be wired
- * down (or not) as appropriate.
- */
-void
-pmap_pageable(
- __unused pmap_t pmap,
- __unused vm_map_offset_t start_addr,
- __unused vm_map_offset_t end_addr,
- __unused boolean_t pageable)
-{
-#ifdef lint
- pmap++; start_addr++; end_addr++; pageable++;
-#endif /* lint */
-}
-
-/*
- * Clear specified attribute bits.
- */
-void
-phys_attribute_clear(
- ppnum_t pn,
- int bits)
-{
- pv_entry_t pv_h;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- int pai;
- register pmap_t pmap;
- spl_t spl;
- pmap_paddr_t phys;
-
- assert(pn != vm_page_fictitious_addr);
- if (!valid_page(pn)) {
- /*
- * Not a managed page.
- */
- return;
- }
-
- /*
- * Lock the pmap system first, since we will be changing
- * several pmaps.
- */
-
- PMAP_WRITE_LOCK(spl);
- phys = i386_ptob(pn);
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
-
- /*
- * Walk down PV list, clearing all modify or reference bits.
- * We do not have to lock the pv_list because we have
- * the entire pmap system locked.
- */
- if (pv_h->pmap != PMAP_NULL) {
- /*
- * There are some mappings.
- */
- for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
-
- pmap = pv_e->pmap;
- /*
- * Lock the pmap to block pmap_extract and similar routines.
- */
- simple_lock(&pmap->lock);
-
- {
- register vm_map_offset_t va;
-
- va = pv_e->va;
- pte = pmap_pte(pmap, va);
-
-#if 0
- /*
- * Consistency checks.
- */
- assert(*pte & INTEL_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
-#endif
-
- /*
- * Clear modify or reference bits.
- */
-
- pmap_store_pte(pte, *pte & ~bits);
- pte++;
- PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
- }
- simple_unlock(&pmap->lock);
-
- }
- }
-
- pmap_phys_attributes[pai] &= ~bits;
-
- PMAP_WRITE_UNLOCK(spl);
-}
-
-/*
- * Check specified attribute bits.
- */
-boolean_t
-phys_attribute_test(
- ppnum_t pn,
- int bits)
-{
- pv_entry_t pv_h;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- int pai;
- register pmap_t pmap;
- spl_t spl;
- pmap_paddr_t phys;
-
- assert(pn != vm_page_fictitious_addr);
- if (!valid_page(pn)) {
- /*
- * Not a managed page.
- */
- return (FALSE);
- }
-
- phys = i386_ptob(pn);
- pai = pa_index(phys);
- /*
- * super fast check... if bits already collected
- * no need to take any locks...
- * if not set, we need to recheck after taking
- * the lock in case they got pulled in while
- * we were waiting for the lock
- */
- if (pmap_phys_attributes[pai] & bits)
- return (TRUE);
- pv_h = pai_to_pvh(pai);
-
- /*
- * Lock the pmap system first, since we will be checking
- * several pmaps.
- */
- PMAP_WRITE_LOCK(spl);
-
- if (pmap_phys_attributes[pai] & bits) {
- PMAP_WRITE_UNLOCK(spl);
- return (TRUE);
- }
-
- /*
- * Walk down PV list, checking all mappings.
- * We do not have to lock the pv_list because we have
- * the entire pmap system locked.
- */
- if (pv_h->pmap != PMAP_NULL) {
- /*
- * There are some mappings.
- */
- for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
-
- pmap = pv_e->pmap;
- /*
- * Lock the pmap to block pmap_extract and similar routines.
- */
- simple_lock(&pmap->lock);
-
- {
- register vm_map_offset_t va;
-
- va = pv_e->va;
- pte = pmap_pte(pmap, va);
-
-#if 0
- /*
- * Consistency checks.
- */
- assert(*pte & INTEL_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
-#endif
- }
-
- /*
- * Check modify or reference bits.
- */
- {
- if (*pte++ & bits) {
- simple_unlock(&pmap->lock);
- PMAP_WRITE_UNLOCK(spl);
- return (TRUE);
- }
- }
- simple_unlock(&pmap->lock);
- }
- }
- PMAP_WRITE_UNLOCK(spl);
- return (FALSE);
-}
+
+ PMAP_UNLOCK(p);
-/*
- * Set specified attribute bits.
- */
-void
-phys_attribute_set(
- ppnum_t pn,
- int bits)
-{
- int spl;
- pmap_paddr_t phys;
+ /*
+ * And free the pte page itself.
+ */
+ {
+ register vm_page_t m;
- assert(pn != vm_page_fictitious_addr);
- if (!valid_page(pn)) {
- /*
- * Not a managed page.
- */
- return;
- }
+ vm_object_lock(p->pm_obj);
- /*
- * Lock the pmap system and set the requested bits in
- * the phys attributes array. Don't need to bother with
- * ptes because the test routine looks here first.
- */
- phys = i386_ptob(pn);
- PMAP_WRITE_LOCK(spl);
- pmap_phys_attributes[pa_index(phys)] |= bits;
- PMAP_WRITE_UNLOCK(spl);
-}
+ m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
+ if (m == VM_PAGE_NULL)
+ panic("pmap_collect: pte page not in object");
-/*
- * Set the modify bit on the specified physical page.
- */
+ vm_object_unlock(p->pm_obj);
-void pmap_set_modify(
- ppnum_t pn)
-{
- phys_attribute_set(pn, PHYS_MODIFIED);
-}
+ VM_PAGE_FREE(m);
-/*
- * Clear the modify bits on the specified physical page.
- */
+ OSAddAtomic(-1, &inuse_ptepages_count);
+ PMAP_ZINFO_PFREE(PAGE_SIZE);
+ }
-void
-pmap_clear_modify(
- ppnum_t pn)
-{
- phys_attribute_clear(pn, PHYS_MODIFIED);
-}
+ PMAP_LOCK(p);
+ }
+ }
+ }
+ }
-/*
- * pmap_is_modified:
- *
- * Return whether or not the specified physical page is modified
- * by any physical maps.
- */
+ PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
+ PMAP_UNLOCK(p);
+ return;
-boolean_t
-pmap_is_modified(
- ppnum_t pn)
-{
- return (phys_attribute_test(pn, PHYS_MODIFIED));
}
+#endif
-/*
- * pmap_clear_reference:
- *
- * Clear the reference bit on the specified physical page.
- */
void
-pmap_clear_reference(
- ppnum_t pn)
+pmap_copy_page(ppnum_t src, ppnum_t dst)
{
- phys_attribute_clear(pn, PHYS_REFERENCED);
+ bcopy_phys((addr64_t)i386_ptob(src),
+ (addr64_t)i386_ptob(dst),
+ PAGE_SIZE);
}
-void
-pmap_set_reference(ppnum_t pn)
-{
- phys_attribute_set(pn, PHYS_REFERENCED);
-}
/*
- * pmap_is_referenced:
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
*
- * Return whether or not the specified physical page is referenced
- * by any physical maps.
- */
-
-boolean_t
-pmap_is_referenced(
- ppnum_t pn)
-{
- return (phys_attribute_test(pn, PHYS_REFERENCED));
-}
-
-/*
- * pmap_get_refmod(phys)
- * returns the referenced and modified bits of the specified
- * physical page.
- */
-unsigned int
-pmap_get_refmod(ppnum_t pa)
-{
- return ( ((phys_attribute_test(pa, PHYS_MODIFIED))? VM_MEM_MODIFIED : 0)
- | ((phys_attribute_test(pa, PHYS_REFERENCED))? VM_MEM_REFERENCED : 0));
-}
-
-/*
- * pmap_clear_refmod(phys, mask)
- * clears the referenced and modified bits as specified by the mask
- * of the specified physical page.
- */
-void
-pmap_clear_refmod(ppnum_t pa, unsigned int mask)
-{
- unsigned int x86Mask;
-
- x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
- | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
- phys_attribute_clear(pa, x86Mask);
-}
-
-/*
- * Set the modify bit on the specified range
- * of this map as requested.
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
*
- * This optimization stands only if each time the dirty bit
- * in vm_page_t is tested, it is also tested in the pmap.
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
*/
void
-pmap_modify_pages(
- pmap_t map,
- vm_map_offset_t sva,
- vm_map_offset_t eva)
+pmap_pageable(
+ __unused pmap_t pmap,
+ __unused vm_map_offset_t start_addr,
+ __unused vm_map_offset_t end_addr,
+ __unused boolean_t pageable)
{
- spl_t spl;
- register pt_entry_t *pde;
- register pt_entry_t *spte, *epte;
- vm_map_offset_t lva;
- vm_map_offset_t orig_sva;
-
- if (map == PMAP_NULL)
- return;
-
- PMAP_READ_LOCK(map, spl);
-
- orig_sva = sva;
- while (sva && sva < eva) {
- lva = (sva + pde_mapped_size) & ~(pde_mapped_size-1);
- if (lva > eva)
- lva = eva;
- pde = pmap_pde(map, sva);
- if (pde && (*pde & INTEL_PTE_VALID)) {
- spte = (pt_entry_t *)pmap_pte(map, (sva & ~(pde_mapped_size-1)));
- if (lva) {
- spte = &spte[ptenum(sva)];
- epte = &spte[intel_btop(lva-sva)];
- } else {
- epte = &spte[intel_btop(pde_mapped_size)];
- spte = &spte[ptenum(sva)];
- }
- while (spte < epte) {
- if (*spte & INTEL_PTE_VALID) {
- pmap_store_pte(spte, *spte
- | INTEL_PTE_MOD
- | INTEL_PTE_WRITE);
- }
- spte++;
- }
- }
- sva = lva;
- pde++;
- }
- PMAP_UPDATE_TLBS(map, orig_sva, eva);
-
- PMAP_READ_UNLOCK(map, spl);
+#ifdef lint
+ pmap++; start_addr++; end_addr++; pageable++;
+#endif /* lint */
}
-
void
invalidate_icache(__unused vm_offset_t addr,
__unused unsigned cnt,
return;
}
+#if CONFIG_DTRACE
+/*
+ * Constrain DTrace copyin/copyout actions
+ */
+extern kern_return_t dtrace_copyio_preflight(addr64_t);
+extern kern_return_t dtrace_copyio_postflight(addr64_t);
+
+kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
+{
+ thread_t thread = current_thread();
+
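+ /* Refuse DTrace copyio when running against the kernel map or while this thread already has a copy window active */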
+ if (current_map() == kernel_map)
+ return KERN_FAILURE;
+ else if (thread->machine.specFlags & CopyIOActive)
+ return KERN_FAILURE;
+ else
+ return KERN_SUCCESS;
+}
+
+kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
+{
+ return KERN_SUCCESS;
+}
+#endif /* CONFIG_DTRACE */
+
#if MACH_KDB
/* show phys page mappings and attributes */
extern void db_show_page(pmap_paddr_t pa);
+#if 0
void
db_show_page(pmap_paddr_t pa)
{
pv_h = pai_to_pvh(pai);
attr = pmap_phys_attributes[pai];
- printf("phys page %x ", pa);
+ printf("phys page %llx ", pa);
if (attr & PHYS_MODIFIED)
printf("modified, ");
if (attr & PHYS_REFERENCED)
printf(" not mapped\n");
for (; pv_h; pv_h = pv_h->next)
if (pv_h->pmap)
- printf("%x in pmap %x\n", pv_h->va, pv_h->pmap);
+ printf("%llx in pmap %p\n", pv_h->va, pv_h->pmap);
}
+#endif
#endif /* MACH_KDB */
#if MACH_KDB
+#if 0
void db_kvtophys(vm_offset_t);
void db_show_vaddrs(pt_entry_t *);
continue;
}
pdecnt++;
- ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK);
+ ptep = (pt_entry_t *) ((unsigned long)(*pdep) & ~INTEL_OFFMASK);
db_printf("dir[%4d]: 0x%x\n", y, *pdep);
for (x = 0; x < NPTEPG; x++, ptep++) {
if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) {
db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt);
}
+#endif
#endif /* MACH_KDB */
#include <mach_vm_debug.h>
phys_page_exists(
ppnum_t pn)
{
- pmap_paddr_t phys;
-
assert(pn != vm_page_fictitious_addr);
if (!pmap_initialized)
return (TRUE);
- phys = (pmap_paddr_t) i386_ptob(pn);
- if (!pmap_valid_page(pn))
+
+ if (pn == vm_page_guard_addr)
+ return FALSE;
+
+ if (!managed_page(ppn_to_pai(pn)))
return (FALSE);
return TRUE;
}
void
-mapping_free_prime()
+pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
{
- int i;
- pv_entry_t pv_e;
-
- for (i = 0; i < (5 * PV_ALLOC_CHUNK); i++) {
- pv_e = (pv_entry_t) zalloc(pv_list_zone);
- PV_FREE(pv_e);
- }
-}
+ int i;
+ pt_entry_t *opte, *npte;
+ pt_entry_t pte;
+ spl_t s;
-void
-mapping_adjust()
-{
- pv_entry_t pv_e;
- int i;
- int spl;
-
- if (mapping_adjust_call == NULL) {
- thread_call_setup(&mapping_adjust_call_data,
- (thread_call_func_t) mapping_adjust,
- (thread_call_param_t) NULL);
- mapping_adjust_call = &mapping_adjust_call_data;
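+ /* Alias each kernel commpage PTE into the user commpage range: user-accessible and global, but with write permission stripped */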
+ for (i = 0; i < cnt; i++) {
+ s = splhigh();
+ opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
+ if (0 == opte)
+ panic("kernel_commpage");
+ pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
+ pte &= ~INTEL_PTE_WRITE; // ensure read only
+ npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
+ if (0 == npte)
+ panic("user_commpage");
+ pmap_store_pte(npte, pte);
+ splx(s);
+ kernel_commpage += INTEL_PGBYTES;
+ user_commpage += INTEL_PGBYTES;
}
- /* XXX rethink best way to do locking here */
- if (pv_free_count < PV_LOW_WATER_MARK) {
- for (i = 0; i < PV_ALLOC_CHUNK; i++) {
- pv_e = (pv_entry_t) zalloc(pv_list_zone);
- SPLVM(spl);
- PV_FREE(pv_e);
- SPLX(spl);
- }
- }
- mappingrecurse = 0;
}
-void
-pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt)
-{
- int i;
- pt_entry_t *opte, *npte;
- pt_entry_t pte;
-
-
- for (i = 0; i < cnt; i++) {
- opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage);
- if (0 == opte) panic("kernel_commpage");
- pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
- pte &= ~INTEL_PTE_WRITE; // ensure read only
- npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage);
- if (0 == npte) panic("user_commpage");
- pmap_store_pte(npte, pte);
- kernel_commpage += INTEL_PGBYTES;
- user_commpage += INTEL_PGBYTES;
- }
-}
#define PMAP_COMMPAGE64_CNT (_COMM_PAGE64_AREA_USED/PAGE_SIZE)
pt_entry_t pmap_commpage64_ptes[PMAP_COMMPAGE64_CNT];
void
pmap_commpage64_init(vm_offset_t kernel_commpage, __unused vm_map_offset_t user_commpage, int cnt)
{
- spl_t s;
- int i;
- pt_entry_t *kptep;
-
- s = splhigh();
- for (i = 0; i< cnt; i++) {
- kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
- if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID))) panic("pmap_commpage64_init pte");
- pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
- }
- splx(s);
-
-}
-
-void
-pmap_map_sharedpage(__unused task_t task, pmap_t p)
-{
- pt_entry_t *ptep;
- spl_t s;
- int i;
-
- if (!p->pm_64bit) return;
- /* setup high 64 bit commpage */
- s = splhigh();
- while ((ptep = pmap_pte(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS)) == PD_ENTRY_NULL) {
- splx(s);
- pmap_expand(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS);
- s = splhigh();
- }
+ int i;
+ pt_entry_t *kptep;
- for (i = 0; i< PMAP_COMMPAGE64_CNT; i++) {
- ptep = pmap_pte(p, (uint64_t)_COMM_PAGE64_BASE_ADDRESS + (i*PAGE_SIZE));
- if (0 == ptep) panic("pmap_map_sharedpage");
- pmap_store_pte(ptep, pmap_commpage64_ptes[i]);
- }
- splx(s);
+ PMAP_LOCK(kernel_pmap);
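+ /* Capture user-readable, non-writable PTE templates for the 64-bit commpage pages while holding the kernel pmap lock */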
+ for (i = 0; i < cnt; i++) {
+ kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE));
+ if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID)))
+ panic("pmap_commpage64_init pte");
+ pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER);
+ }
+ PMAP_UNLOCK(kernel_pmap);
}
-void
-pmap_unmap_sharedpage(pmap_t pmap)
-{
- spl_t s;
- pt_entry_t *ptep;
- int i;
-
- if (!pmap->pm_64bit) return;
- s = splhigh();
- for (i = 0; i< PMAP_COMMPAGE64_CNT; i++) {
- ptep = pmap_pte(pmap, (uint64_t)_COMM_PAGE64_BASE_ADDRESS + (i*PAGE_SIZE));
- if (ptep) pmap_store_pte(ptep, 0);
- }
- splx(s);
-}
static cpu_pmap_t cpu_pmap_master;
address = (vm_offset_t)mapaddr;
for (i = 0; i < PMAP_NWINDOWS; i++, address += PAGE_SIZE) {
+ spl_t s;
+ s = splhigh();
while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
pmap_expand(kernel_pmap, (vm_map_offset_t)address);
* (int *) pte = 0;
cp->mapwindow[i].prv_CADDR = (caddr_t) address;
cp->mapwindow[i].prv_CMAP = pte;
+ splx(s);
}
vm_map_unlock(kernel_map);
}
}
}
-
mapwindow_t *
pmap_get_mapwindow(pt_entry_t pentry)
{
mapwindow_t *mp;
int i;
- boolean_t istate;
-
- /*
- * can be called from hardware interrupt context
- * so we need to protect the lookup process
- */
- istate = ml_set_interrupts_enabled(FALSE);
+ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+ /* fold in cache attributes for this physical page */
+ pentry |= pmap_get_cache_attributes(i386_btop(pte_to_pa(pentry)));
/*
* Note: 0th map reserved for pmap_pte()
*/
mp = &current_cpu_datap()->cpu_pmap->mapwindow[i];
if (*mp->prv_CMAP == 0) {
- *mp->prv_CMAP = pentry;
- break;
- }
- }
- if (i >= PMAP_NWINDOWS)
- mp = NULL;
- (void) ml_set_interrupts_enabled(istate);
-
- return (mp);
-}
-
-
-/*
- * kern_return_t pmap_nest(grand, subord, vstart, size)
- *
- * grand = the pmap that we will nest subord into
- * subord = the pmap that goes into the grand
- * vstart = start of range in pmap to be inserted
- * nstart = start of range in pmap nested pmap
- * size = Size of nest area (up to 16TB)
- *
- * Inserts a pmap into another. This is used to implement shared segments.
- *
- * on x86 this is very limited right now. must be exactly 1 segment.
- *
- * Note that we depend upon higher level VM locks to insure that things don't change while
- * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
- * or do 2 nests at once.
- */
-
-
-kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size) {
-
- vm_map_offset_t vaddr, nvaddr;
- pd_entry_t *pde,*npde;
- unsigned int i, need_flush;
- unsigned int num_pde;
- spl_t s;
-
- // do validity tests
-
- if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this for multiples of 256MB */
- if((size >> 28) > 65536) return KERN_INVALID_VALUE; /* Max size we can nest is 16TB */
- if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
- if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
- if(size == 0) {
- panic("pmap_nest: size is invalid - %016llX\n", size);
- }
- if ((size >> 28) != 1) panic("pmap_nest: size 0x%llx must be 0x%x", size, NBPDE);
-
- subord->pm_shared = TRUE;
-
- // prepopulate subord pmap pde's if necessary
-
- if (cpu_64bit) {
- s = splhigh();
- while (PD_ENTRY_NULL == (npde = pmap_pde(subord, nstart))) {
- splx(s);
- pmap_expand(subord, nstart);
- s = splhigh();
- }
- splx(s);
- }
-
- PMAP_READ_LOCK(subord,s);
- nvaddr = (vm_map_offset_t)nstart;
- need_flush = 0;
- num_pde = size >> PDESHIFT;
-
- for (i=0;i<num_pde;i++) {
- npde = pmap_pde(subord, nvaddr);
- if ((0 == npde) || (*npde++ & INTEL_PTE_VALID) == 0) {
- PMAP_READ_UNLOCK(subord,s);
- pmap_expand(subord, nvaddr); // pmap_expand handles races
- PMAP_READ_LOCK(subord,s);
- need_flush++;
- }
- nvaddr += NBPDE;
- }
-
- if (need_flush) {
- nvaddr = (vm_map_offset_t)nstart;
- PMAP_UPDATE_TLBS(subord, nvaddr, nvaddr + (1 << 28) -1 );
- }
- PMAP_READ_UNLOCK(subord,s);
-
- // copy pde's from subord pmap into grand pmap
-
- if (cpu_64bit) {
- s = splhigh();
- while (PD_ENTRY_NULL == (pde = pmap_pde(grand, vstart))) {
- splx(s);
- pmap_expand(grand, vstart);
- s = splhigh();
- }
- splx(s);
- }
+ pmap_store_pte(mp->prv_CMAP, pentry);
- PMAP_READ_LOCK(grand,s);
- vaddr = (vm_map_offset_t)vstart;
- for (i=0;i<num_pde;i++,pde++) {
- pd_entry_t tpde;
- npde = pmap_pde(subord, nstart);
- if (npde == 0) panic("pmap_nest: no npde, subord 0x%x nstart 0x%llx", subord, nstart);
- tpde = *npde;
- nstart += NBPDE;
- pde = pmap_pde(grand, vaddr);
- if (pde == 0) panic("pmap_nest: no pde, grand 0x%x vaddr 0x%llx", grand, vaddr);
- vaddr += NBPDE;
- pmap_store_pte(pde, tpde);
- }
- PMAP_UPDATE_TLBS(grand, vaddr, vaddr + (1 << 28) -1 );
+ invlpg((uintptr_t)mp->prv_CADDR);
- PMAP_READ_UNLOCK(grand,s);
+ return (mp);
+ }
+ }
+ panic("pmap_get_mapwindow: no windows available");
- return KERN_SUCCESS;
+ return NULL;
}
-/*
- * kern_return_t pmap_unnest(grand, vaddr)
- *
- * grand = the pmap that we will nest subord into
- * vaddr = start of range in pmap to be unnested
- *
- * Removes a pmap from another. This is used to implement shared segments.
- * On the current PPC processors, this is limited to segment (256MB) aligned
- * segment sized ranges.
- */
-
-kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr) {
-
- spl_t s;
- pd_entry_t *pde;
- unsigned int i;
- unsigned int num_pde;
-
- PMAP_READ_LOCK(grand,s);
-
- // invalidate all pdes for segment at vaddr in pmap grand
-
- num_pde = (1<<28) >> PDESHIFT;
-
- for (i=0;i<num_pde;i++,pde++) {
- pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
- if (pde == 0) panic("pmap_unnest: no pde, grand 0x%x vaddr 0x%llx\n", grand, vaddr);
- pmap_store_pte(pde, (pd_entry_t)0);
- vaddr += NBPDE;
- }
- PMAP_UPDATE_TLBS(grand, vaddr, vaddr + (1<<28) -1 );
- PMAP_READ_UNLOCK(grand,s);
-
- return KERN_SUCCESS; /* Bye, bye, butterfly... */
+void
+pmap_put_mapwindow(mapwindow_t *mp)
+{
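+ /* Clearing the CMAP entry releases the window; the stale translation is flushed by the invlpg in pmap_get_mapwindow when the window is next claimed */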
+ pmap_store_pte(mp->prv_CMAP, 0);
}
void
pmap_switch(pmap_t tpmap)
{
spl_t s;
- int my_cpu;
s = splhigh(); /* Make sure interruptions are disabled */
- my_cpu = cpu_number();
- set_dirbase(tpmap, my_cpu);
+ set_dirbase(tpmap, current_thread());
splx(s);
}
}
void
-pt_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
- vm_size_t *alloc_size, int *collectable, int *exhaustable)
+pt_fake_zone_init(int zone_index)
+{
+ pt_fake_zone_index = zone_index;
+}
+
+void
+pt_fake_zone_info(int *count,
+ vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
+ uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
*count = inuse_ptepages_count;
*cur_size = PAGE_SIZE * inuse_ptepages_count;
*max_size = PAGE_SIZE * (inuse_ptepages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count);
*elem_size = PAGE_SIZE;
*alloc_size = PAGE_SIZE;
+ *sum_size = alloc_ptepages_count * PAGE_SIZE;
*collectable = 1;
*exhaustable = 0;
+ *caller_acct = 1;
}
vm_offset_t pmap_cpu_high_map_vaddr(int cpu, enum high_cpu_types e)
a = e + HIGH_CPU_END * cpu_number();
vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
- *(pte_unique_base + a) = pte;
+ pmap_store_pte(pte_unique_base + a, pte);
/* TLB flush for this page for this cpu */
invlpg((uintptr_t)vaddr);
return vaddr;
}
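+/* Send an NMI to every CPU in cpu_mask, then spin for roughly three lock-timeout periods so the targets can record state; used just before panicking on a TLB-shootdown timeout */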
+static inline void
+pmap_cpuset_NMIPI(cpu_set cpu_mask) {
+ unsigned int cpu, cpu_bit;
+ uint64_t deadline;
+
+ for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+ if (cpu_mask & cpu_bit)
+ cpu_NMI_interrupt(cpu);
+ }
+ deadline = mach_absolute_time() + (((uint64_t)LockTimeOut) * 3);
+ while (mach_absolute_time() < deadline)
+ cpu_pause();
+}
/*
* Called with pmap locked, we:
* - return ... the caller will unlock the pmap
*/
void
-pmap_flush_tlbs(pmap_t pmap)
+pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv)
{
unsigned int cpu;
unsigned int cpu_bit;
boolean_t flush_self = FALSE;
uint64_t deadline;
- assert(!ml_get_interrupts_enabled());
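+ /* On multiprocessor systems the caller must leave interrupts enabled, presumably so this CPU can still service TLB-shootdown IPIs while it waits, and must hold preemption disabled so it stays bound to this CPU */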
+ assert((processor_avail_count < 2) ||
+ (ml_get_interrupts_enabled() && get_preemption_level() != 0));
/*
* Scan other cpus for matching active or task CR3.
for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
if (!cpu_datap(cpu)->cpu_running)
continue;
- if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
- (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
+ if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
+ (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
(pmap->pm_shared) ||
((pmap == kernel_pmap) &&
(!CPU_CR3_IS_ACTIVE(cpu) ||
}
}
+ PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
+ (uintptr_t) pmap, cpus_to_signal, flush_self, startv, 0);
+
if (cpus_to_signal) {
- KERNEL_DEBUG(0xef800024 | DBG_FUNC_START, cpus_to_signal, 0, 0, 0, 0);
+ cpu_set cpus_to_respond = cpus_to_signal;
deadline = mach_absolute_time() + LockTimeOut;
/*
* Wait for those other cpus to acknowledge
*/
- for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
- while ((cpus_to_signal & cpu_bit) != 0) {
- if (!cpu_datap(cpu)->cpu_running ||
- cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
- !CPU_CR3_IS_ACTIVE(cpu)) {
- cpus_to_signal &= ~cpu_bit;
- break;
+ while (cpus_to_respond != 0) {
+ long orig_acks = 0;
+
+ for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+ if ((cpus_to_respond & cpu_bit) != 0) {
+ if (!cpu_datap(cpu)->cpu_running ||
+ cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
+ !CPU_CR3_IS_ACTIVE(cpu)) {
+ cpus_to_respond &= ~cpu_bit;
+ }
+ cpu_pause();
}
- if (mach_absolute_time() > deadline)
- panic("pmap_flush_tlbs() "
- "timeout pmap=%p cpus_to_signal=%p",
- pmap, cpus_to_signal);
- cpu_pause();
+ if (cpus_to_respond == 0)
+ break;
+ }
+
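+ /* Deadline passed: unless machine timeouts are suspended, NMI the unresponsive CPUs to capture their state, then panic */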
+ if (cpus_to_respond && (mach_absolute_time() > deadline)) {
+ if (machine_timeout_suspended())
+ continue;
+ pmap_tlb_flush_timeout = TRUE;
+ orig_acks = NMIPI_acks;
+ pmap_cpuset_NMIPI(cpus_to_respond);
+
+ panic("TLB invalidation IPI timeout: "
+ "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
+ cpus_to_respond, orig_acks, NMIPI_acks);
}
- if (cpus_to_signal == 0)
- break;
}
- KERNEL_DEBUG(0xef800024 | DBG_FUNC_END, cpus_to_signal, 0, 0, 0, 0);
}
-
/*
* Flush local tlb if required.
* We need this flush even if the pmap being changed
if (flush_self)
flush_tlb();
+ if ((pmap == kernel_pmap) && (flush_self != TRUE)) {
+ panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
+ }
+
+ PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
+ (uintptr_t) pmap, cpus_to_signal, startv, endv, 0);
}
void
process_pmap_updates(void)
{
+ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
flush_tlb();
current_cpu_datap()->cpu_tlb_invalid = FALSE;
void
pmap_update_interrupt(void)
{
- KERNEL_DEBUG(0xef800028 | DBG_FUNC_START, 0, 0, 0, 0, 0);
-
- assert(!ml_get_interrupts_enabled());
+ PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
+ 0, 0, 0, 0, 0);
process_pmap_updates();
- KERNEL_DEBUG(0xef800028 | DBG_FUNC_END, 0, 0, 0, 0, 0);
-}
-
-
-unsigned int pmap_cache_attributes(ppnum_t pn) {
-
- if (!pmap_valid_page(pn))
- return (VM_WIMG_IO);
-
- return (VM_WIMG_COPYBACK);
+ PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
+ 0, 0, 0, 0, 0);
}
-
#ifdef PMAP_DEBUG
void
pmap_dump(pmap_t p)