X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/91447636331957f3d9b5ca5b508f07c526b0074d..b7266188b87f3620ec3f9f717e57194a7dd989fe:/osfmk/i386/pmap.c diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c index e43dac464..e7135803a 100644 --- a/osfmk/i386/pmap.c +++ b/osfmk/i386/pmap.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ @@ -83,15 +89,17 @@ */ #include -#include #include #include +#include + #include #include #include #include +#include #include #include @@ -115,7 +123,14 @@ #include #include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include #if MACH_KDB #include @@ -124,127 +139,110 @@ #include #endif /* MACH_KDB */ -#include - #include #include +#include +#include + + +/* #define DEBUGINTERRUPTS 1 uncomment to ensure pmap callers have interrupts enabled */ +#ifdef DEBUGINTERRUPTS +#define pmap_intr_assert() {if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);} +#else +#define pmap_intr_assert() +#endif + +#ifdef IWANTTODEBUG +#undef DEBUG +#define DEBUG 1 +#define POSTCODE_DELAY 1 +#include +#endif /* IWANTTODEBUG */ /* * Forward declarations for internal functions. 
*/ -void pmap_expand( - pmap_t map, - vm_offset_t v); -extern void pmap_remove_range( +void pmap_remove_range( pmap_t pmap, - vm_offset_t va, + vm_map_offset_t va, pt_entry_t *spte, pt_entry_t *epte); void phys_attribute_clear( - ppnum_t phys, + ppnum_t phys, int bits); -boolean_t phys_attribute_test( - ppnum_t phys, +int phys_attribute_test( + ppnum_t phys, int bits); void phys_attribute_set( - ppnum_t phys, + ppnum_t phys, int bits); -void pmap_growkernel( - vm_offset_t addr); - void pmap_set_reference( ppnum_t pn); -void pmap_movepage( - unsigned long from, - unsigned long to, - vm_size_t size); - -pt_entry_t * pmap_mapgetpte( - vm_map_t map, - vm_offset_t v); - boolean_t phys_page_exists( ppnum_t pn); -#ifndef set_dirbase -void set_dirbase(vm_offset_t dirbase); -#endif /* set_dirbase */ -#define iswired(pte) ((pte) & INTEL_PTE_WIRED) +#ifdef PMAP_DEBUG +void dump_pmap(pmap_t); +void dump_4GB_pdpt(pmap_t p); +void dump_4GB_pdpt_thread(thread_t tp); +#endif -#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry); -#define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry); +int nx_enabled = 1; /* enable no-execute protection */ +#ifdef CONFIG_EMBEDDED +int allow_data_exec = 0; /* no exec from data, embedded is hardcore like that */ +#else +int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */ +#endif +int allow_stack_exec = 0; /* No apps may execute from the stack by default */ -#define value_64bit(value) ((value) & 0xFFFFFFFF00000000LL) -#define low32(x) ((unsigned int)((x) & 0x00000000ffffffffLL)) +boolean_t cpu_64bit = FALSE; +boolean_t pmap_trace = FALSE; /* - * Private data structures. + * when spinning through pmap_remove + * ensure that we don't spend too much + * time with preemption disabled. + * I'm setting the current threshold + * to 20us */ +#define MAX_PREEMPTION_LATENCY_NS 20000 -/* - * For each vm_page_t, there is a list of all currently - * valid virtual mappings of that page. An entry is - * a pv_entry_t; the list is the pv_table. - */ +uint64_t max_preemption_latency_tsc = 0; -typedef struct pv_entry { - struct pv_entry *next; /* next pv_entry */ - pmap_t pmap; /* pmap where mapping lies */ - vm_offset_t va; /* virtual address for mapping */ -} *pv_entry_t; -#define PV_ENTRY_NULL ((pv_entry_t) 0) +pv_hashed_entry_t *pv_hash_table; /* hash lists */ + +uint32_t npvhash = 0; -pv_entry_t pv_head_table; /* array of entries, one per page */ /* * pv_list entries are kept on a list that can only be accessed * with the pmap system locked (at SPLVM, not in the cpus_active set). - * The list is refilled from the pv_list_zone if it becomes empty. + * The list is refilled from the pv_hashed_list_zone if it becomes empty. 
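 *
 * For illustration, a minimal sketch of that refill pattern in terms of the
 * symbols declared above (that the free list is chained through the entry's
 * queue link is an assumption here, not the literal macro used later in
 * this file):
 *
 *	pv_hashed_entry_t pvh_e;
 *
 *	simple_lock(&pv_hashed_free_list_lock);
 *	if ((pvh_e = pv_hashed_free_list) != PV_HASHED_ENTRY_NULL) {
 *		pv_hashed_free_list = (pv_hashed_entry_t) pvh_e->qlink.next;
 *		pv_hashed_free_count--;
 *	}
 *	simple_unlock(&pv_hashed_free_list_lock);
 *	if (pvh_e == PV_HASHED_ENTRY_NULL)	/* free list empty: refill from the zone */
 *		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);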
*/ -pv_entry_t pv_free_list; /* free list at SPLVM */ -decl_simple_lock_data(,pv_free_list_lock) +pv_rooted_entry_t pv_free_list = PV_ROOTED_ENTRY_NULL; /* free list at SPLVM */ +pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL; +pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL; +decl_simple_lock_data(,pv_hashed_free_list_lock) +decl_simple_lock_data(,pv_hashed_kern_free_list_lock) +decl_simple_lock_data(,pv_hash_table_lock) + int pv_free_count = 0; -#define PV_LOW_WATER_MARK 5000 -#define PV_ALLOC_CHUNK 2000 -thread_call_t mapping_adjust_call; -static thread_call_data_t mapping_adjust_call_data; -int mappingrecurse = 0; - -#define PV_ALLOC(pv_e) { \ - simple_lock(&pv_free_list_lock); \ - if ((pv_e = pv_free_list) != 0) { \ - pv_free_list = pv_e->next; \ - pv_free_count--; \ - if (pv_free_count < PV_LOW_WATER_MARK) \ - if (hw_compare_and_store(0,1,&mappingrecurse)) \ - thread_call_enter(mapping_adjust_call); \ - } \ - simple_unlock(&pv_free_list_lock); \ -} - -#define PV_FREE(pv_e) { \ - simple_lock(&pv_free_list_lock); \ - pv_e->next = pv_free_list; \ - pv_free_list = pv_e; \ - pv_free_count++; \ - simple_unlock(&pv_free_list_lock); \ -} - -zone_t pv_list_zone; /* zone of pv_entry structures */ - -#ifdef PAE -static zone_t pdpt_zone; -#endif +int pv_hashed_free_count = 0; +int pv_kern_free_count = 0; +int pv_hashed_kern_free_count = 0; +zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */ + +static zone_t pdpt_zone; /* * Each entry in the pv_head_table is locked by a bit in the @@ -255,60 +253,46 @@ static zone_t pdpt_zone; char *pv_lock_table; /* pointer to array of bits */ #define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE) +char *pv_hash_lock_table; +#define pv_hash_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE) + /* * First and last physical addresses that we maintain any information * for. Initialized to zero so that pmap operations done before * pmap_init won't touch any non-existent structures. */ -pmap_paddr_t vm_first_phys = (pmap_paddr_t) 0; -pmap_paddr_t vm_last_phys = (pmap_paddr_t) 0; boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ -pmap_paddr_t kernel_vm_end = (pmap_paddr_t)0; - -#define GROW_KERNEL_FUNCTION_IMPLEMENTED 1 -#if GROW_KERNEL_FUNCTION_IMPLEMENTED /* not needed until growing kernel pmap */ static struct vm_object kptobj_object_store; static vm_object_t kptobj; -#endif - - -/* - * Index into pv_head table, its lock bits, and the modify/reference - * bits starting at vm_first_phys. - */ - -#define pa_index(pa) (i386_btop(pa - vm_first_phys)) - -#define pai_to_pvh(pai) (&pv_head_table[pai]) -#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table) -#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table) /* * Array of physical page attribites for managed pages. * One byte per physical page. */ char *pmap_phys_attributes; +unsigned int last_managed_page = 0; /* * Physical page attributes. Copy bits from PTE definition. */ #define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */ #define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */ -#define PHYS_NCACHE INTEL_PTE_NCACHE +#define PHYS_MANAGED INTEL_PTE_VALID /* page is managed */ /* * Amount of virtual memory mapped by one * page-directory entry. 
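 *
 * As a concrete figure, assuming the PAE/IA-32e layout used in this file
 * (512 page-table entries per page-table page, 4 KB pages):
 *
 *	one PDE covers 512 * 4096 bytes = 0x200000 bytes = 2 MB,
 *
 * the same value pmap_bootstrap() later assigns explicitly for 64-bit mode
 * with "pde_mapped_size = 512*4096".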
*/ #define PDE_MAPPED_SIZE (pdetova(1)) +uint64_t pde_mapped_size; /* * Locking and TLB invalidation */ /* - * Locking Protocols: + * Locking Protocols: (changed 2/2007 JK) * * There are two structures in the pmap module that need locking: * the pmaps themselves, and the per-page pv_lists (which are locked @@ -318,95 +302,50 @@ char *pmap_phys_attributes; * pmap_remove_all and pmap_copy_on_write operate on a physical page * basis and want to do the locking in the reverse order, i.e. lock * a pv_list and then go through all the pmaps referenced by that list. - * To protect against deadlock between these two cases, the pmap_lock - * is used. There are three different locking protocols as a result: - * - * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only - * the pmap. * - * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read - * lock on the pmap_lock (shared read), then lock the pmap - * and finally the pv_lists as needed [i.e. pmap lock before - * pv_list lock.] - * - * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...) - * Get a write lock on the pmap_lock (exclusive write); this - * also guaranteees exclusive access to the pv_lists. Lock the - * pmaps as needed. - * - * At no time may any routine hold more than one pmap lock or more than - * one pv_list lock. Because interrupt level routines can allocate - * mbufs and cause pmap_enter's, the pmap_lock and the lock on the - * kernel_pmap can only be held at splhigh. - */ - -/* - * We raise the interrupt level to splvm, to block interprocessor - * interrupts during pmap operations. We must take the CPU out of - * the cpus_active set while interrupts are blocked. + * The system wide pmap lock has been removed. Now, paths take a lock + * on the pmap before changing its 'shape' and the reverse order lockers + * (coming in by phys ppn) take a lock on the corresponding pv and then + * retest to be sure nothing changed during the window before they locked + * and can then run up/down the pv lists holding the list lock. This also + * lets the pmap layer run (nearly completely) interrupt enabled, unlike + * previously. 
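 *
 * A hedged sketch of the shape of such a reverse-order (physical page
 * first) check; pn, pai, pv_e and pte stand for a page number, its index,
 * one of its pv entries and a pte pointer, and this is only the pattern,
 * not the literal code of the by-ppn routines below:
 *
 *	LOCK_PVH(pai);			/* also disables preemption */
 *	pte = pmap_pte(pv_e->pmap, pv_e->va);
 *	if (pte == PT_ENTRY_NULL ||
 *	    pte_to_pa(*pte) != (pmap_paddr_t) i386_ptob(pn)) {
 *		/* mapping changed before the pv lock was taken: skip/retry */
 *	} else {
 *		/* safe to run up/down the pv list for this page */
 *	}
 *	UNLOCK_PVH(pai);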
*/ -#define SPLVM(spl) { \ - spl = splhigh(); \ - mp_disable_preemption(); \ - i_bit_clear(cpu_number(), &cpus_active); \ - mp_enable_preemption(); \ -} -#define SPLX(spl) { \ - mp_disable_preemption(); \ - i_bit_set(cpu_number(), &cpus_active); \ - mp_enable_preemption(); \ - splx(spl); \ -} /* - * Lock on pmap system + * PV locking */ -lock_t pmap_system_lock; - -#define PMAP_READ_LOCK(pmap, spl) { \ - SPLVM(spl); \ - lock_read(&pmap_system_lock); \ - simple_lock(&(pmap)->lock); \ -} - -#define PMAP_WRITE_LOCK(spl) { \ - SPLVM(spl); \ - lock_write(&pmap_system_lock); \ -} -#define PMAP_READ_UNLOCK(pmap, spl) { \ - simple_unlock(&(pmap)->lock); \ - lock_read_done(&pmap_system_lock); \ - SPLX(spl); \ +#define LOCK_PVH(index) { \ + mp_disable_preemption(); \ + lock_pvh_pai(index); \ } -#define PMAP_WRITE_UNLOCK(spl) { \ - lock_write_done(&pmap_system_lock); \ - SPLX(spl); \ +#define UNLOCK_PVH(index) { \ + unlock_pvh_pai(index); \ + mp_enable_preemption(); \ } -#define PMAP_WRITE_TO_READ_LOCK(pmap) { \ - simple_lock(&(pmap)->lock); \ - lock_write_to_read(&pmap_system_lock); \ -} +/* + * PV hash locking + */ -#define LOCK_PVH(index) lock_pvh_pai(index) +#define LOCK_PV_HASH(hash) lock_hash_hash(hash) -#define UNLOCK_PVH(index) unlock_pvh_pai(index) +#define UNLOCK_PV_HASH(hash) unlock_hash_hash(hash) #if USLOCK_DEBUG extern int max_lock_loops; -extern int disableSerialOuput; #define LOOP_VAR \ unsigned int loop_count; \ - loop_count = disableSerialOuput ? max_lock_loops \ + loop_count = disable_serial_output ? max_lock_loops \ : max_lock_loops*100 #define LOOP_CHECK(msg, pmap) \ if (--loop_count == 0) { \ mp_disable_preemption(); \ - kprintf("%s: cpu %d pmap %x, cpus_active 0x%x\n", \ - msg, cpu_number(), pmap, cpus_active); \ + kprintf("%s: cpu %d pmap %x\n", \ + msg, cpu_number(), pmap); \ Debugger("deadlock detection"); \ mp_enable_preemption(); \ loop_count = max_lock_loops; \ @@ -416,76 +355,8 @@ extern int disableSerialOuput; #define LOOP_CHECK(msg, pmap) #endif /* USLOCK_DEBUG */ -#define PMAP_UPDATE_TLBS(pmap, s, e) \ -{ \ - cpu_set cpu_mask; \ - cpu_set users; \ - \ - mp_disable_preemption(); \ - cpu_mask = 1 << cpu_number(); \ - \ - /* Since the pmap is locked, other updates are locked */ \ - /* out, and any pmap_activate has finished. */ \ - \ - /* find other cpus using the pmap */ \ - users = (pmap)->cpus_using & ~cpu_mask; \ - if (users) { \ - LOOP_VAR; \ - /* signal them, and wait for them to finish */ \ - /* using the pmap */ \ - signal_cpus(users, (pmap), (s), (e)); \ - while (((pmap)->cpus_using & cpus_active & ~cpu_mask)) { \ - LOOP_CHECK("PMAP_UPDATE_TLBS", pmap); \ - cpu_pause(); \ - } \ - } \ - /* invalidate our own TLB if pmap is in use */ \ - \ - if ((pmap)->cpus_using & cpu_mask) { \ - INVALIDATE_TLB((pmap), (s), (e)); \ - } \ - \ - mp_enable_preemption(); \ -} - -#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */ - -#define INVALIDATE_TLB(m, s, e) { \ - flush_tlb(); \ -} - -/* - * Structures to keep track of pending TLB invalidations - */ -cpu_set cpus_active; -cpu_set cpus_idle; - -#define UPDATE_LIST_SIZE 4 - -struct pmap_update_item { - pmap_t pmap; /* pmap to invalidate */ - vm_offset_t start; /* start address to invalidate */ - vm_offset_t end; /* end address to invalidate */ -}; - -typedef struct pmap_update_item *pmap_update_item_t; - -/* - * List of pmap updates. If the list overflows, - * the last entry is changed to invalidate all. 
- */ -struct pmap_update_list { - decl_simple_lock_data(,lock) - int count; - struct pmap_update_item item[UPDATE_LIST_SIZE]; -} ; -typedef struct pmap_update_list *pmap_update_list_t; - -extern void signal_cpus( - cpu_set use_list, - pmap_t pmap, - vm_offset_t start, - vm_offset_t end); +unsigned pmap_memory_region_count; +unsigned pmap_memory_region_current; pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE]; @@ -493,20 +364,22 @@ pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE]; * Other useful macros. */ #define current_pmap() (vm_map_pmap(current_thread()->map)) -#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0) struct pmap kernel_pmap_store; pmap_t kernel_pmap; -#ifdef PMAP_QUEUE -decl_simple_lock_data(,free_pmap_lock) -#endif +pd_entry_t high_shared_pde; +pd_entry_t commpage64_pde; struct zone *pmap_zone; /* zone of pmap structures */ int pmap_debug = 0; /* flag for debugging prints */ -unsigned int inuse_ptepages_count = 0; /* debugging */ +unsigned int inuse_ptepages_count = 0; + +addr64_t kernel64_cr3; +boolean_t no_shared_cr3 = FALSE; /* -no_shared_cr3 boot arg */ + /* * Pmap cache. Cache is threaded through ref_count field of pmap. @@ -518,8 +391,6 @@ pmap_t pmap_cache_list; int pmap_cache_count; decl_simple_lock_data(,pmap_cache_lock) -extern vm_offset_t hole_start, hole_end; - extern char end; static int nkpt; @@ -527,88 +398,196 @@ static int nkpt; pt_entry_t *DMAP1, *DMAP2; caddr_t DADDR1; caddr_t DADDR2; +/* + * for legacy, returns the address of the pde entry. + * for 64 bit, causes the pdpt page containing the pde entry to be mapped, + * then returns the mapped address of the pde entry in that page + */ +pd_entry_t * +pmap_pde(pmap_t m, vm_map_offset_t v) +{ + pd_entry_t *pde; + if (!cpu_64bit || (m == kernel_pmap)) { + pde = (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT])); + } else { + assert(m); + assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); + pde = pmap64_pde(m, v); + } + return pde; +} + + +/* + * the single pml4 page per pmap is allocated at pmap create time and exists + * for the duration of the pmap. we allocate this page in kernel vm (to save us one + * level of page table dynamic mapping. + * this returns the address of the requested pml4 entry in the top level page. 
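 *
 * The index arithmetic assumed by this helper and the two that follow is
 * the standard IA-32e four-level split (PML4SHIFT 39, PDPTSHIFT 30,
 * PDSHIFT 21, 512 entries per level):
 *
 *	pml4 index = (vaddr >> 39) & 511
 *	pdpt index = (vaddr >> 30) & 511
 *	pde  index = (vaddr >> 21) & 511
 *	pte  index = (vaddr >> 12) & 511
 *
 * e.g. vaddr 0x00007F8000000000 selects pml4 slot 255 and pdpt slot 0.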
+ */ +static inline +pml4_entry_t * +pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr) +{ + return ((pml4_entry_t *)pmap->pm_hold + ((vm_offset_t)((vaddr>>PML4SHIFT)&(NPML4PG-1)))); +} + +/* + * maps in the pml4 page, if any, containing the pdpt entry requested + * and returns the address of the pdpt entry in that mapped page + */ +pdpt_entry_t * +pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr) +{ + pml4_entry_t newpf; + pml4_entry_t *pml4; + int i; + + assert(pmap); + assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); + if ((vaddr > 0x00007FFFFFFFFFFFULL) && (vaddr < 0xFFFF800000000000ULL)) { + return(0); + } -#if DEBUG_ALIAS -#define PMAP_ALIAS_MAX 32 -struct pmap_alias { - vm_offset_t rpc; - pmap_t pmap; - vm_offset_t va; - int cookie; -#define PMAP_ALIAS_COOKIE 0xdeadbeef -} pmap_aliasbuf[PMAP_ALIAS_MAX]; -int pmap_alias_index = 0; -extern vm_offset_t get_rpc(); + pml4 = pmap64_pml4(pmap, vaddr); -#endif /* DEBUG_ALIAS */ + if (pml4 && ((*pml4 & INTEL_PTE_VALID))) { -#define pmap_pde(m, v) (&((m)->dirbase[(vm_offset_t)(v) >> PDESHIFT])) -#define pdir_pde(d, v) (d[(vm_offset_t)(v) >> PDESHIFT]) + newpf = *pml4 & PG_FRAME; -static __inline int -pmap_is_current(pmap_t pmap) + + for (i=PMAP_PDPT_FIRST_WINDOW; i < PMAP_PDPT_FIRST_WINDOW+PMAP_PDPT_NWINDOWS; i++) { + if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) { + return((pdpt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + + ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1)))); + } + } + + current_cpu_datap()->cpu_pmap->pdpt_window_index++; + if (current_cpu_datap()->cpu_pmap->pdpt_window_index > (PMAP_PDPT_FIRST_WINDOW+PMAP_PDPT_NWINDOWS-1)) + current_cpu_datap()->cpu_pmap->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW; + pmap_store_pte( + (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CMAP), + newpf | INTEL_PTE_RW | INTEL_PTE_VALID); + invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CADDR)); + return ((pdpt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pdpt_window_index].prv_CADDR) + + ((vm_offset_t)((vaddr>>PDPTSHIFT)&(NPDPTPG-1)))); + } + + return (NULL); +} + +/* + * maps in the pdpt page, if any, containing the pde entry requested + * and returns the address of the pde entry in that mapped page + */ +pd_entry_t * +pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr) { - return (pmap == kernel_pmap || - (pmap->dirbase[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)); + pdpt_entry_t newpf; + pdpt_entry_t *pdpt; + int i; + + assert(pmap); + assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); + if ((vaddr > 0x00007FFFFFFFFFFFULL) && (vaddr < 0xFFFF800000000000ULL)) { + return(0); + } + + /* if (vaddr & (1ULL << 63)) panic("neg addr");*/ + pdpt = pmap64_pdpt(pmap, vaddr); + + if (pdpt && ((*pdpt & INTEL_PTE_VALID))) { + + newpf = *pdpt & PG_FRAME; + + for (i=PMAP_PDE_FIRST_WINDOW; i < PMAP_PDE_FIRST_WINDOW+PMAP_PDE_NWINDOWS; i++) { + if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) { + return((pd_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + + ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1)))); + } + } + + current_cpu_datap()->cpu_pmap->pde_window_index++; + if (current_cpu_datap()->cpu_pmap->pde_window_index > (PMAP_PDE_FIRST_WINDOW+PMAP_PDE_NWINDOWS-1)) + current_cpu_datap()->cpu_pmap->pde_window_index = PMAP_PDE_FIRST_WINDOW; + pmap_store_pte( + 
(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CMAP), + newpf | INTEL_PTE_RW | INTEL_PTE_VALID); + invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CADDR)); + return ((pd_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pde_window_index].prv_CADDR) + + ((vm_offset_t)((vaddr>>PDSHIFT)&(NPDPG-1)))); + } + + return (NULL); } +/* + * Because the page tables (top 3 levels) are mapped into per cpu windows, + * callers must either disable interrupts or disable preemption before calling + * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr + * is in one of those mapped windows and that cannot be allowed to change until + * the caller is done using the returned pte pointer. When done, the caller + * restores interrupts or preemption to its previous state after which point the + * vaddr for the returned pte can no longer be used + */ + /* * return address of mapped pte for vaddr va in pmap pmap. + * must be called with pre-emption or interrupts disabled + * if targeted pmap is not the kernel pmap + * since we may be passing back a virtual address that is + * associated with this cpu... pre-emption or interrupts + * must remain disabled until the caller is done using + * the pointer that was passed back . + * + * maps the pde page, if any, containing the pte in and returns + * the address of the pte in that mapped page */ pt_entry_t * -pmap_pte(pmap_t pmap, vm_offset_t va) -{ - pd_entry_t *pde; - pd_entry_t newpf; - - pde = pmap_pde(pmap, va); - if (*pde != 0) { - if (pmap_is_current(pmap)) - return( vtopte(va)); - newpf = *pde & PG_FRAME; - if (((*CM4) & PG_FRAME) != newpf) { - *CM4 = newpf | INTEL_PTE_RW | INTEL_PTE_VALID; - invlpg((u_int)CA4); - } - return (pt_entry_t *)CA4 + (i386_btop(va) & (NPTEPG-1)); - } - return(0); -} - -#define DEBUG_PTE_PAGE 0 - -#if DEBUG_PTE_PAGE -void -ptep_check( - ptep_t ptep) +pmap_pte(pmap_t pmap, vm_map_offset_t vaddr) { - register pt_entry_t *pte, *epte; - int ctu, ctw; + pd_entry_t *pde; + pd_entry_t newpf; + int i; - /* check the use and wired counts */ - if (ptep == PTE_PAGE_NULL) - return; - pte = pmap_pte(ptep->pmap, ptep->va); - epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t); - ctu = 0; - ctw = 0; - while (pte < epte) { - if (pte->pfn != 0) { - ctu++; - if (pte->wired) - ctw++; + assert(pmap); + pde = pmap_pde(pmap,vaddr); + + if (pde && ((*pde & INTEL_PTE_VALID))) { + if (*pde & INTEL_PTE_PS) + return pde; + if (pmap == kernel_pmap) + return (vtopte(vaddr)); /* compat kernel still has pte's mapped */ +#if TESTING + if (ml_get_interrupts_enabled() && get_preemption_level() == 0) + panic("pmap_pte: unsafe call"); +#endif + assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); + + newpf = *pde & PG_FRAME; + + for (i=PMAP_PTE_FIRST_WINDOW; i < PMAP_PTE_FIRST_WINDOW+PMAP_PTE_NWINDOWS; i++) { + if (((*(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP)) & PG_FRAME) == newpf) { + return((pt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR) + + ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1))); + } } - pte++; - } - if (ctu != ptep->use_count || ctw != ptep->wired_count) { - printf("use %d wired %d - actual use %d wired %d\n", - ptep->use_count, ptep->wired_count, ctu, ctw); - panic("pte count"); + current_cpu_datap()->cpu_pmap->pte_window_index++; + if (current_cpu_datap()->cpu_pmap->pte_window_index > (PMAP_PTE_FIRST_WINDOW+PMAP_PTE_NWINDOWS-1)) + 
current_cpu_datap()->cpu_pmap->pte_window_index = PMAP_PTE_FIRST_WINDOW; + pmap_store_pte( + (current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CMAP), + newpf | INTEL_PTE_RW | INTEL_PTE_VALID); + invlpg((u_int)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CADDR)); + return ((pt_entry_t *)(current_cpu_datap()->cpu_pmap->mapwindow[current_cpu_datap()->cpu_pmap->pte_window_index].prv_CADDR) + + ((vm_offset_t)i386_btop(vaddr) & (NPTEPG-1))); } + + return(NULL); } -#endif /* DEBUG_PTE_PAGE */ + /* * Map memory at initialization. The physical addresses being @@ -619,17 +598,18 @@ ptep_check( */ vm_offset_t pmap_map( - register vm_offset_t virt, - register vm_offset_t start_addr, - register vm_offset_t end_addr, - register vm_prot_t prot) + vm_offset_t virt, + vm_map_offset_t start_addr, + vm_map_offset_t end_addr, + vm_prot_t prot, + unsigned int flags) { - register int ps; + int ps; ps = PAGE_SIZE; while (start_addr < end_addr) { - pmap_enter(kernel_pmap, - virt, (ppnum_t) i386_btop(start_addr), prot, 0, FALSE); + pmap_enter(kernel_pmap, (vm_map_offset_t)virt, + (ppnum_t) i386_btop(start_addr), prot, flags, FALSE); virt += ps; start_addr += ps; } @@ -640,116 +620,284 @@ pmap_map( * Back-door routine for mapping kernel VM at initialization. * Useful for mapping memory outside the range * Sets no-cache, A, D. - * [vm_first_phys, vm_last_phys) (i.e., devices). * Otherwise like pmap_map. */ vm_offset_t pmap_map_bd( - register vm_offset_t virt, - register vm_offset_t start_addr, - register vm_offset_t end_addr, - vm_prot_t prot) + vm_offset_t virt, + vm_map_offset_t start_addr, + vm_map_offset_t end_addr, + vm_prot_t prot, + unsigned int flags) { - register pt_entry_t template; - register pt_entry_t *pte; + pt_entry_t template; + pt_entry_t *pte; + spl_t spl; template = pa_to_pte(start_addr) - | INTEL_PTE_NCACHE | INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_WIRED | INTEL_PTE_VALID; + + if(flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) { + template |= INTEL_PTE_NCACHE; + if(!(flags & (VM_MEM_GUARDED | VM_WIMG_USE_DEFAULT))) + template |= INTEL_PTE_PTA; + } + if (prot & VM_PROT_WRITE) template |= INTEL_PTE_WRITE; - /* XXX move pmap_pte out of loop, once one pte mapped, all are */ + while (start_addr < end_addr) { - pte = pmap_pte(kernel_pmap, virt); + spl = splhigh(); + pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt); if (pte == PT_ENTRY_NULL) { panic("pmap_map_bd: Invalid kernel address\n"); } - WRITE_PTE_FAST(pte, template) + pmap_store_pte(pte, template); + splx(spl); pte_increment_pa(template); virt += PAGE_SIZE; start_addr += PAGE_SIZE; - } + } + flush_tlb(); return(virt); } -extern char *first_avail; -extern vm_offset_t virtual_avail, virtual_end; -extern pmap_paddr_t avail_start, avail_end; -extern vm_offset_t etext; -extern void *sectHIBB; -extern int sectSizeHIB; +extern char *first_avail; +extern vm_offset_t virtual_avail, virtual_end; +extern pmap_paddr_t avail_start, avail_end; + +void +pmap_cpu_init(void) +{ + /* + * Here early in the life of a processor (from cpu_mode_init()). + * If we're not in 64-bit mode, enable the global TLB feature. + * Note: regardless of mode we continue to set the global attribute + * bit in ptes for all (32-bit) global pages such as the commpage. + */ + if (!cpu_64bit) { + set_cr4(get_cr4() | CR4_PGE); + } + + /* + * Initialize the per-cpu, TLB-related fields. 
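 *
 * For reference: cpu_active_cr3 mirrors whatever cr3 this processor is
 * actually running on, and cpu_tlb_invalid is the "flush requested" flag;
 * both belong to the TLB shoot-down protocol described in the long
 * comment block above pmap_set_4GB_pagezero() later in this file.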
+ */ + current_cpu_datap()->cpu_active_cr3 = kernel_pmap->pm_cr3; + current_cpu_datap()->cpu_tlb_invalid = FALSE; +} + +vm_offset_t +pmap_high_shared_remap(enum high_fixed_addresses e, vm_offset_t va, int sz) +{ + vm_offset_t ve = pmap_index_to_virt(e); + pt_entry_t *ptep; + pmap_paddr_t pa; + int i; + spl_t s; + + assert(0 == (va & PAGE_MASK)); /* expecting page aligned */ + s = splhigh(); + ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ve); + + for (i=0; i< sz; i++) { + pa = (pmap_paddr_t) kvtophys(va); + pmap_store_pte(ptep, (pa & PG_FRAME) + | INTEL_PTE_VALID + | INTEL_PTE_GLOBAL + | INTEL_PTE_RW + | INTEL_PTE_REF + | INTEL_PTE_MOD); + va+= PAGE_SIZE; + ptep++; + } + splx(s); + return ve; +} + +vm_offset_t +pmap_cpu_high_shared_remap(int cpu, enum high_cpu_types e, vm_offset_t va, int sz) +{ + enum high_fixed_addresses a = e + HIGH_CPU_END * cpu; + return pmap_high_shared_remap(HIGH_FIXED_CPUS_BEGIN + a, va, sz); +} + +void pmap_init_high_shared(void); + +extern vm_offset_t gdtptr, idtptr; + +extern uint32_t low_intstack; + +extern struct fake_descriptor ldt_desc_pattern; +extern struct fake_descriptor tss_desc_pattern; + +extern char hi_remap_text, hi_remap_etext; +extern char t_zero_div; + +pt_entry_t *pte_unique_base; + +void +pmap_init_high_shared(void) +{ + + vm_offset_t haddr; + spl_t s; +#if MACH_KDB + struct i386_tss *ttss; +#endif + + cpu_desc_index_t * cdi = &cpu_data_master.cpu_desc_index; + + kprintf("HIGH_MEM_BASE 0x%x fixed per-cpu begin 0x%x\n", + HIGH_MEM_BASE,pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN)); + s = splhigh(); + pte_unique_base = pmap_pte(kernel_pmap, (vm_map_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN)); + splx(s); + + if (i386_btop(&hi_remap_etext - &hi_remap_text + 1) > + HIGH_FIXED_TRAMPS_END - HIGH_FIXED_TRAMPS + 1) + panic("tramps too large"); + haddr = pmap_high_shared_remap(HIGH_FIXED_TRAMPS, + (vm_offset_t) &hi_remap_text, 3); + kprintf("tramp: 0x%x, ",haddr); + /* map gdt up high and update ptr for reload */ + haddr = pmap_high_shared_remap(HIGH_FIXED_GDT, + (vm_offset_t) master_gdt, 1); + cdi->cdi_gdt.ptr = (void *)haddr; + kprintf("GDT: 0x%x, ",haddr); + /* map ldt up high */ + haddr = pmap_high_shared_remap(HIGH_FIXED_LDT_BEGIN, + (vm_offset_t) master_ldt, + HIGH_FIXED_LDT_END - HIGH_FIXED_LDT_BEGIN + 1); + cdi->cdi_ldt = (struct fake_descriptor *)haddr; + kprintf("LDT: 0x%x, ",haddr); + /* put new ldt addr into gdt */ + struct fake_descriptor temp_fake_desc; + temp_fake_desc = ldt_desc_pattern; + temp_fake_desc.offset = (vm_offset_t) haddr; + fix_desc(&temp_fake_desc, 1); + + *(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_LDT)] = temp_fake_desc; + *(struct fake_descriptor *) &master_gdt[sel_idx(USER_LDT)] = temp_fake_desc; + + /* map idt up high */ + haddr = pmap_high_shared_remap(HIGH_FIXED_IDT, + (vm_offset_t) master_idt, 1); + cdi->cdi_idt.ptr = (void *)haddr; + kprintf("IDT: 0x%x, ", haddr); + /* remap ktss up high and put new high addr into gdt */ + haddr = pmap_high_shared_remap(HIGH_FIXED_KTSS, + (vm_offset_t) &master_ktss, 1); + + temp_fake_desc = tss_desc_pattern; + temp_fake_desc.offset = (vm_offset_t) haddr; + fix_desc(&temp_fake_desc, 1); + *(struct fake_descriptor *) &master_gdt[sel_idx(KERNEL_TSS)] = temp_fake_desc; + kprintf("KTSS: 0x%x, ",haddr); +#if MACH_KDB + /* remap dbtss up high and put new high addr into gdt */ + haddr = pmap_high_shared_remap(HIGH_FIXED_DBTSS, + (vm_offset_t) &master_dbtss, 1); + temp_fake_desc = tss_desc_pattern; + temp_fake_desc.offset = (vm_offset_t) haddr; + fix_desc(&temp_fake_desc, 1); 
+ *(struct fake_descriptor *)&master_gdt[sel_idx(DEBUG_TSS)] = temp_fake_desc; + ttss = (struct i386_tss *)haddr; + kprintf("DBTSS: 0x%x, ",haddr); +#endif /* MACH_KDB */ + + /* remap dftss up high and put new high addr into gdt */ + haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS, + (vm_offset_t) &master_dftss, 1); + temp_fake_desc = tss_desc_pattern; + temp_fake_desc.offset = (vm_offset_t) haddr; + fix_desc(&temp_fake_desc, 1); + *(struct fake_descriptor *) &master_gdt[sel_idx(DF_TSS)] = temp_fake_desc; + kprintf("DFTSS: 0x%x\n",haddr); + + /* remap mctss up high and put new high addr into gdt */ + haddr = pmap_high_shared_remap(HIGH_FIXED_DFTSS, + (vm_offset_t) &master_mctss, 1); + temp_fake_desc = tss_desc_pattern; + temp_fake_desc.offset = (vm_offset_t) haddr; + fix_desc(&temp_fake_desc, 1); + *(struct fake_descriptor *) &master_gdt[sel_idx(MC_TSS)] = temp_fake_desc; + kprintf("MCTSS: 0x%x\n",haddr); + + cpu_desc_load(&cpu_data_master); +} + /* * Bootstrap the system enough to run with virtual memory. * Map the kernel's code and data, and allocate the system page table. * Called with mapping OFF. Page_size must already be set. - * - * Parameters: - * load_start: PA where kernel was loaded - * avail_start PA of first available physical page - - * after kernel page tables - * avail_end PA of last available physical page - * virtual_avail VA of first available page - - * after kernel page tables - * virtual_end VA of last available page - - * end of kernel address space - * - * &start_text start of kernel text - * &etext end of kernel text */ void pmap_bootstrap( - __unused vm_offset_t load_start) + __unused vm_offset_t load_start, + boolean_t IA32e) { vm_offset_t va; pt_entry_t *pte; int i; - int wpkernel, boot_arg; + pdpt_entry_t *pdpt; + spl_t s; vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address * known to VM */ - /* * The kernel's pmap is statically allocated so we don't * have to use pmap_create, which is unlikely to work * correctly at this part of the boot sequence. 
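 *
 * The loop just below installs the directory pages at index PTDPTDI of the
 * directory itself, a self-referential ("recursive") mapping.  The effect
 * is that every kernel pte becomes visible in one fixed linear window of
 * virtual space, which is why pmap_pte() can simply return vtopte(vaddr)
 * for the kernel pmap elsewhere in this file.  Schematically (window base
 * symbolic, not the literal macro):
 *
 *	vtopte(va) ~ (pt_entry_t *) pte_window_base + i386_btop(va)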
*/ + kernel_pmap = &kernel_pmap_store; -#ifdef PMAP_QUEUE - kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */ - kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */ -#endif kernel_pmap->ref_count = 1; + kernel_pmap->nx_enabled = FALSE; + kernel_pmap->pm_task_map = TASK_MAP_32BIT; kernel_pmap->pm_obj = (vm_object_t) NULL; kernel_pmap->dirbase = (pd_entry_t *)((unsigned int)IdlePTD | KERNBASE); - kernel_pmap->pdirbase = (pd_entry_t *)IdlePTD; -#ifdef PAE - kernel_pmap->pm_pdpt = (pd_entry_t *)((unsigned int)IdlePDPT | KERNBASE ); - kernel_pmap->pm_ppdpt = (vm_offset_t)IdlePDPT; -#endif + kernel_pmap->pdirbase = (pmap_paddr_t)((int)IdlePTD); + pdpt = (pd_entry_t *)((unsigned int)IdlePDPT | KERNBASE ); + kernel_pmap->pm_pdpt = pdpt; + kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePDPT); + va = (vm_offset_t)kernel_pmap->dirbase; /* setup self referential mapping(s) */ - for (i = 0; i< NPGPTD; i++ ) { + for (i = 0; i< NPGPTD; i++, pdpt++) { pmap_paddr_t pa; - pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i)); - * (pd_entry_t *) (kernel_pmap->dirbase + PTDPTDI + i) = + pa = (pmap_paddr_t) kvtophys((vm_offset_t)(va + i386_ptob(i))); + pmap_store_pte( + (pd_entry_t *) (kernel_pmap->dirbase + PTDPTDI + i), (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_REF | - INTEL_PTE_MOD | INTEL_PTE_WIRED ; -#ifdef PAE - kernel_pmap->pm_pdpt[i] = pa | INTEL_PTE_VALID; -#endif + INTEL_PTE_MOD | INTEL_PTE_WIRED) ; + pmap_store_pte(pdpt, pa | INTEL_PTE_VALID); } + cpu_64bit = IA32e; + + lo_kernel_cr3 = kernel_pmap->pm_cr3; + current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3; + + /* save the value we stuff into created pmaps to share the gdts etc */ + high_shared_pde = *pmap_pde(kernel_pmap, HIGH_MEM_BASE); + /* make sure G bit is on for high shared pde entry */ + high_shared_pde |= INTEL_PTE_GLOBAL; + s = splhigh(); + pmap_store_pte(pmap_pde(kernel_pmap, HIGH_MEM_BASE), high_shared_pde); + splx(s); + nkpt = NKPT; + OSAddAtomic(NKPT, &inuse_ptepages_count); virtual_avail = (vm_offset_t)VADDR(KPTDI,0) + (vm_offset_t)first_avail; virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS); @@ -759,77 +907,117 @@ pmap_bootstrap( * mapping of pages. */ #define SYSMAP(c, p, v, n) \ - v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n); + v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n) va = virtual_avail; - pte = (pt_entry_t *) pmap_pte(kernel_pmap, va); - - /* - * CMAP1/CMAP2 are used for zeroing and copying pages. - * CMAP3 is used for ml_phys_read/write. 
- */ - SYSMAP(caddr_t, CM1, CA1, 1) - * (pt_entry_t *) CM1 = 0; - SYSMAP(caddr_t, CM2, CA2, 1) - * (pt_entry_t *) CM2 = 0; - SYSMAP(caddr_t, CM3, CA3, 1) - * (pt_entry_t *) CM3 = 0; + pte = vtopte(va); - /* used by pmap_pte */ - SYSMAP(caddr_t, CM4, CA4, 1) - * (pt_entry_t *) CM4 = 0; + for (i=0; icpu_pmap->mapwindow[i].prv_CMAP), + (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR), + 1); + *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0; + } /* DMAP user for debugger */ SYSMAP(caddr_t, DMAP1, DADDR1, 1); SYSMAP(caddr_t, DMAP2, DADDR2, 1); /* XXX temporary - can remove */ - - lock_init(&pmap_system_lock, - FALSE, /* NOT a sleep lock */ - 0, 0); - virtual_avail = va; - wpkernel = 1; - if (PE_parse_boot_arg("debug", &boot_arg)) { - if (boot_arg & DB_PRT) wpkernel = 0; - if (boot_arg & DB_NMI) wpkernel = 0; + if (PE_parse_boot_argn("npvhash", &npvhash, sizeof (npvhash))) { + if (0 != ((npvhash+1) & npvhash)) { + kprintf("invalid hash %d, must be ((2^N)-1), using default %d\n",npvhash,NPVHASH); + npvhash = NPVHASH; + } + } else { + npvhash = NPVHASH; } + printf("npvhash=%d\n",npvhash); - /* remap kernel text readonly if not debugging or kprintfing */ - if (wpkernel) - { - vm_offset_t myva; - pt_entry_t *ptep; - - for (myva = i386_round_page(VM_MIN_KERNEL_ADDRESS + MP_BOOT + MP_BOOTSTACK); myva < etext; myva += PAGE_SIZE) { - if (myva >= (vm_offset_t)sectHIBB && myva < ((vm_offset_t)sectHIBB + sectSizeHIB)) - continue; - ptep = pmap_pte(kernel_pmap, myva); - if (ptep) - *ptep &= ~INTEL_PTE_RW; - } - flush_tlb(); + simple_lock_init(&kernel_pmap->lock, 0); + simple_lock_init(&pv_hashed_free_list_lock, 0); + simple_lock_init(&pv_hashed_kern_free_list_lock, 0); + simple_lock_init(&pv_hash_table_lock,0); + + pmap_init_high_shared(); + + pde_mapped_size = PDE_MAPPED_SIZE; + + if (cpu_64bit) { + pdpt_entry_t *ppdpt = IdlePDPT; + pdpt_entry_t *ppdpt64 = (pdpt_entry_t *)IdlePDPT64; + pdpt_entry_t *ppml4 = (pdpt_entry_t *)IdlePML4; + int istate = ml_set_interrupts_enabled(FALSE); + + /* + * Clone a new 64-bit 3rd-level page table directory, IdlePML4, + * with page bits set for the correct IA-32e operation and so that + * the legacy-mode IdlePDPT is retained for slave processor start-up. + * This is necessary due to the incompatible use of page bits between + * 64-bit and legacy modes. + */ + kernel_pmap->pm_cr3 = (pmap_paddr_t)((int)IdlePML4); /* setup in start.s for us */ + kernel_pmap->pm_pml4 = IdlePML4; + kernel_pmap->pm_pdpt = (pd_entry_t *) + ((unsigned int)IdlePDPT64 | KERNBASE ); +#define PAGE_BITS INTEL_PTE_VALID|INTEL_PTE_RW|INTEL_PTE_USER|INTEL_PTE_REF + pmap_store_pte(kernel_pmap->pm_pml4, + (uint32_t)IdlePDPT64 | PAGE_BITS); + pmap_store_pte((ppdpt64+0), *(ppdpt+0) | PAGE_BITS); + pmap_store_pte((ppdpt64+1), *(ppdpt+1) | PAGE_BITS); + pmap_store_pte((ppdpt64+2), *(ppdpt+2) | PAGE_BITS); + pmap_store_pte((ppdpt64+3), *(ppdpt+3) | PAGE_BITS); + + /* + * The kernel is also mapped in the uber-sapce at the 4GB starting + * 0xFFFFFF80:00000000. This is the highest entry in the 4th-level. 
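 *
 * Checking that address against the standard IA-32e split (top-level index
 * is bits 47:39 of the VA):
 *
 *	(0xFFFFFF8000000000ULL >> 39) & 511  ==  511
 *
 * so KERNEL_UBER_PML4_INDEX used just below is the last of the 512 PML4
 * slots, matching "the highest entry in the 4th-level" above.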
+ */ + pmap_store_pte((ppml4+KERNEL_UBER_PML4_INDEX), *(ppml4+0)); + + kernel64_cr3 = (addr64_t) kernel_pmap->pm_cr3; + + /* Re-initialize descriptors and prepare to switch modes */ + cpu_desc_init64(&cpu_data_master); + current_cpu_datap()->cpu_is64bit = TRUE; + current_cpu_datap()->cpu_active_cr3 = kernel64_cr3; + + pde_mapped_size = 512*4096 ; + + ml_set_interrupts_enabled(istate); } - simple_lock_init(&kernel_pmap->lock, 0); - simple_lock_init(&pv_free_list_lock, 0); + /* Sets 64-bit mode if required. */ + cpu_mode_init(&cpu_data_master); + /* Update in-kernel CPUID information if we're now in 64-bit mode */ + if (IA32e) + cpuid_set_info(); - /* invalidate user virtual addresses */ - memset((char *)kernel_pmap->dirbase, - 0, - (KPTDI) * sizeof(pd_entry_t)); + kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4; kprintf("Kernel virtual space from 0x%x to 0x%x.\n", VADDR(KPTDI,0), virtual_end); -#ifdef PAE - kprintf("Available physical space from 0x%llx to 0x%llx\n", - avail_start, avail_end); printf("PAE enabled\n"); -#else - kprintf("Available physical space from 0x%x to 0x%x\n", + if (cpu_64bit){ + printf("64 bit mode enabled\n");kprintf("64 bit mode enabled\n"); } + + kprintf("Available physical space from 0x%llx to 0x%llx\n", avail_start, avail_end); -#endif + + /* + * By default for 64-bit users loaded at 4GB, share kernel mapping. + * But this may be overridden by the -no_shared_cr3 boot-arg. + */ + if (PE_parse_boot_argn("-no_shared_cr3", &no_shared_cr3, sizeof (no_shared_cr3))) { + kprintf("Shared kernel address space disabled\n"); + } + +#ifdef PMAP_TRACES + if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) { + kprintf("Kernel traces for pmap operations enabled\n"); + } +#endif /* PMAP_TRACES */ } void @@ -852,7 +1040,7 @@ pmap_init(void) register long npages; vm_offset_t addr; register vm_size_t s; - vm_offset_t vaddr; + vm_map_offset_t vaddr; ppnum_t ppn; /* @@ -860,29 +1048,68 @@ pmap_init(void) * the modify bit array, and the pte_page table. */ - /* zero bias all these arrays now instead of off avail_start - so we cover all memory */ - npages = i386_btop(avail_end); - s = (vm_size_t) (sizeof(struct pv_entry) * npages - + pv_lock_table_size(npages) + /* + * zero bias all these arrays now instead of off avail_start + * so we cover all memory + */ + + npages = (long)i386_btop(avail_end); + s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages + + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1)) + + pv_lock_table_size(npages) + + pv_hash_lock_table_size((npvhash+1)) + npages); s = round_page(s); - if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS) + if (kernel_memory_allocate(kernel_map, &addr, s, 0, + KMA_KOBJECT | KMA_PERMANENT) + != KERN_SUCCESS) panic("pmap_init"); memset((char *)addr, 0, s); +#if PV_DEBUG + if (0 == npvhash) panic("npvhash not initialized"); +#endif + /* * Allocate the structures first to preserve word-alignment. 
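 *
 * For reference, the single allocation sized above is carved up, in the
 * order used just below, into:
 *
 *	pv_head_table		npages * sizeof(struct pv_rooted_entry)
 *	pv_hash_table		(npvhash + 1) list-head pointers
 *	pv_lock_table		one lock bit per page, rounded up to bytes
 *	pv_hash_lock_table	one lock bit per hash bucket, rounded up to bytes
 *	pmap_phys_attributes	one byte per page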
*/ - pv_head_table = (pv_entry_t) addr; + pv_head_table = (pv_rooted_entry_t) addr; addr = (vm_offset_t) (pv_head_table + npages); + pv_hash_table = (pv_hashed_entry_t *)addr; + addr = (vm_offset_t) (pv_hash_table + (npvhash + 1)); + pv_lock_table = (char *) addr; addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages)); + pv_hash_lock_table = (char *) addr; + addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1))); + pmap_phys_attributes = (char *) addr; + { + unsigned int i; + unsigned int pn; + ppnum_t last_pn; + pmap_memory_region_t *pmptr = pmap_memory_regions; + + last_pn = (ppnum_t)i386_btop(avail_end); + + for (i = 0; i < pmap_memory_region_count; i++, pmptr++) { + if (pmptr->type == kEfiConventionalMemory) { + + for (pn = pmptr->base; pn <= pmptr->end; pn++) { + if (pn < last_pn) { + pmap_phys_attributes[pn] |= PHYS_MANAGED; + + if (pn > last_managed_page) + last_managed_page = pn; + } + } + } + } + } /* * Create the zone of physical maps, @@ -890,104 +1117,127 @@ pmap_init(void) */ s = (vm_size_t) sizeof(struct pmap); pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */ - s = (vm_size_t) sizeof(struct pv_entry); - pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */ -#ifdef PAE - // s = (vm_size_t) (sizeof(pdpt_entry_t) * NPGPTD); + s = (vm_size_t) sizeof(struct pv_hashed_entry); + pv_hashed_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */ s = 63; pdpt_zone = zinit(s, 400*s, 4096, "pdpt"); /* XXX */ -#endif - - /* - * Only now, when all of the data structures are allocated, - * can we set vm_first_phys and vm_last_phys. If we set them - * too soon, the kmem_alloc_wired above will try to use these - * data structures and blow up. - */ - - /* zero bias this now so we cover all memory */ - vm_first_phys = 0; - vm_last_phys = avail_end; -#if GROW_KERNEL_FUNCTION_IMPLEMENTED kptobj = &kptobj_object_store; - _vm_object_allocate((vm_object_size_t)NKPDE, kptobj); + _vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG), kptobj); kernel_pmap->pm_obj = kptobj; -#endif /* create pv entries for kernel pages mapped by low level startup code. these have to exist so we can pmap_remove() e.g. kext pages from the middle of our addr space */ - vaddr = (vm_offset_t)VM_MIN_KERNEL_ADDRESS; + vaddr = (vm_map_offset_t)0; for (ppn = 0; ppn < i386_btop(avail_start) ; ppn++ ) { - pv_entry_t pv_e; + pv_rooted_entry_t pv_e; pv_e = pai_to_pvh(ppn); pv_e->va = vaddr; vaddr += PAGE_SIZE; pv_e->pmap = kernel_pmap; - pv_e->next = PV_ENTRY_NULL; + queue_init(&pv_e->qlink); } pmap_initialized = TRUE; /* - * Initializie pmap cache. + * Initialize pmap cache. */ pmap_cache_list = PMAP_NULL; pmap_cache_count = 0; simple_lock_init(&pmap_cache_lock, 0); -#ifdef PMAP_QUEUE - simple_lock_init(&free_pmap_lock, 0); -#endif - -} -void -x86_lowmem_free(void) -{ - /* free lowmem pages back to the vm system. we had to defer doing this - until the vm system was fully up. 
- the actual pages that are released are determined by which - pages the memory sizing code puts into the region table */ + max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t); - ml_static_mfree((vm_offset_t) i386_ptob(pmap_memory_regions[0].base)|VM_MIN_KERNEL_ADDRESS, - (vm_size_t) i386_ptob(pmap_memory_regions[0].end - pmap_memory_regions[0].base)); } -#define valid_page(x) (pmap_initialized && pmap_valid_page(x)) +#define managed_page(x) ( (unsigned int)x <= last_managed_page && (pmap_phys_attributes[x] & PHYS_MANAGED) ) +/* + * this function is only used for debugging fron the vm layer + */ boolean_t pmap_verify_free( ppnum_t pn) { - pmap_paddr_t phys; - pv_entry_t pv_h; + pv_rooted_entry_t pv_h; int pai; - spl_t spl; boolean_t result; assert(pn != vm_page_fictitious_addr); - phys = (pmap_paddr_t)i386_ptob(pn); + if (!pmap_initialized) return(TRUE); - if (!pmap_valid_page(pn)) + if (pn == vm_page_guard_addr) + return TRUE; + + pai = ppn_to_pai(pn); + if (!managed_page(pai)) return(FALSE); + pv_h = pai_to_pvh(pn); + result = (pv_h->pmap == PMAP_NULL); + return(result); +} - PMAP_WRITE_LOCK(spl); +boolean_t +pmap_is_empty( + pmap_t pmap, + vm_map_offset_t va_start, + vm_map_offset_t va_end) +{ + vm_map_offset_t offset; + ppnum_t phys_page; - pai = pa_index(phys); - pv_h = pai_to_pvh(pai); + if (pmap == PMAP_NULL) { + return TRUE; + } - result = (pv_h->pmap == PMAP_NULL); - PMAP_WRITE_UNLOCK(spl); + /* + * Check the resident page count + * - if it's zero, the pmap is completely empty. + * This short-circuit test prevents a virtual address scan which is + * painfully slow for 64-bit spaces. + * This assumes the count is correct + * .. the debug kernel ought to be checking perhaps by page table walk. + */ + if (pmap->stats.resident_count == 0) + return TRUE; + + for (offset = va_start; + offset < va_end; + offset += PAGE_SIZE_64) { + phys_page = pmap_find_phys(pmap, offset); + if (phys_page) { + if (pmap != kernel_pmap && + pmap->pm_task_map == TASK_MAP_32BIT && + offset >= HIGH_MEM_BASE) { + /* + * The "high_shared_pde" is used to share + * the entire top-most 2MB of address space + * between the kernel and all 32-bit tasks. + * So none of this can be removed from 32-bit + * tasks. + * Let's pretend there's nothing up + * there... + */ + return TRUE; + } + kprintf("pmap_is_empty(%p,0x%llx,0x%llx): " + "page %d at 0x%llx\n", + pmap, va_start, va_end, phys_page, offset); + return FALSE; + } + } - return(result); + return TRUE; } + /* * Create and return a physical map. * @@ -1002,15 +1252,23 @@ pmap_verify_free( */ pmap_t pmap_create( - vm_size_t size) + vm_map_size_t sz, + boolean_t is_64bit) { - register pmap_t p; -#ifdef PMAP_QUEUE - register pmap_t pro; - spl_t s; -#endif - register int i; - register vm_offset_t va; + pmap_t p; + int i; + vm_offset_t va; + vm_size_t size; + pdpt_entry_t *pdpt; + pml4_entry_t *pml4p; + pd_entry_t *pdp; + int template; + spl_t s; + + PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, + (int) (sz>>32), (int) sz, (int) is_64bit, 0, 0); + + size = (vm_size_t) sz; /* * A software use-only map doesn't even need a map. 
@@ -1022,535 +1280,318 @@ pmap_create( p = (pmap_t) zalloc(pmap_zone); if (PMAP_NULL == p) - panic("pmap_create zalloc"); - if (KERN_SUCCESS != kmem_alloc_wired(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD)) - panic("pmap_create kmem_alloc_wired"); -#ifdef PAE - p->pm_hold = (vm_offset_t)zalloc(pdpt_zone); - if ((vm_offset_t)NULL == p->pm_hold) { - panic("pdpt zalloc"); - } - p->pm_pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31); - p->pm_ppdpt = kvtophys((vm_offset_t)p->pm_pdpt); /* XXX */ -#endif - if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPDEPG)))) - panic("pmap_create vm_object_allocate"); - memcpy(p->dirbase, - (void *)((unsigned int)IdlePTD | KERNBASE), - NBPTD); - va = (vm_offset_t)p->dirbase; - p->pdirbase = (pd_entry_t *)(kvtophys(va)); - simple_lock_init(&p->lock, 0); + panic("pmap_create zalloc"); - /* setup self referential mapping(s) */ - for (i = 0; i< NPGPTD; i++ ) { - pmap_paddr_t pa; - pa = (pmap_paddr_t) kvtophys(va + i386_ptob(i)); - * (pd_entry_t *) (p->dirbase + PTDPTDI + i) = - (pa & PG_FRAME) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_REF | - INTEL_PTE_MOD | INTEL_PTE_WIRED ; -#ifdef PAE - p->pm_pdpt[i] = pa | INTEL_PTE_VALID; -#endif - } - - p->cpus_using = 0; + /* init counts now since we'll be bumping some */ + simple_lock_init(&p->lock, 0); p->stats.resident_count = 0; + p->stats.resident_max = 0; p->stats.wired_count = 0; p->ref_count = 1; + p->nx_enabled = 1; + p->pm_shared = FALSE; + + assert(!is_64bit || cpu_64bit); + p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;; + + if (!cpu_64bit) { + /* legacy 32 bit setup */ + /* in the legacy case the pdpt layer is hardwired to 4 entries and each + * entry covers 1GB of addr space */ + if (KERN_SUCCESS != kmem_alloc_kobject(kernel_map, (vm_offset_t *)(&p->dirbase), NBPTD)) + panic("pmap_create kmem_alloc_kobject"); + p->pm_hold = (vm_offset_t)zalloc(pdpt_zone); + if ((vm_offset_t)NULL == p->pm_hold) { + panic("pdpt zalloc"); + } + pdpt = (pdpt_entry_t *) (( p->pm_hold + 31) & ~31); + p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)pdpt); + if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPGPTD*NPTDPG)))) + panic("pmap_create vm_object_allocate"); -#ifdef PMAP_QUEUE - /* insert new pmap at head of queue hanging off kernel_pmap */ - SPLVM(s); - simple_lock(&free_pmap_lock); - p->pmap_link.next = (queue_t)kernel_pmap->pmap_link.next; - kernel_pmap->pmap_link.next = (queue_t)p; + memset((char *)p->dirbase, 0, NBPTD); - pro = (pmap_t) p->pmap_link.next; - p->pmap_link.prev = (queue_t)pro->pmap_link.prev; - pro->pmap_link.prev = (queue_t)p; + va = (vm_offset_t)p->dirbase; + p->pdirbase = kvtophys(va); - - simple_unlock(&free_pmap_lock); - SPLX(s); -#endif - - return(p); -} + template = INTEL_PTE_VALID; + for (i = 0; i< NPGPTD; i++, pdpt++ ) { + pmap_paddr_t pa; + pa = (pmap_paddr_t) kvtophys((vm_offset_t)(va + i386_ptob(i))); + pmap_store_pte(pdpt, pa | template); + } -/* - * Retire the given physical map from service. - * Should only be called if the map contains - * no valid mappings. 
- */ + /* map the high shared pde */ + s = splhigh(); + pmap_store_pte(pmap_pde(p, HIGH_MEM_BASE), high_shared_pde); + splx(s); -void -pmap_destroy( - register pmap_t p) -{ - register pt_entry_t *pdep; - register int c; - spl_t s; - register vm_page_t m; -#ifdef PMAP_QUEUE - register pmap_t pre,pro; -#endif - - if (p == PMAP_NULL) - return; + } else { + /* 64 bit setup */ - SPLVM(s); - simple_lock(&p->lock); - c = --p->ref_count; - if (c == 0) { - register int my_cpu; + /* alloc the pml4 page in kernel vm */ + if (KERN_SUCCESS != kmem_alloc_kobject(kernel_map, (vm_offset_t *)(&p->pm_hold), PAGE_SIZE)) + panic("pmap_create kmem_alloc_kobject pml4"); - mp_disable_preemption(); - my_cpu = cpu_number(); + memset((char *)p->pm_hold, 0, PAGE_SIZE); + p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_hold); - /* - * If some cpu is not using the physical pmap pointer that it - * is supposed to be (see set_dirbase), we might be using the - * pmap that is being destroyed! Make sure we are - * physically on the right pmap: - */ - /* force pmap/cr3 update */ - PMAP_UPDATE_TLBS(p, - VM_MIN_ADDRESS, - VM_MAX_KERNEL_ADDRESS); - - if (PMAP_REAL(my_cpu) == p) { - PMAP_CPU_CLR(p, my_cpu); - PMAP_REAL(my_cpu) = kernel_pmap; -#ifdef PAE - set_cr3((unsigned int)kernel_pmap->pm_ppdpt); -#else - set_cr3((unsigned int)kernel_pmap->pdirbase); -#endif - } - mp_enable_preemption(); - } - simple_unlock(&p->lock); - SPLX(s); + OSAddAtomic(1, &inuse_ptepages_count); - if (c != 0) { - return; /* still in use */ - } + /* allocate the vm_objs to hold the pdpt, pde and pte pages */ -#ifdef PMAP_QUEUE - /* remove from pmap queue */ - SPLVM(s); - simple_lock(&free_pmap_lock); + if (NULL == (p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS)))) + panic("pmap_create pdpt obj"); - pre = (pmap_t)p->pmap_link.prev; - pre->pmap_link.next = (queue_t)p->pmap_link.next; - pro = (pmap_t)p->pmap_link.next; - pro->pmap_link.prev = (queue_t)p->pmap_link.prev; + if (NULL == (p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS)))) + panic("pmap_create pdpt obj"); - simple_unlock(&free_pmap_lock); - SPLX(s); -#endif + if (NULL == (p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS)))) + panic("pmap_create pte obj"); - /* - * Free the memory maps, then the - * pmap structure. - */ + /* uber space points to uber mapped kernel */ + s = splhigh(); + pml4p = pmap64_pml4(p, 0ULL); + pmap_store_pte((pml4p+KERNEL_UBER_PML4_INDEX), *kernel_pmap->pm_pml4); - pdep = (pt_entry_t *)p->dirbase; - while (pdep < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]) { - int ind; - if (*pdep & INTEL_PTE_VALID) { - ind = pdep - (pt_entry_t *)&p->dirbase[0]; - vm_object_lock(p->pm_obj); - m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)ind); - if (m == VM_PAGE_NULL) { - panic("pmap_destroy: pte page not in object"); + if (!is_64bit) { + while ((pdp = pmap64_pde(p, (uint64_t)HIGH_MEM_BASE)) == PD_ENTRY_NULL) { + splx(s); + pmap_expand_pdpt(p, (uint64_t)HIGH_MEM_BASE); /* need room for another pde entry */ + s = splhigh(); + } + pmap_store_pte(pdp, high_shared_pde); } - vm_page_lock_queues(); - vm_page_free(m); - inuse_ptepages_count--; - vm_object_unlock(p->pm_obj); - vm_page_unlock_queues(); - - /* - * Clear pdes, this might be headed for the cache. 
- */ - *pdep++ = 0; - } - else { - *pdep++ = 0; - } - + splx(s); } - vm_object_deallocate(p->pm_obj); - kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD); -#ifdef PAE - zfree(pdpt_zone, (void *)p->pm_hold); -#endif - zfree(pmap_zone, p); -} - -/* - * Add a reference to the specified pmap. - */ - -void -pmap_reference( - register pmap_t p) -{ - spl_t s; + PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, + (int) p, is_64bit, 0, 0, 0); - if (p != PMAP_NULL) { - SPLVM(s); - simple_lock(&p->lock); - p->ref_count++; - simple_unlock(&p->lock); - SPLX(s); - } + return(p); } /* - * Remove a range of hardware page-table entries. - * The entries given are the first (inclusive) - * and last (exclusive) entries for the VM pages. - * The virtual address is the va for the first pte. + * The following routines implement the shared address optmization for 64-bit + * users with a 4GB page zero. * - * The pmap must be locked. - * If the pmap is not the kernel pmap, the range must lie - * entirely within one pte-page. This is NOT checked. - * Assumes that the pte-page exists. - */ + * pmap_set_4GB_pagezero() + * is called in the exec and fork paths to mirror the kernel's + * mapping in the bottom 4G of the user's pmap. The task mapping changes + * from TASK_MAP_64BIT to TASK_MAP_64BIT_SHARED. This routine returns + * without doing anything if the -no_shared_cr3 boot-arg is set. + * + * pmap_clear_4GB_pagezero() + * is called in the exec/exit paths to undo this mirror. The task mapping + * reverts to TASK_MAP_64BIT. In addition, we switch to the kernel's + * CR3 by calling pmap_load_kernel_cr3(). + * + * pmap_load_kernel_cr3() + * loads cr3 with the kernel's page table. In addition to being called + * by pmap_clear_4GB_pagezero(), it is used both prior to teardown and + * when we go idle in the context of a shared map. + * + * Further notes on per-cpu data used: + * + * cpu_kernel_cr3 is the cr3 for the kernel's pmap. + * This is loaded in a trampoline on entering the kernel + * from a 32-bit user (or non-shared-cr3 64-bit user). + * cpu_task_cr3 is the cr3 for the current thread. + * This is loaded in a trampoline as we exit the kernel. + * cpu_active_cr3 reflects the cr3 currently loaded. + * However, the low order bit is set when the + * processor is idle or interrupts are disabled + * while the system pmap lock is held. It is used by + * tlb shoot-down. + * cpu_task_map indicates whether the task cr3 belongs to + * a 32-bit, a 64-bit or a 64-bit shared map. + * The latter allows the avoidance of the cr3 load + * on kernel entry and exit. + * cpu_tlb_invalid set TRUE when a tlb flush is requested. + * If the cr3 is "inactive" (the cpu is idle or the + * system-wide pmap lock is held) this not serviced by + * an IPI but at time when the cr3 becomes "active". 
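 *
 * A hedged sketch of that deferred service; the real handling is in the
 * cr3-activation and shoot-down paths rather than spelled out here:
 *
 *	if (current_cpu_datap()->cpu_tlb_invalid) {
 *		current_cpu_datap()->cpu_tlb_invalid = FALSE;
 *		flush_tlb();
 *	}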
+ */ -/* static */ void -pmap_remove_range( - pmap_t pmap, - vm_offset_t va, - pt_entry_t *spte, - pt_entry_t *epte) -{ - register pt_entry_t *cpte; - int num_removed, num_unwired; - int pai; - pmap_paddr_t pa; - -#if DEBUG_PTE_PAGE - if (pmap != kernel_pmap) - ptep_check(get_pte_page(spte)); -#endif /* DEBUG_PTE_PAGE */ - num_removed = 0; - num_unwired = 0; - - for (cpte = spte; cpte < epte; - cpte++, va += PAGE_SIZE) { - - pa = pte_to_pa(*cpte); - if (pa == 0) - continue; - - num_removed++; - if (iswired(*cpte)) - num_unwired++; +pmap_set_4GB_pagezero(pmap_t p) +{ + pdpt_entry_t *user_pdptp; + pdpt_entry_t *kern_pdptp; - if (!valid_page(i386_btop(pa))) { + assert(p->pm_task_map != TASK_MAP_32BIT); - /* - * Outside range of managed physical memory. - * Just remove the mappings. - */ - register pt_entry_t *lpte = cpte; + /* Kernel-shared cr3 may be disabled by boot arg. */ + if (no_shared_cr3) + return; - *lpte = 0; - continue; - } + /* + * Set the bottom 4 3rd-level pte's to be the kernel's. + */ + PMAP_LOCK(p); + while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) { + PMAP_UNLOCK(p); + pmap_expand_pml4(p, 0x0); + PMAP_LOCK(p); + } + kern_pdptp = kernel_pmap->pm_pdpt; + pmap_store_pte(user_pdptp+0, *(kern_pdptp+0)); + pmap_store_pte(user_pdptp+1, *(kern_pdptp+1)); + pmap_store_pte(user_pdptp+2, *(kern_pdptp+2)); + pmap_store_pte(user_pdptp+3, *(kern_pdptp+3)); + p->pm_task_map = TASK_MAP_64BIT_SHARED; + PMAP_UNLOCK(p); +} - pai = pa_index(pa); - LOCK_PVH(pai); +void +pmap_clear_4GB_pagezero(pmap_t p) +{ + pdpt_entry_t *user_pdptp; - /* - * Get the modify and reference bits. - */ - { - register pt_entry_t *lpte; + if (p->pm_task_map != TASK_MAP_64BIT_SHARED) + return; - lpte = cpte; - pmap_phys_attributes[pai] |= - *lpte & (PHYS_MODIFIED|PHYS_REFERENCED); - *lpte = 0; + PMAP_LOCK(p); - } + p->pm_task_map = TASK_MAP_64BIT; - /* - * Remove the mapping from the pvlist for - * this physical page. - */ - { - register pv_entry_t pv_h, prev, cur; + pmap_load_kernel_cr3(); - pv_h = pai_to_pvh(pai); - if (pv_h->pmap == PMAP_NULL) { - panic("pmap_remove: null pv_list!"); - } - if (pv_h->va == va && pv_h->pmap == pmap) { - /* - * Header is the pv_entry. Copy the next one - * to header and free the next one (we cannot - * free the header) - */ - cur = pv_h->next; - if (cur != PV_ENTRY_NULL) { - *pv_h = *cur; - PV_FREE(cur); - } - else { - pv_h->pmap = PMAP_NULL; - } - } - else { - cur = pv_h; - do { - prev = cur; - if ((cur = prev->next) == PV_ENTRY_NULL) { - panic("pmap-remove: mapping not in pv_list!"); - } - } while (cur->va != va || cur->pmap != pmap); - prev->next = cur->next; - PV_FREE(cur); - } - UNLOCK_PVH(pai); - } - } + user_pdptp = pmap64_pdpt(p, 0x0); + pmap_store_pte(user_pdptp+0, 0); + pmap_store_pte(user_pdptp+1, 0); + pmap_store_pte(user_pdptp+2, 0); + pmap_store_pte(user_pdptp+3, 0); - /* - * Update the counts - */ - assert(pmap->stats.resident_count >= num_removed); - pmap->stats.resident_count -= num_removed; - assert(pmap->stats.wired_count >= num_unwired); - pmap->stats.wired_count -= num_unwired; + PMAP_UNLOCK(p); } -/* - * Remove phys addr if mapped in specified map - * - */ void -pmap_remove_some_phys( - __unused pmap_t map, - __unused ppnum_t pn) +pmap_load_kernel_cr3(void) { + uint64_t kernel_cr3; -/* Implement to support working set code */ + assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); + /* + * Reload cr3 with the true kernel cr3. 
+ */ + kernel_cr3 = current_cpu_datap()->cpu_kernel_cr3; + set64_cr3(kernel_cr3); + current_cpu_datap()->cpu_active_cr3 = kernel_cr3; + current_cpu_datap()->cpu_tlb_invalid = FALSE; + __asm__ volatile("mfence"); } /* - * Remove the given range of addresses - * from the specified map. - * - * It is assumed that the start and end are properly - * rounded to the hardware page size. + * Retire the given physical map from service. + * Should only be called if the map contains + * no valid mappings. */ - void -pmap_remove( - pmap_t map, - addr64_t s64, - addr64_t e64) +pmap_destroy( + register pmap_t p) { - spl_t spl; - register pt_entry_t *pde; - register pt_entry_t *spte, *epte; - vm_offset_t l; - vm_offset_t s, e; - vm_offset_t orig_s; + register int c; - if (map == PMAP_NULL) + if (p == PMAP_NULL) return; - PMAP_READ_LOCK(map, spl); - - if (value_64bit(s64) || value_64bit(e64)) { - panic("pmap_remove addr overflow"); - } + PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START, + (int) p, 0, 0, 0, 0); - orig_s = s = (vm_offset_t)low32(s64); - e = (vm_offset_t)low32(e64); + PMAP_LOCK(p); - pde = pmap_pde(map, s); + c = --p->ref_count; - while (s < e) { - l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); - if (l > e) - l = e; - if (*pde & INTEL_PTE_VALID) { - spte = (pt_entry_t *)pmap_pte(map, (s & ~(PDE_MAPPED_SIZE-1))); - spte = &spte[ptenum(s)]; - epte = &spte[intel_btop(l-s)]; - pmap_remove_range(map, s, spte, epte); - } - s = l; - pde++; + if (c == 0) { + /* + * If some cpu is not using the physical pmap pointer that it + * is supposed to be (see set_dirbase), we might be using the + * pmap that is being destroyed! Make sure we are + * physically on the right pmap: + */ + PMAP_UPDATE_TLBS(p, + 0x0ULL, + 0xFFFFFFFFFFFFF000ULL); } - PMAP_UPDATE_TLBS(map, orig_s, e); - - PMAP_READ_UNLOCK(map, spl); -} - -/* - * Routine: pmap_page_protect - * - * Function: - * Lower the permission for all mappings to a given - * page. - */ -void -pmap_page_protect( - ppnum_t pn, - vm_prot_t prot) -{ - pv_entry_t pv_h, prev; - register pv_entry_t pv_e; - register pt_entry_t *pte; - int pai; - register pmap_t pmap; - spl_t spl; - boolean_t remove; - pmap_paddr_t phys; - - assert(pn != vm_page_fictitious_addr); - phys = (pmap_paddr_t)i386_ptob(pn); - if (!valid_page(pn)) { - /* - * Not a managed page. - */ - return; - } + PMAP_UNLOCK(p); - /* - * Determine the new protection. - */ - switch (prot) { - case VM_PROT_READ: - case VM_PROT_READ|VM_PROT_EXECUTE: - remove = FALSE; - break; - case VM_PROT_ALL: - return; /* nothing to do */ - default: - remove = TRUE; - break; + if (c != 0) { + PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END, + (int) p, 1, 0, 0, 0); + return; /* still in use */ } /* - * Lock the pmap system first, since we will be changing - * several pmaps. + * Free the memory maps, then the + * pmap structure. */ + if (!cpu_64bit) { + OSAddAtomic(-p->pm_obj->resident_page_count, &inuse_ptepages_count); - PMAP_WRITE_LOCK(spl); + kmem_free(kernel_map, (vm_offset_t)p->dirbase, NBPTD); + zfree(pdpt_zone, (void *)p->pm_hold); - pai = pa_index(phys); - pv_h = pai_to_pvh(pai); + vm_object_deallocate(p->pm_obj); + } else { + /* 64 bit */ + int inuse_ptepages = 0; - /* - * Walk down PV list, changing or removing all mappings. - * We do not have to lock the pv_list because we have - * the entire pmap system locked. 
- */ - if (pv_h->pmap != PMAP_NULL) { + /* free 64 bit mode structs */ + inuse_ptepages++; + kmem_free(kernel_map, (vm_offset_t)p->pm_hold, PAGE_SIZE); - prev = pv_e = pv_h; - do { - register vm_offset_t va; - pmap = pv_e->pmap; - /* - * Lock the pmap to block pmap_extract and similar routines. - */ - simple_lock(&pmap->lock); + inuse_ptepages += p->pm_obj_pml4->resident_page_count; + vm_object_deallocate(p->pm_obj_pml4); - { + inuse_ptepages += p->pm_obj_pdpt->resident_page_count; + vm_object_deallocate(p->pm_obj_pdpt); - va = pv_e->va; - pte = pmap_pte(pmap, va); + inuse_ptepages += p->pm_obj->resident_page_count; + vm_object_deallocate(p->pm_obj); - /* - * Consistency checks. - */ - /* assert(*pte & INTEL_PTE_VALID); XXX */ - /* assert(pte_to_phys(*pte) == phys); */ + OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count); + } + zfree(pmap_zone, p); - } + PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END, + 0, 0, 0, 0, 0); - /* - * Remove the mapping if new protection is NONE - * or if write-protecting a kernel mapping. - */ - if (remove || pmap == kernel_pmap) { - /* - * Remove the mapping, collecting any modify bits. - */ - { - pmap_phys_attributes[pai] |= - *pte & (PHYS_MODIFIED|PHYS_REFERENCED); - *pte++ = 0; - PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); - } +} - assert(pmap->stats.resident_count >= 1); - pmap->stats.resident_count--; +/* + * Add a reference to the specified pmap. + */ - /* - * Remove the pv_entry. - */ - if (pv_e == pv_h) { - /* - * Fix up head later. - */ - pv_h->pmap = PMAP_NULL; - } - else { - /* - * Delete this entry. - */ - prev->next = pv_e->next; - PV_FREE(pv_e); - } - } - else { - /* - * Write-protect. - */ +void +pmap_reference( + register pmap_t p) +{ - *pte &= ~INTEL_PTE_WRITE; - pte++; - PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); - /* - * Advance prev. - */ - prev = pv_e; - } + if (p != PMAP_NULL) { + PMAP_LOCK(p); + p->ref_count++; + PMAP_UNLOCK(p);; + } +} - simple_unlock(&pmap->lock); - } while ((pv_e = prev->next) != PV_ENTRY_NULL); +/* + * Remove phys addr if mapped in specified map + * + */ +void +pmap_remove_some_phys( + __unused pmap_t map, + __unused ppnum_t pn) +{ - /* - * If pv_head mapping was removed, fix it up. - */ - if (pv_h->pmap == PMAP_NULL) { - pv_e = pv_h->next; - if (pv_e != PV_ENTRY_NULL) { - *pv_h = *pv_e; - PV_FREE(pv_e); - } - } - } +/* Implement to support working set code */ - PMAP_WRITE_UNLOCK(spl); } /* @@ -1565,7 +1606,7 @@ pmap_page_protect( unsigned int pmap_disconnect( ppnum_t pa) { - pmap_page_protect(pa, 0); /* disconnect the page */ + pmap_page_protect(pa, 0); /* disconnect the page */ return (pmap_get_refmod(pa)); /* return ref/chg status */ } @@ -1577,572 +1618,350 @@ unsigned int pmap_disconnect( void pmap_protect( pmap_t map, - vm_offset_t s, - vm_offset_t e, + vm_map_offset_t sva, + vm_map_offset_t eva, vm_prot_t prot) { register pt_entry_t *pde; register pt_entry_t *spte, *epte; - vm_offset_t l; - spl_t spl; - vm_offset_t orig_s = s; + vm_map_offset_t lva; + vm_map_offset_t orig_sva; + boolean_t set_NX; + int num_found = 0; + pmap_intr_assert(); if (map == PMAP_NULL) return; - /* - * Determine the new protection. 
- */ - switch (prot) { - case VM_PROT_READ: - case VM_PROT_READ|VM_PROT_EXECUTE: - break; - case VM_PROT_READ|VM_PROT_WRITE: - case VM_PROT_ALL: - return; /* nothing to do */ - default: - pmap_remove(map, (addr64_t)s, (addr64_t)e); + if (prot == VM_PROT_NONE) { + pmap_remove(map, sva, eva); return; } - SPLVM(spl); - simple_lock(&map->lock); + PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START, + (int) map, + (int) (sva>>32), (int) sva, + (int) (eva>>32), (int) eva); + + if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled ) + set_NX = FALSE; + else + set_NX = TRUE; + + PMAP_LOCK(map); - pde = pmap_pde(map, s); - while (s < e) { - l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); - if (l > e) - l = e; - if (*pde & INTEL_PTE_VALID) { - spte = (pt_entry_t *)pmap_pte(map, (s & ~(PDE_MAPPED_SIZE-1))); - spte = &spte[ptenum(s)]; - epte = &spte[intel_btop(l-s)]; + orig_sva = sva; + while (sva < eva) { + lva = (sva + pde_mapped_size) & ~(pde_mapped_size-1); + if (lva > eva) + lva = eva; + pde = pmap_pde(map, sva); + if (pde && (*pde & INTEL_PTE_VALID)) { + spte = (pt_entry_t *)pmap_pte(map, (sva & ~(pde_mapped_size-1))); + spte = &spte[ptenum(sva)]; + epte = &spte[intel_btop(lva-sva)]; while (spte < epte) { - if (*spte & INTEL_PTE_VALID) - *spte &= ~INTEL_PTE_WRITE; + + if (*spte & INTEL_PTE_VALID) { + + if (prot & VM_PROT_WRITE) + pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE)); + else + pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE)); + + if (set_NX == TRUE) + pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX)); + else + pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX)); + + num_found++; + } spte++; } } - s = l; - pde++; + sva = lva; } + if (num_found) + PMAP_UPDATE_TLBS(map, orig_sva, eva); + + PMAP_UNLOCK(map); - PMAP_UPDATE_TLBS(map, orig_s, e); + PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END, + 0, 0, 0, 0, 0); - simple_unlock(&map->lock); - SPLX(spl); } +/* Map a (possibly) autogenned block */ +void +pmap_map_block( + pmap_t pmap, + addr64_t va, + ppnum_t pa, + uint32_t size, + vm_prot_t prot, + int attr, + __unused unsigned int flags) +{ + uint32_t page; + + for (page = 0; page < size; page++) { + pmap_enter(pmap, va, pa, prot, attr, TRUE); + va += PAGE_SIZE; + pa++; + } +} /* - * Insert the given physical page (p) at - * the specified virtual address (v) in the - * target physical map with the protection requested. - * - * If specified, the page will be wired down, meaning - * that the related pte cannot be reclaimed. - * - * NB: This is the only routine which MAY NOT lazy-evaluate - * or lose information. That is, this routine must actually - * insert this page into the given map NOW. + * Routine: pmap_change_wiring + * Function: Change the wiring attribute for a map/virtual-address + * pair. + * In/out conditions: + * The mapping must already exist in the pmap. 
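/*
 * Hypothetical usage sketch, not from the original source, for the
 * pmap_map_block() routine introduced above: map 'npages' physically
 * contiguous pages starting at physical page 'first_pn', read/write, at
 * virtual address 'va' in the kernel pmap.  The attr and flags values of 0
 * are illustrative assumptions (flags is unused by this implementation).
 */
static void
sketch_map_contig(addr64_t va, ppnum_t first_pn, uint32_t npages)
{
	pmap_map_block(kernel_pmap, va, first_pn, npages,
		       VM_PROT_READ | VM_PROT_WRITE,
		       0,	/* attr: default cache attributes */
		       0);	/* flags: unused here */
}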
*/ void -pmap_enter( - register pmap_t pmap, - vm_offset_t v, - ppnum_t pn, - vm_prot_t prot, - unsigned int flags, - boolean_t wired) +pmap_change_wiring( + register pmap_t map, + vm_map_offset_t vaddr, + boolean_t wired) { register pt_entry_t *pte; - register pv_entry_t pv_h; - register int pai; - pv_entry_t pv_e; - pt_entry_t template; - spl_t spl; - pmap_paddr_t old_pa; - pmap_paddr_t pa = (pmap_paddr_t)i386_ptob(pn); - - XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n", - current_thread(), - current_thread(), - pmap, v, pn); - - assert(pn != vm_page_fictitious_addr); - if (pmap_debug) - printf("pmap(%x, %x)\n", v, pn); - if (pmap == PMAP_NULL) - return; /* - * Must allocate a new pvlist entry while we're unlocked; - * zalloc may cause pageout (which will lock the pmap system). - * If we determine we need a pvlist entry, we will unlock - * and allocate one. Then we will retry, throughing away - * the allocated entry later (if we no longer need it). - */ - pv_e = PV_ENTRY_NULL; - - PMAP_READ_LOCK(pmap, spl); - - /* - * Expand pmap to include this pte. Assume that - * pmap is always expanded to include enough hardware - * pages to map one VM page. + * We must grab the pmap system lock because we may + * change a pte_page queue. */ + PMAP_LOCK(map); - while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) { - /* - * Must unlock to expand the pmap. - */ - PMAP_READ_UNLOCK(pmap, spl); - - pmap_expand(pmap, v); + if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL) + panic("pmap_change_wiring: pte missing"); - PMAP_READ_LOCK(pmap, spl); + if (wired && !iswired(*pte)) { + /* + * wiring down mapping + */ + OSAddAtomic(+1, &map->stats.wired_count); + pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED)); } - /* - * Special case if the physical page is already mapped - * at this address. - */ - old_pa = pte_to_pa(*pte); - if (old_pa == pa) { + else if (!wired && iswired(*pte)) { /* - * May be changing its wired attribute or protection + * unwiring mapping */ - - template = pa_to_pte(pa) | INTEL_PTE_VALID; - - if(flags & VM_MEM_NOT_CACHEABLE) { - if(!(flags & VM_MEM_GUARDED)) - template |= INTEL_PTE_PTA; - template |= INTEL_PTE_NCACHE; - } - - if (pmap != kernel_pmap) - template |= INTEL_PTE_USER; - if (prot & VM_PROT_WRITE) - template |= INTEL_PTE_WRITE; - if (wired) { - template |= INTEL_PTE_WIRED; - if (!iswired(*pte)) - pmap->stats.wired_count++; - } - else { - if (iswired(*pte)) { - assert(pmap->stats.wired_count >= 1); - pmap->stats.wired_count--; - } - } - - if (*pte & INTEL_PTE_MOD) - template |= INTEL_PTE_MOD; - WRITE_PTE(pte, template) - pte++; - - goto Done; + assert(map->stats.wired_count >= 1); + OSAddAtomic(-1, &map->stats.wired_count); + pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED)); } - /* - * Outline of code from here: - * 1) If va was mapped, update TLBs, remove the mapping - * and remove old pvlist entry. - * 2) Add pvlist entry for new mapping - * 3) Enter new mapping. - * - * SHARING_FAULTS complicates this slightly in that it cannot - * replace the mapping, but must remove it (because adding the - * pvlist entry for the new mapping may remove others), and - * hence always enters the new mapping at step 3) - * - * If the old physical page is not managed step 1) is skipped - * (except for updating the TLBs), and the mapping is - * overwritten at step 3). If the new physical page is not - * managed, step 2) is skipped. 
- */ - - if (old_pa != (pmap_paddr_t) 0) { + PMAP_UNLOCK(map); +} -#if DEBUG_PTE_PAGE - if (pmap != kernel_pmap) - ptep_check(get_pte_page(pte)); -#endif /* DEBUG_PTE_PAGE */ +/* + * Routine: pmap_extract + * Function: + * Extract the physical page address associated + * with the given map/virtual_address pair. + * Change to shim for backwards compatibility but will not + * work for 64 bit systems. Some old drivers that we cannot + * change need this. + */ - /* - * Don't do anything to pages outside valid memory here. - * Instead convince the code that enters a new mapping - * to overwrite the old one. - */ +vm_offset_t +pmap_extract( + register pmap_t pmap, + vm_map_offset_t vaddr) +{ + ppnum_t ppn; + vm_offset_t paddr; - if (valid_page(i386_btop(old_pa))) { + paddr = (vm_offset_t)0; + ppn = pmap_find_phys(pmap, vaddr); - pai = pa_index(old_pa); - LOCK_PVH(pai); + if (ppn) { + paddr = ((vm_offset_t)i386_ptob(ppn)) | ((vm_offset_t)vaddr & INTEL_OFFMASK); + } + return (paddr); +} - assert(pmap->stats.resident_count >= 1); - pmap->stats.resident_count--; - if (iswired(*pte)) { - assert(pmap->stats.wired_count >= 1); - pmap->stats.wired_count--; - } +void +pmap_expand_pml4( + pmap_t map, + vm_map_offset_t vaddr) +{ + register vm_page_t m; + register pmap_paddr_t pa; + uint64_t i; + spl_t spl; + ppnum_t pn; + pml4_entry_t *pml4p; - pmap_phys_attributes[pai] |= - *pte & (PHYS_MODIFIED|PHYS_REFERENCED); - WRITE_PTE(pte, 0) + if (kernel_pmap == map) panic("expand kernel pml4"); - /* - * Remove the mapping from the pvlist for - * this physical page. - */ - { - register pv_entry_t prev, cur; + spl = splhigh(); + pml4p = pmap64_pml4(map, vaddr); + splx(spl); + if (PML4_ENTRY_NULL == pml4p) panic("pmap_expand_pml4 no pml4p"); - pv_h = pai_to_pvh(pai); - if (pv_h->pmap == PMAP_NULL) { - panic("pmap_enter: null pv_list!"); - } - if (pv_h->va == v && pv_h->pmap == pmap) { - /* - * Header is the pv_entry. Copy the next one - * to header and free the next one (we cannot - * free the header) - */ - cur = pv_h->next; - if (cur != PV_ENTRY_NULL) { - *pv_h = *cur; - pv_e = cur; - } - else { - pv_h->pmap = PMAP_NULL; - } - } - else { - cur = pv_h; - do { - prev = cur; - if ((cur = prev->next) == PV_ENTRY_NULL) { - panic("pmap_enter: mapping not in pv_list!"); - } - } while (cur->va != v || cur->pmap != pmap); - prev->next = cur->next; - pv_e = cur; - } - } - UNLOCK_PVH(pai); - } - else { + /* + * Allocate a VM page for the pml4 page + */ + while ((m = vm_page_grab()) == VM_PAGE_NULL) + VM_PAGE_WAIT(); - /* - * old_pa is not managed. Pretend it's zero so code - * at Step 3) will enter new mapping (overwriting old - * one). Do removal part of accounting. - */ - old_pa = (pmap_paddr_t) 0; - assert(pmap->stats.resident_count >= 1); - pmap->stats.resident_count--; - if (iswired(*pte)) { - assert(pmap->stats.wired_count >= 1); - pmap->stats.wired_count--; - } - } - - } + /* + * put the page into the pmap's obj list so it + * can be found later. + */ + pn = m->phys_page; + pa = i386_ptob(pn); + i = pml4idx(map, vaddr); - if (valid_page(i386_btop(pa))) { + /* + * Zero the page. + */ + pmap_zero_page(pn); - /* - * Step 2) Enter the mapping in the PV list for this - * physical page. 
- */ + vm_page_lockspin_queues(); + vm_page_wire(m); + vm_page_unlock_queues(); - pai = pa_index(pa); + OSAddAtomic(1, &inuse_ptepages_count); + /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */ + vm_object_lock(map->pm_obj_pml4); -#if SHARING_FAULTS -RetryPvList: - /* - * We can return here from the sharing fault code below - * in case we removed the only entry on the pv list and thus - * must enter the new one in the list header. - */ -#endif /* SHARING_FAULTS */ - LOCK_PVH(pai); - pv_h = pai_to_pvh(pai); + PMAP_LOCK(map); + /* + * See if someone else expanded us first + */ + if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) { + PMAP_UNLOCK(map); + vm_object_unlock(map->pm_obj_pml4); - if (pv_h->pmap == PMAP_NULL) { - /* - * No mappings yet - */ - pv_h->va = v; - pv_h->pmap = pmap; - pv_h->next = PV_ENTRY_NULL; - } - else { -#if DEBUG - { - /* - * check that this mapping is not already there - * or there is no alias for this mapping in the same map - */ - pv_entry_t e = pv_h; - while (e != PV_ENTRY_NULL) { - if (e->pmap == pmap && e->va == v) - panic("pmap_enter: already in pv_list"); - e = e->next; - } - } -#endif /* DEBUG */ -#if SHARING_FAULTS - { - /* - * do sharing faults. - * if we find an entry on this pv list in the same address - * space, remove it. we know there will not be more - * than one. - */ - pv_entry_t e = pv_h; - pt_entry_t *opte; - - while (e != PV_ENTRY_NULL) { - if (e->pmap == pmap) { - /* - * Remove it, drop pv list lock first. - */ - UNLOCK_PVH(pai); - - opte = pmap_pte(pmap, e->va); - assert(opte != PT_ENTRY_NULL); - /* - * Invalidate the translation buffer, - * then remove the mapping. - */ - pmap_remove_range(pmap, e->va, opte, - opte + 1); - PMAP_UPDATE_TLBS(pmap, e->va, e->va + PAGE_SIZE); - - /* - * We could have remove the head entry, - * so there could be no more entries - * and so we have to use the pv head entry. - * so, go back to the top and try the entry - * again. - */ - goto RetryPvList; - } - e = e->next; - } + VM_PAGE_FREE(m); - /* - * check that this mapping is not already there - */ - e = pv_h; - while (e != PV_ENTRY_NULL) { - if (e->pmap == pmap) - panic("pmap_enter: alias in pv_list"); - e = e->next; - } - } -#endif /* SHARING_FAULTS */ -#if DEBUG_ALIAS - { - /* - * check for aliases within the same address space. - */ - pv_entry_t e = pv_h; - vm_offset_t rpc = get_rpc(); - - while (e != PV_ENTRY_NULL) { - if (e->pmap == pmap) { - /* - * log this entry in the alias ring buffer - * if it's not there already. - */ - struct pmap_alias *pma; - int ii, logit; - - logit = TRUE; - for (ii = 0; ii < pmap_alias_index; ii++) { - if (pmap_aliasbuf[ii].rpc == rpc) { - /* found it in the log already */ - logit = FALSE; - break; - } - } - if (logit) { - pma = &pmap_aliasbuf[pmap_alias_index]; - pma->pmap = pmap; - pma->va = v; - pma->rpc = rpc; - pma->cookie = PMAP_ALIAS_COOKIE; - if (++pmap_alias_index >= PMAP_ALIAS_MAX) - panic("pmap_enter: exhausted alias log"); - } - } - e = e->next; - } - } -#endif /* DEBUG_ALIAS */ - /* - * Add new pv_entry after header. - */ - if (pv_e == PV_ENTRY_NULL) { - PV_ALLOC(pv_e); - if (pv_e == PV_ENTRY_NULL) { - panic("pmap no pv_e's"); - } - } - pv_e->va = v; - pv_e->pmap = pmap; - pv_e->next = pv_h->next; - pv_h->next = pv_e; - /* - * Remember that we used the pvlist entry. - */ - pv_e = PV_ENTRY_NULL; - } - UNLOCK_PVH(pai); + OSAddAtomic(-1, &inuse_ptepages_count); + return; } - /* - * Step 3) Enter and count the mapping. 
- */ - - pmap->stats.resident_count++; +#if 0 /* DEBUG */ + if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) { + panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n", + map, map->pm_obj_pml4, vaddr, i); + } +#endif + vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i); + vm_object_unlock(map->pm_obj_pml4); /* - * Build a template to speed up entering - - * only the pfn changes. + * Set the page directory entry for this page table. */ - template = pa_to_pte(pa) | INTEL_PTE_VALID; - - if(flags & VM_MEM_NOT_CACHEABLE) { - if(!(flags & VM_MEM_GUARDED)) - template |= INTEL_PTE_PTA; - template |= INTEL_PTE_NCACHE; - } - - if (pmap != kernel_pmap) - template |= INTEL_PTE_USER; - if (prot & VM_PROT_WRITE) - template |= INTEL_PTE_WRITE; - if (wired) { - template |= INTEL_PTE_WIRED; - pmap->stats.wired_count++; - } + pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */ - WRITE_PTE(pte, template) + pmap_store_pte(pml4p, pa_to_pte(pa) + | INTEL_PTE_VALID + | INTEL_PTE_USER + | INTEL_PTE_WRITE); -Done: - PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE); + PMAP_UNLOCK(map); - if (pv_e != PV_ENTRY_NULL) { - PV_FREE(pv_e); - } + return; - PMAP_READ_UNLOCK(pmap, spl); } -/* - * Routine: pmap_change_wiring - * Function: Change the wiring attribute for a map/virtual-address - * pair. - * In/out conditions: - * The mapping must already exist in the pmap. - */ void -pmap_change_wiring( - register pmap_t map, - vm_offset_t v, - boolean_t wired) +pmap_expand_pdpt( + pmap_t map, + vm_map_offset_t vaddr) { - register pt_entry_t *pte; + register vm_page_t m; + register pmap_paddr_t pa; + uint64_t i; spl_t spl; + ppnum_t pn; + pdpt_entry_t *pdptp; + + if (kernel_pmap == map) panic("expand kernel pdpt"); + + spl = splhigh(); + while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) { + splx(spl); + pmap_expand_pml4(map, vaddr); /* need room for another pdpt entry */ + spl = splhigh(); + } + splx(spl); -#if 1 /* - * We must grab the pmap system lock because we may - * change a pte_page queue. + * Allocate a VM page for the pdpt page */ - PMAP_READ_LOCK(map, spl); + while ((m = vm_page_grab()) == VM_PAGE_NULL) + VM_PAGE_WAIT(); - if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL) - panic("pmap_change_wiring: pte missing"); + /* + * put the page into the pmap's obj list so it + * can be found later. + */ + pn = m->phys_page; + pa = i386_ptob(pn); + i = pdptidx(map, vaddr); - if (wired && !iswired(*pte)) { - /* - * wiring down mapping - */ - map->stats.wired_count++; - *pte++ |= INTEL_PTE_WIRED; - } - else if (!wired && iswired(*pte)) { - /* - * unwiring mapping - */ - assert(map->stats.wired_count >= 1); - map->stats.wired_count--; - *pte++ &= ~INTEL_PTE_WIRED; - } + /* + * Zero the page. 
+ */ + pmap_zero_page(pn); - PMAP_READ_UNLOCK(map, spl); + vm_page_lockspin_queues(); + vm_page_wire(m); + vm_page_unlock_queues(); -#else - return; -#endif + OSAddAtomic(1, &inuse_ptepages_count); -} + /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */ + vm_object_lock(map->pm_obj_pdpt); -ppnum_t -pmap_find_phys(pmap_t pmap, addr64_t va) -{ - pt_entry_t *ptp; - vm_offset_t a32; - ppnum_t ppn; + PMAP_LOCK(map); + /* + * See if someone else expanded us first + */ + if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) { + PMAP_UNLOCK(map); + vm_object_unlock(map->pm_obj_pdpt); - if (value_64bit(va)) - panic("pmap_find_phys 64 bit value"); - a32 = (vm_offset_t) low32(va); - ptp = pmap_pte(pmap, a32); - if (PT_ENTRY_NULL == ptp) { - ppn = 0; - } else { - ppn = (ppnum_t) i386_btop(pte_to_pa(*ptp)); + VM_PAGE_FREE(m); + + OSAddAtomic(-1, &inuse_ptepages_count); + return; } - return ppn; -} -/* - * Routine: pmap_extract - * Function: - * Extract the physical page address associated - * with the given map/virtual_address pair. - * Change to shim for backwards compatibility but will not - * work for 64 bit systems. Some old drivers that we cannot - * change need this. - */ +#if 0 /* DEBUG */ + if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) { + panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n", + map, map->pm_obj_pdpt, vaddr, i); + } +#endif + vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i); + vm_object_unlock(map->pm_obj_pdpt); -vm_offset_t -pmap_extract( - register pmap_t pmap, - vm_offset_t va) -{ - ppnum_t ppn; - vm_offset_t vaddr; + /* + * Set the page directory entry for this page table. + */ + pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */ + + pmap_store_pte(pdptp, pa_to_pte(pa) + | INTEL_PTE_VALID + | INTEL_PTE_USER + | INTEL_PTE_WRITE); + + PMAP_UNLOCK(map); + + return; - vaddr = (vm_offset_t)0; - ppn = pmap_find_phys(pmap, (addr64_t)va); - if (ppn) { - vaddr = ((vm_offset_t)i386_ptob(ppn)) | (va & INTEL_OFFMASK); - } - return (vaddr); } + /* * Routine: pmap_expand * @@ -2160,23 +1979,33 @@ pmap_extract( */ void pmap_expand( - register pmap_t map, - register vm_offset_t v) + pmap_t map, + vm_map_offset_t vaddr) { pt_entry_t *pdp; register vm_page_t m; register pmap_paddr_t pa; - register int i; + uint64_t i; spl_t spl; ppnum_t pn; - if (map == kernel_pmap) { - pmap_growkernel(v); - return; + /* + * if not the kernel map (while we are still compat kernel mode) + * and we are 64 bit, propagate expand upwards + */ + + if (cpu_64bit && (map != kernel_pmap)) { + spl = splhigh(); + while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) { + splx(spl); + pmap_expand_pdpt(map, vaddr); /* need room for another pde entry */ + spl = splhigh(); + } + splx(spl); } /* - * Allocate a VM page for the level 2 page table entries. + * Allocate a VM page for the pde entries. */ while ((m = vm_page_grab()) == VM_PAGE_NULL) VM_PAGE_WAIT(); @@ -2187,72 +2016,65 @@ pmap_expand( */ pn = m->phys_page; pa = i386_ptob(pn); - i = pdenum(map, v); - vm_object_lock(map->pm_obj); - vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i); - vm_page_lock_queues(); - vm_page_wire(m); - inuse_ptepages_count++; - vm_object_unlock(map->pm_obj); - vm_page_unlock_queues(); + i = pdeidx(map, vaddr); /* * Zero the page. 
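/*
 * Summary sketch of the expansion hierarchy implemented by the three
 * routines in this area (the 64-bit case).  Each routine allocates one page
 * for the level below it and installs it at its own level; a caller that
 * finds a level missing retries its lookup in a loop until the level above
 * has made room:
 *
 *	pmap_expand_pml4()  allocates a PDPT page,            installs a PML4 entry
 *	pmap_expand_pdpt()  allocates a page-directory page,  installs a PDPT entry
 *	pmap_expand()       allocates a page-table page,      installs a PDE
 *
 * When cpu_64bit is FALSE, only pmap_expand() is used.
 */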
*/ pmap_zero_page(pn); - PMAP_READ_LOCK(map, spl); + vm_page_lockspin_queues(); + vm_page_wire(m); + vm_page_unlock_queues(); + + OSAddAtomic(1, &inuse_ptepages_count); + + /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */ + vm_object_lock(map->pm_obj); + + PMAP_LOCK(map); /* * See if someone else expanded us first */ - if (pmap_pte(map, v) != PT_ENTRY_NULL) { - PMAP_READ_UNLOCK(map, spl); - vm_object_lock(map->pm_obj); - vm_page_lock_queues(); - vm_page_free(m); - inuse_ptepages_count--; - vm_page_unlock_queues(); + + if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) { + PMAP_UNLOCK(map); vm_object_unlock(map->pm_obj); + + VM_PAGE_FREE(m); + + OSAddAtomic(-1, &inuse_ptepages_count); return; } +#if 0 /* DEBUG */ + if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) { + panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n", + map, map->pm_obj, vaddr, i); + } +#endif + vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i); + vm_object_unlock(map->pm_obj); + + /* + * refetch while locked + */ + + pdp = pmap_pde(map, vaddr); + /* * Set the page directory entry for this page table. - * If we have allocated more than one hardware page, - * set several page directory entries. */ + pmap_store_pte(pdp, pa_to_pte(pa) + | INTEL_PTE_VALID + | INTEL_PTE_USER + | INTEL_PTE_WRITE); - pdp = &map->dirbase[pdenum(map, v)]; - *pdp = pa_to_pte(pa) - | INTEL_PTE_VALID - | INTEL_PTE_USER - | INTEL_PTE_WRITE; + PMAP_UNLOCK(map); - PMAP_READ_UNLOCK(map, spl); return; } -/* - * Copy the range specified by src_addr/len - * from the source map to the range dst_addr/len - * in the destination map. - * - * This routine is only advisory and need not do anything. - */ -#if 0 -void -pmap_copy( - pmap_t dst_pmap, - pmap_t src_pmap, - vm_offset_t dst_addr, - vm_size_t len, - vm_offset_t src_addr) -{ -#ifdef lint - dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++; -#endif /* lint */ -} -#endif/* 0 */ /* * pmap_sync_page_data_phys(ppnum_t pa) @@ -2278,6 +2100,10 @@ pmap_sync_page_attributes_phys(ppnum_t pa) cache_flush_page_phys(pa); } + + +#ifdef CURRENTLY_UNUSED_AND_UNTESTED + int collect_ref; int collect_unref; @@ -2299,7 +2125,6 @@ pmap_collect( register pt_entry_t *pdp, *ptp; pt_entry_t *eptp; int wired; - spl_t spl; if (p == PMAP_NULL) return; @@ -2310,7 +2135,7 @@ pmap_collect( /* * Garbage collect map. */ - PMAP_READ_LOCK(p, spl); + PMAP_LOCK(p); for (pdp = (pt_entry_t *)p->dirbase; pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]; @@ -2318,7 +2143,7 @@ pmap_collect( { if (*pdp & INTEL_PTE_VALID) { if(*pdp & INTEL_PTE_REF) { - *pdp &= ~INTEL_PTE_REF; + pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF); collect_ref++; } else { collect_unref++; @@ -2351,9 +2176,9 @@ pmap_collect( /* * Invalidate the page directory pointer. */ - *pdp = 0x0; + pmap_store_pte(pdp, 0x0); - PMAP_READ_UNLOCK(p, spl); + PMAP_UNLOCK(p); /* * And free the pte page itself. 
@@ -2362,44 +2187,34 @@ pmap_collect( register vm_page_t m; vm_object_lock(p->pm_obj); + m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0])); if (m == VM_PAGE_NULL) panic("pmap_collect: pte page not in object"); - vm_page_lock_queues(); - vm_page_free(m); - inuse_ptepages_count--; - vm_page_unlock_queues(); + + VM_PAGE_FREE(m); + + OSAddAtomic(-1, &inuse_ptepages_count); + vm_object_unlock(p->pm_obj); } - PMAP_READ_LOCK(p, spl); + PMAP_LOCK(p); } } } } - PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS); - PMAP_READ_UNLOCK(p, spl); + + PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL); + PMAP_UNLOCK(p); return; } +#endif -/* - * Routine: pmap_kernel - * Function: - * Returns the physical map handle for the kernel. - */ -#if 0 -pmap_t -pmap_kernel(void) -{ - return (kernel_pmap); -} -#endif/* 0 */ void -pmap_copy_page(src, dst) - ppnum_t src; - ppnum_t dst; +pmap_copy_page(ppnum_t src, ppnum_t dst) { bcopy_phys((addr64_t)i386_ptob(src), (addr64_t)i386_ptob(dst), @@ -2424,8 +2239,8 @@ pmap_copy_page(src, dst) void pmap_pageable( __unused pmap_t pmap, - __unused vm_offset_t start_addr, - __unused vm_offset_t end_addr, + __unused vm_map_offset_t start_addr, + __unused vm_map_offset_t end_addr, __unused boolean_t pageable) { #ifdef lint @@ -2438,35 +2253,37 @@ pmap_pageable( */ void phys_attribute_clear( - ppnum_t pn, + ppnum_t pn, int bits) { - pv_entry_t pv_h; - register pv_entry_t pv_e; + pv_rooted_entry_t pv_h; + register pv_hashed_entry_t pv_e; register pt_entry_t *pte; int pai; register pmap_t pmap; - spl_t spl; - pmap_paddr_t phys; + pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); - if (!valid_page(pn)) { + if (pn == vm_page_guard_addr) + return; + + pai = ppn_to_pai(pn); + + if (!managed_page(pai)) { /* * Not a managed page. */ return; } - /* - * Lock the pmap system first, since we will be changing - * several pmaps. - */ - PMAP_WRITE_LOCK(spl); - phys = i386_ptob(pn); - pai = pa_index(phys); + PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START, + (int) pn, bits, 0, 0, 0); + pv_h = pai_to_pvh(pai); + LOCK_PVH(pai); + /* * Walk down PV list, clearing all modify or reference bits. * We do not have to lock the pv_list because we have @@ -2476,86 +2293,94 @@ phys_attribute_clear( /* * There are some mappings. */ - for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) { + pv_e = (pv_hashed_entry_t)pv_h; + + do { pmap = pv_e->pmap; - /* - * Lock the pmap to block pmap_extract and similar routines. - */ - simple_lock(&pmap->lock); { - register vm_offset_t va; + vm_map_offset_t va; va = pv_e->va; - pte = pmap_pte(pmap, va); -#if 0 /* - * Consistency checks. + * Clear modify and/or reference bits. */ - assert(*pte & INTEL_PTE_VALID); - /* assert(pte_to_phys(*pte) == phys); */ -#endif - /* - * Clear modify or reference bits. - */ - - *pte++ &= ~bits; - PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); + pte = pmap_pte(pmap, va); + pmap_update_pte(pte, *pte, (*pte & ~bits)); + /* Ensure all processors using this translation + * invalidate this TLB entry. The invalidation *must* follow + * the PTE update, to ensure that the TLB shadow of the + * 'D' bit (in particular) is synchronized with the + * updated PTE. 
+ */ + PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); } - simple_unlock(&pmap->lock); - } - } + pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink); + } while (pv_e != (pv_hashed_entry_t)pv_h); + } pmap_phys_attributes[pai] &= ~bits; - PMAP_WRITE_UNLOCK(spl); + UNLOCK_PVH(pai); + + PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END, + 0, 0, 0, 0, 0); + } /* * Check specified attribute bits. */ -boolean_t +int phys_attribute_test( - ppnum_t pn, + ppnum_t pn, int bits) { - pv_entry_t pv_h; - register pv_entry_t pv_e; + pv_rooted_entry_t pv_h; + register pv_hashed_entry_t pv_e; register pt_entry_t *pte; int pai; register pmap_t pmap; - spl_t spl; - pmap_paddr_t phys; + int attributes = 0; + pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); - if (!valid_page(pn)) { + if (pn == vm_page_guard_addr) + return 0; + + pai = ppn_to_pai(pn); + + if (!managed_page(pai)) { /* * Not a managed page. */ - return (FALSE); + return (0); } /* - * Lock the pmap system first, since we will be checking - * several pmaps. + * super fast check... if bits already collected + * no need to take any locks... + * if not set, we need to recheck after taking + * the lock in case they got pulled in while + * we were waiting for the lock */ + if ( (pmap_phys_attributes[pai] & bits) == bits) + return (bits); - PMAP_WRITE_LOCK(spl); - phys = i386_ptob(pn); - pai = pa_index(phys); pv_h = pai_to_pvh(pai); - if (pmap_phys_attributes[pai] & bits) { - PMAP_WRITE_UNLOCK(spl); - return (TRUE); - } + LOCK_PVH(pai); + + attributes = pmap_phys_attributes[pai] & bits; + /* - * Walk down PV list, checking all mappings. + * Walk down PV list, checking the mappings until we + * reach the end or we've found the attributes we've asked for * We do not have to lock the pv_list because we have * the entire pmap system locked. */ @@ -2563,44 +2388,36 @@ phys_attribute_test( /* * There are some mappings. */ - for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) { + pv_e = (pv_hashed_entry_t)pv_h; + if (attributes != bits) do { - pmap = pv_e->pmap; - /* - * Lock the pmap to block pmap_extract and similar routines. - */ - simple_lock(&pmap->lock); + pmap = pv_e->pmap; { - register vm_offset_t va; + vm_map_offset_t va; va = pv_e->va; - pte = pmap_pte(pmap, va); + /* + * first make sure any processor actively + * using this pmap, flushes its TLB state + */ + PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE); -#if 0 /* - * Consistency checks. + * pick up modify and/or reference bits from this mapping */ - assert(*pte & INTEL_PTE_VALID); - /* assert(pte_to_phys(*pte) == phys); */ -#endif - } + pte = pmap_pte(pmap, va); + attributes |= (int)(*pte & bits); - /* - * Check modify or reference bits. - */ - { - if (*pte++ & bits) { - simple_unlock(&pmap->lock); - PMAP_WRITE_UNLOCK(spl); - return (TRUE); - } } - simple_unlock(&pmap->lock); - } + + pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink); + + } while ((attributes != bits) && (pv_e != (pv_hashed_entry_t)pv_h)); } - PMAP_WRITE_UNLOCK(spl); - return (FALSE); + + UNLOCK_PVH(pai); + return (attributes); } /* @@ -2608,29 +2425,30 @@ phys_attribute_test( */ void phys_attribute_set( - ppnum_t pn, + ppnum_t pn, int bits) { - int spl; - pmap_paddr_t phys; + int pai; + pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); - if (!valid_page(pn)) { + if (pn == vm_page_guard_addr) + return; + + pai = ppn_to_pai(pn); + + if (!managed_page(pai)) { /* * Not a managed page. */ return; } - /* - * Lock the pmap system and set the requested bits in - * the phys attributes array. 
Don't need to bother with - * ptes because the test routine looks here first. - */ - phys = i386_ptob(pn); - PMAP_WRITE_LOCK(spl); - pmap_phys_attributes[pa_index(phys)] |= bits; - PMAP_WRITE_UNLOCK(spl); + LOCK_PVH(pai); + + pmap_phys_attributes[pai] |= bits; + + UNLOCK_PVH(pai); } /* @@ -2665,7 +2483,10 @@ boolean_t pmap_is_modified( ppnum_t pn) { - return (phys_attribute_test(pn, PHYS_MODIFIED)); + if (phys_attribute_test(pn, PHYS_MODIFIED)) + return TRUE; + + return FALSE; } /* @@ -2698,7 +2519,10 @@ boolean_t pmap_is_referenced( ppnum_t pn) { - return (phys_attribute_test(pn, PHYS_REFERENCED)); + if (phys_attribute_test(pn, PHYS_REFERENCED)) + return TRUE; + + return FALSE; } /* @@ -2709,8 +2533,17 @@ pmap_is_referenced( unsigned int pmap_get_refmod(ppnum_t pa) { - return ( ((phys_attribute_test(pa, PHYS_MODIFIED))? VM_MEM_MODIFIED : 0) - | ((phys_attribute_test(pa, PHYS_REFERENCED))? VM_MEM_REFERENCED : 0)); + int refmod; + unsigned int retval = 0; + + refmod = phys_attribute_test(pa, PHYS_MODIFIED | PHYS_REFERENCED); + + if (refmod & PHYS_MODIFIED) + retval |= VM_MEM_MODIFIED; + if (refmod & PHYS_REFERENCED) + retval |= VM_MEM_REFERENCED; + + return (retval); } /* @@ -2725,280 +2558,48 @@ pmap_clear_refmod(ppnum_t pa, unsigned int mask) x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0) | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0)); - phys_attribute_clear(pa, x86Mask); -} - -/* - * Set the modify bit on the specified range - * of this map as requested. - * - * This optimization stands only if each time the dirty bit - * in vm_page_t is tested, it is also tested in the pmap. - */ -void -pmap_modify_pages( - pmap_t map, - vm_offset_t s, - vm_offset_t e) -{ - spl_t spl; - register pt_entry_t *pde; - register pt_entry_t *spte, *epte; - vm_offset_t l; - vm_offset_t orig_s = s; - - if (map == PMAP_NULL) - return; - - PMAP_READ_LOCK(map, spl); - - pde = pmap_pde(map, s); - while (s && s < e) { - l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); - if (l > e) - l = e; - if (*pde & INTEL_PTE_VALID) { - spte = (pt_entry_t *)pmap_pte(map, (s & ~(PDE_MAPPED_SIZE-1))); - if (l) { - spte = &spte[ptenum(s)]; - epte = &spte[intel_btop(l-s)]; - } else { - epte = &spte[intel_btop(PDE_MAPPED_SIZE)]; - spte = &spte[ptenum(s)]; - } - while (spte < epte) { - if (*spte & INTEL_PTE_VALID) { - *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE); - } - spte++; - } - } - s = l; - pde++; - } - PMAP_UPDATE_TLBS(map, orig_s, e); - PMAP_READ_UNLOCK(map, spl); -} - - -void -invalidate_icache(__unused vm_offset_t addr, - __unused unsigned cnt, - __unused int phys) -{ - return; -} -void -flush_dcache(__unused vm_offset_t addr, - __unused unsigned count, - __unused int phys) -{ - return; -} - -/* -* TLB Coherence Code (TLB "shootdown" code) -* -* Threads that belong to the same task share the same address space and -* hence share a pmap. However, they may run on distinct cpus and thus -* have distinct TLBs that cache page table entries. In order to guarantee -* the TLBs are consistent, whenever a pmap is changed, all threads that -* are active in that pmap must have their TLB updated. To keep track of -* this information, the set of cpus that are currently using a pmap is -* maintained within each pmap structure (cpus_using). Pmap_activate() and -* pmap_deactivate add and remove, respectively, a cpu from this set. 
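/*
 * Hypothetical usage sketch, not from the original source, for the
 * pmap_get_refmod()/pmap_clear_refmod() interfaces shown earlier in this
 * hunk: test whether any mapping has dirtied physical page 'pn' and, once
 * the page has been cleaned, clear its modified state.
 */
static void
sketch_clean_if_dirty(ppnum_t pn)
{
	if (pmap_get_refmod(pn) & VM_MEM_MODIFIED) {
		/* ... write the page contents back ... */
		pmap_clear_refmod(pn, VM_MEM_MODIFIED);
	}
}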
-* Since the TLBs are not addressable over the bus, each processor must -* flush its own TLB; a processor that needs to invalidate another TLB -* needs to interrupt the processor that owns that TLB to signal the -* update. -* -* Whenever a pmap is updated, the lock on that pmap is locked, and all -* cpus using the pmap are signaled to invalidate. All threads that need -* to activate a pmap must wait for the lock to clear to await any updates -* in progress before using the pmap. They must ACQUIRE the lock to add -* their cpu to the cpus_using set. An implicit assumption made -* throughout the TLB code is that all kernel code that runs at or higher -* than splvm blocks out update interrupts, and that such code does not -* touch pageable pages. -* -* A shootdown interrupt serves another function besides signaling a -* processor to invalidate. The interrupt routine (pmap_update_interrupt) -* waits for the both the pmap lock (and the kernel pmap lock) to clear, -* preventing user code from making implicit pmap updates while the -* sending processor is performing its update. (This could happen via a -* user data write reference that turns on the modify bit in the page -* table). It must wait for any kernel updates that may have started -* concurrently with a user pmap update because the IPC code -* changes mappings. -* Spinning on the VALUES of the locks is sufficient (rather than -* having to acquire the locks) because any updates that occur subsequent -* to finding the lock unlocked will be signaled via another interrupt. -* (This assumes the interrupt is cleared before the low level interrupt code -* calls pmap_update_interrupt()). -* -* The signaling processor must wait for any implicit updates in progress -* to terminate before continuing with its update. Thus it must wait for an -* acknowledgement of the interrupt from each processor for which such -* references could be made. For maintaining this information, a set -* cpus_active is used. A cpu is in this set if and only if it can -* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from -* this set; when all such cpus are removed, it is safe to update. -* -* Before attempting to acquire the update lock on a pmap, a cpu (A) must -* be at least at the priority of the interprocessor interrupt -* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a -* kernel update; it would spin forever in pmap_update_interrupt() trying -* to acquire the user pmap lock it had already acquired. Furthermore A -* must remove itself from cpus_active. Otherwise, another cpu holding -* the lock (B) could be in the process of sending an update signal to A, -* and thus be waiting for A to remove itself from cpus_active. If A is -* spinning on the lock at priority this will never happen and a deadlock -* will result. -*/ - -/* - * Signal another CPU that it must flush its TLB - */ -void -signal_cpus( - cpu_set use_list, - pmap_t pmap, - vm_offset_t start_addr, - vm_offset_t end_addr) -{ - register int which_cpu, j; - register pmap_update_list_t update_list_p; - - while ((which_cpu = ffs((unsigned long)use_list)) != 0) { - which_cpu -= 1; /* convert to 0 origin */ - - update_list_p = cpu_update_list(which_cpu); - simple_lock(&update_list_p->lock); - - j = update_list_p->count; - if (j >= UPDATE_LIST_SIZE) { - /* - * list overflowed. Change last item to - * indicate overflow. 
- */ - update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap; - update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS; - update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS; - } - else { - update_list_p->item[j].pmap = pmap; - update_list_p->item[j].start = start_addr; - update_list_p->item[j].end = end_addr; - update_list_p->count = j+1; - } - cpu_update_needed(which_cpu) = TRUE; - simple_unlock(&update_list_p->lock); - - /* if its the kernel pmap, ignore cpus_idle */ - if (((cpus_idle & (1 << which_cpu)) == 0) || - (pmap == kernel_pmap) || PMAP_REAL(which_cpu) == pmap) - { - i386_signal_cpu(which_cpu, MP_TLB_FLUSH, ASYNC); - } - use_list &= ~(1 << which_cpu); - } + phys_attribute_clear(pa, x86Mask); } -void -process_pmap_updates( - register pmap_t my_pmap) +void +invalidate_icache(__unused vm_offset_t addr, + __unused unsigned cnt, + __unused int phys) { - register int my_cpu; - register pmap_update_list_t update_list_p; - register int j; - register pmap_t pmap; - - mp_disable_preemption(); - my_cpu = cpu_number(); - update_list_p = cpu_update_list(my_cpu); - simple_lock(&update_list_p->lock); - - for (j = 0; j < update_list_p->count; j++) { - pmap = update_list_p->item[j].pmap; - if (pmap == my_pmap || - pmap == kernel_pmap) { - - if (pmap->ref_count <= 0) { - PMAP_CPU_CLR(pmap, my_cpu); - PMAP_REAL(my_cpu) = kernel_pmap; -#ifdef PAE - set_cr3((unsigned int)kernel_pmap->pm_ppdpt); -#else - set_cr3((unsigned int)kernel_pmap->pdirbase); -#endif - } else - INVALIDATE_TLB(pmap, - update_list_p->item[j].start, - update_list_p->item[j].end); - } - } - update_list_p->count = 0; - cpu_update_needed(my_cpu) = FALSE; - simple_unlock(&update_list_p->lock); - mp_enable_preemption(); + return; +} +void +flush_dcache(__unused vm_offset_t addr, + __unused unsigned count, + __unused int phys) +{ + return; } +#if CONFIG_DTRACE /* - * Interrupt routine for TBIA requested from other processor. - * This routine can also be called at all interrupts time if - * the cpu was idle. Some driver interrupt routines might access - * newly allocated vm. (This is the case for hd) + * Constrain DTrace copyin/copyout actions */ -void -pmap_update_interrupt(void) -{ - register int my_cpu; - spl_t s; - register pmap_t my_pmap; - - mp_disable_preemption(); - my_cpu = cpu_number(); - - /* - * Raise spl to splvm (above splip) to block out pmap_extract - * from IO code (which would put this cpu back in the active - * set). - */ - s = splhigh(); - - my_pmap = PMAP_REAL(my_cpu); - - if (!(my_pmap && pmap_in_use(my_pmap, my_cpu))) - my_pmap = kernel_pmap; - - do { - LOOP_VAR; - - /* - * Indicate that we're not using either user or kernel - * pmap. - */ - i_bit_clear(my_cpu, &cpus_active); - - /* - * Wait for any pmap updates in progress, on either user - * or kernel pmap. 
- */ - while (*(volatile int *)(&my_pmap->lock.interlock.lock_data) || - *(volatile int *)(&kernel_pmap->lock.interlock.lock_data)) { - LOOP_CHECK("pmap_update_interrupt", my_pmap); - cpu_pause(); - } +extern kern_return_t dtrace_copyio_preflight(addr64_t); +extern kern_return_t dtrace_copyio_postflight(addr64_t); - process_pmap_updates(my_pmap); - - i_bit_set(my_cpu, &cpus_active); +kern_return_t dtrace_copyio_preflight(__unused addr64_t va) +{ + thread_t thread = current_thread(); - } while (cpu_update_needed(my_cpu)); - - splx(s); - mp_enable_preemption(); + if (current_map() == kernel_map) + return KERN_FAILURE; + else if (thread->machine.specFlags & CopyIOActive) + return KERN_FAILURE; + else + return KERN_SUCCESS; +} + +kern_return_t dtrace_copyio_postflight(__unused addr64_t va) +{ + return KERN_SUCCESS; } +#endif /* CONFIG_DTRACE */ #if MACH_KDB @@ -3006,6 +2607,7 @@ pmap_update_interrupt(void) extern void db_show_page(pmap_paddr_t pa); +#if 0 void db_show_page(pmap_paddr_t pa) { @@ -3017,7 +2619,7 @@ db_show_page(pmap_paddr_t pa) pv_h = pai_to_pvh(pai); attr = pmap_phys_attributes[pai]; - printf("phys page %x ", pa); + printf("phys page %llx ", pa); if (attr & PHYS_MODIFIED) printf("modified, "); if (attr & PHYS_REFERENCED) @@ -3028,12 +2630,14 @@ db_show_page(pmap_paddr_t pa) printf(" not mapped\n"); for (; pv_h; pv_h = pv_h->next) if (pv_h->pmap) - printf("%x in pmap %x\n", pv_h->va, pv_h->pmap); + printf("%llx in pmap %p\n", pv_h->va, pv_h->pmap); } +#endif #endif /* MACH_KDB */ #if MACH_KDB +#if 0 void db_kvtophys(vm_offset_t); void db_show_vaddrs(pt_entry_t *); @@ -3044,7 +2648,7 @@ void db_kvtophys( vm_offset_t vaddr) { - db_printf("0x%x", kvtophys(vaddr)); + db_printf("0x%qx", kvtophys(vaddr)); } /* @@ -3055,7 +2659,7 @@ db_show_vaddrs( pt_entry_t *dirbase) { pt_entry_t *ptep, *pdep, tmp; - int x, y, pdecnt, ptecnt; + unsigned int x, y, pdecnt, ptecnt; if (dirbase == 0) { dirbase = kernel_pmap->dirbase; @@ -3064,7 +2668,7 @@ db_show_vaddrs( db_printf("need a dirbase...\n"); return; } - dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK); + dirbase = (pt_entry_t *) (int) ((unsigned long) dirbase & ~INTEL_OFFMASK); db_printf("dirbase: 0x%x\n", dirbase); @@ -3075,7 +2679,7 @@ db_show_vaddrs( continue; } pdecnt++; - ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK); + ptep = (pt_entry_t *) ((unsigned long)(*pdep) & ~INTEL_OFFMASK); db_printf("dir[%4d]: 0x%x\n", y, *pdep); for (x = 0; x < NPTEPG; x++, ptep++) { if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) { @@ -3093,6 +2697,7 @@ db_show_vaddrs( db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt); } +#endif #endif /* MACH_KDB */ #include @@ -3109,230 +2714,91 @@ pmap_list_resident_pages( } #endif /* MACH_VM_DEBUG */ -#ifdef MACH_BSD -/* - * pmap_pagemove - * - * BSD support routine to reassign virtual addresses. 
- */ - -void -pmap_movepage(unsigned long from, unsigned long to, vm_size_t size) -{ - spl_t spl; - pt_entry_t *pte, saved_pte; - - /* Lock the kernel map */ - PMAP_READ_LOCK(kernel_pmap, spl); - - - while (size > 0) { - pte = pmap_pte(kernel_pmap, from); - if (pte == NULL) - panic("pmap_pagemove from pte NULL"); - saved_pte = *pte; - PMAP_READ_UNLOCK(kernel_pmap, spl); - - pmap_enter(kernel_pmap, to, (ppnum_t)i386_btop(i386_trunc_page(*pte)), - VM_PROT_READ|VM_PROT_WRITE, 0, *pte & INTEL_PTE_WIRED); - - pmap_remove(kernel_pmap, (addr64_t)from, (addr64_t)(from+PAGE_SIZE)); - - PMAP_READ_LOCK(kernel_pmap, spl); - pte = pmap_pte(kernel_pmap, to); - if (pte == NULL) - panic("pmap_pagemove 'to' pte NULL"); - - *pte = saved_pte; - - from += PAGE_SIZE; - to += PAGE_SIZE; - size -= PAGE_SIZE; - } - - /* Get the processors to update the TLBs */ - PMAP_UPDATE_TLBS(kernel_pmap, from, from+size); - PMAP_UPDATE_TLBS(kernel_pmap, to, to+size); - PMAP_READ_UNLOCK(kernel_pmap, spl); - -} -#endif /* MACH_BSD */ /* temporary workaround */ boolean_t -coredumpok(vm_map_t map, vm_offset_t va) +coredumpok(__unused vm_map_t map, __unused vm_offset_t va) { +#if 0 pt_entry_t *ptep; ptep = pmap_pte(map->pmap, va); if (0 == ptep) return FALSE; return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)); -} - -/* - * grow the number of kernel page table entries, if needed - */ -void -pmap_growkernel(vm_offset_t addr) -{ -#if GROW_KERNEL_FUNCTION_IMPLEMENTED - struct pmap *pmap; - int s; - vm_offset_t ptppaddr; - ppnum_t ppn; - vm_page_t nkpg; - pd_entry_t newpdir = 0; - - /* - * Serialize. - * Losers return to try again until the winner completes the work. - */ - if (kptobj == 0) panic("growkernel 0"); - if (!vm_object_lock_try(kptobj)) { - return; - } - - vm_page_lock_queues(); - - s = splhigh(); - - /* - * If this is the first time thru, locate the end of the - * kernel page table entries and set nkpt to the current - * number of kernel page table pages - */ - - if (kernel_vm_end == 0) { - kernel_vm_end = KERNBASE; - nkpt = 0; - - while (pdir_pde(kernel_pmap->dirbase, kernel_vm_end)) { - kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); - nkpt++; - } - } - - /* - * Now allocate and map the required number of page tables - */ - addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); - while (kernel_vm_end < addr) { - if (pdir_pde(kernel_pmap->dirbase, kernel_vm_end)) { - kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1); - continue; /* someone already filled this one */ - } - - nkpg = vm_page_alloc(kptobj, nkpt); - if (!nkpg) - panic("pmap_growkernel: no memory to grow kernel"); - - nkpt++; - vm_page_wire(nkpg); - ppn = nkpg->phys_page; - pmap_zero_page(ppn); - ptppaddr = i386_ptob(ppn); - newpdir = (pd_entry_t) (ptppaddr | INTEL_PTE_VALID | - INTEL_PTE_RW | INTEL_PTE_REF | INTEL_PTE_MOD); - pdir_pde(kernel_pmap->dirbase, kernel_vm_end) = newpdir; - - simple_lock(&free_pmap_lock); - for (pmap = (struct pmap *)kernel_pmap->pmap_link.next; - pmap != kernel_pmap ; - pmap = (struct pmap *)pmap->pmap_link.next ) { - *pmap_pde(pmap, kernel_vm_end) = newpdir; - } - simple_unlock(&free_pmap_lock); - } - splx(s); - vm_page_unlock_queues(); - vm_object_unlock(kptobj); +#else + return TRUE; #endif } -pt_entry_t * -pmap_mapgetpte(vm_map_t map, vm_offset_t v) -{ - return pmap_pte(map->pmap, v); -} boolean_t phys_page_exists( ppnum_t pn) { - pmap_paddr_t phys; - assert(pn != vm_page_fictitious_addr); if (!pmap_initialized) 
return (TRUE); - phys = (pmap_paddr_t) i386_ptob(pn); - if (!pmap_valid_page(pn)) + + if (pn == vm_page_guard_addr) + return FALSE; + + if (!managed_page(ppn_to_pai(pn))) return (FALSE); return TRUE; } void -mapping_free_prime() +pmap_commpage32_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt) { - int i; - pv_entry_t pv_e; - - for (i = 0; i < (5 * PV_ALLOC_CHUNK); i++) { - pv_e = (pv_entry_t) zalloc(pv_list_zone); - PV_FREE(pv_e); + int i; + pt_entry_t *opte, *npte; + pt_entry_t pte; + spl_t s; + + for (i = 0; i < cnt; i++) { + s = splhigh(); + opte = pmap_pte(kernel_pmap, (vm_map_offset_t)kernel_commpage); + if (0 == opte) + panic("kernel_commpage"); + pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL; + pte &= ~INTEL_PTE_WRITE; // ensure read only + npte = pmap_pte(kernel_pmap, (vm_map_offset_t)user_commpage); + if (0 == npte) + panic("user_commpage"); + pmap_store_pte(npte, pte); + splx(s); + kernel_commpage += INTEL_PGBYTES; + user_commpage += INTEL_PGBYTES; } } -void -mapping_adjust() -{ - pv_entry_t pv_e; - int i; - int spl; - if (mapping_adjust_call == NULL) { - thread_call_setup(&mapping_adjust_call_data, - (thread_call_func_t) mapping_adjust, - (thread_call_param_t) NULL); - mapping_adjust_call = &mapping_adjust_call_data; - } - /* XXX rethink best way to do locking here */ - if (pv_free_count < PV_LOW_WATER_MARK) { - for (i = 0; i < PV_ALLOC_CHUNK; i++) { - pv_e = (pv_entry_t) zalloc(pv_list_zone); - SPLVM(spl); - PV_FREE(pv_e); - SPLX(spl); - } - } - mappingrecurse = 0; -} +#define PMAP_COMMPAGE64_CNT (_COMM_PAGE64_AREA_USED/PAGE_SIZE) +pt_entry_t pmap_commpage64_ptes[PMAP_COMMPAGE64_CNT]; void -pmap_commpage_init(vm_offset_t kernel_commpage, vm_offset_t user_commpage, int cnt) +pmap_commpage64_init(vm_offset_t kernel_commpage, __unused vm_map_offset_t user_commpage, int cnt) { - int i; - pt_entry_t *opte, *npte; - pt_entry_t pte; - - for (i = 0; i < cnt; i++) { - opte = pmap_pte(kernel_pmap, kernel_commpage); - if (0 == opte) panic("kernel_commpage"); - npte = pmap_pte(kernel_pmap, user_commpage); - if (0 == npte) panic("user_commpage"); - pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL; - pte &= ~INTEL_PTE_WRITE; // ensure read only - WRITE_PTE_FAST(npte, pte); - kernel_commpage += INTEL_PGBYTES; - user_commpage += INTEL_PGBYTES; - } + int i; + pt_entry_t *kptep; + + PMAP_LOCK(kernel_pmap); + + for (i = 0; i < cnt; i++) { + kptep = pmap_pte(kernel_pmap, (uint64_t)kernel_commpage + (i*PAGE_SIZE)); + if ((0 == kptep) || (0 == (*kptep & INTEL_PTE_VALID))) + panic("pmap_commpage64_init pte"); + pmap_commpage64_ptes[i] = ((*kptep & ~INTEL_PTE_WRITE) | INTEL_PTE_USER); + } + PMAP_UNLOCK(kernel_pmap); } + static cpu_pmap_t cpu_pmap_master; -static struct pmap_update_list cpu_update_list_master; struct cpu_pmap * pmap_cpu_alloc(boolean_t is_boot_cpu) @@ -3340,13 +2806,13 @@ pmap_cpu_alloc(boolean_t is_boot_cpu) int ret; int i; cpu_pmap_t *cp; - pmap_update_list_t up; vm_offset_t address; + vm_map_address_t mapaddr; vm_map_entry_t entry; + pt_entry_t *pte; if (is_boot_cpu) { cp = &cpu_pmap_master; - up = &cpu_update_list_master; } else { /* * The per-cpu pmap data structure itself. @@ -3360,57 +2826,387 @@ pmap_cpu_alloc(boolean_t is_boot_cpu) bzero((void *)cp, sizeof(cpu_pmap_t)); /* - * The tlb flush update list. 
+	 * The temporary windows used for copy/zero - see loose_ends.c
 	 */
-	ret = kmem_alloc(kernel_map,
-			 (vm_offset_t *) &up, sizeof(*up));
+	ret = vm_map_find_space(kernel_map,
+			 &mapaddr, PMAP_NWINDOWS*PAGE_SIZE, (vm_map_offset_t)0, 0, &entry);
 	if (ret != KERN_SUCCESS) {
-		printf("pmap_cpu_alloc() failed ret=%d\n", ret);
+		printf("pmap_cpu_alloc() "
+		       "vm_map_find_space ret=%d\n", ret);
 		pmap_cpu_free(cp);
 		return NULL;
 	}
+	address = (vm_offset_t)mapaddr;
+
+	for (i = 0; i < PMAP_NWINDOWS; i++, address += PAGE_SIZE) {
+		spl_t s;
+		s = splhigh();
+		while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
+			pmap_expand(kernel_pmap, (vm_map_offset_t)address);
+		* (int *) pte = 0;
+		cp->mapwindow[i].prv_CADDR = (caddr_t) address;
+		cp->mapwindow[i].prv_CMAP = pte;
+		splx(s);
+	}
+	vm_map_unlock(kernel_map);
+	}
+
+	cp->pdpt_window_index = PMAP_PDPT_FIRST_WINDOW;
+	cp->pde_window_index = PMAP_PDE_FIRST_WINDOW;
+	cp->pte_window_index = PMAP_PTE_FIRST_WINDOW;
+
+	return cp;
+}
+
+void
+pmap_cpu_free(struct cpu_pmap *cp)
+{
+	if (cp != NULL && cp != &cpu_pmap_master) {
+		kfree((void *) cp, sizeof(cpu_pmap_t));
+	}
+}
+
+
+mapwindow_t *
+pmap_get_mapwindow(pt_entry_t pentry)
+{
+	mapwindow_t *mp;
+	int i;
+
+	assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
+
+	/*
+	 * Note: 0th map reserved for pmap_pte()
+	 */
+	for (i = PMAP_NWINDOWS_FIRSTFREE; i < PMAP_NWINDOWS; i++) {
+		mp = &current_cpu_datap()->cpu_pmap->mapwindow[i];
+
+		if (*mp->prv_CMAP == 0) {
+			pmap_store_pte(mp->prv_CMAP, pentry);
+
+			invlpg((uintptr_t)mp->prv_CADDR);
+
+			return (mp);
+		}
+	}
+	panic("pmap_get_mapwindow: no windows available");
+
+	return NULL;
+}
+
+
+void
+pmap_put_mapwindow(mapwindow_t *mp)
+{
+	pmap_store_pte(mp->prv_CMAP, 0);
+}
+
+void
+pmap_switch(pmap_t tpmap)
+{
+	spl_t	s;
+
+	s = splhigh();		/* Make sure interruptions are disabled */
+
+	set_dirbase(tpmap, current_thread());
+
+	splx(s);
+}
+
+
+/*
+ * disable no-execute capability on
+ * the specified pmap
+ */
+void pmap_disable_NX(pmap_t pmap) {
+
+	pmap->nx_enabled = 0;
+}
+
+void
+pt_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
+		  vm_size_t *alloc_size, int *collectable, int *exhaustable)
+{
+	*count      = inuse_ptepages_count;
+	*cur_size   = PAGE_SIZE * inuse_ptepages_count;
+	*max_size   = PAGE_SIZE * (inuse_ptepages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count);
+	*elem_size  = PAGE_SIZE;
+	*alloc_size = PAGE_SIZE;
+
+	*collectable = 1;
+	*exhaustable = 0;
+}
+
+vm_offset_t pmap_cpu_high_map_vaddr(int cpu, enum high_cpu_types e)
+{
+	enum high_fixed_addresses a;
+	a = e + HIGH_CPU_END * cpu;
+	return pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+}
+
+vm_offset_t pmap_high_map_vaddr(enum high_cpu_types e)
+{
+	return pmap_cpu_high_map_vaddr(cpu_number(), e);
+}
+
+vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
+{
+	enum high_fixed_addresses a;
+	vm_offset_t vaddr;
+
+	a = e + HIGH_CPU_END * cpu_number();
+	vaddr = (vm_offset_t)pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);
+	pmap_store_pte(pte_unique_base + a, pte);
+
+	/* TLB flush for this page for this cpu */
+	invlpg((uintptr_t)vaddr);
+
+	return vaddr;
+}
+
+static inline void
+pmap_cpuset_NMIPI(cpu_set cpu_mask) {
+	unsigned int cpu, cpu_bit;
+	uint64_t deadline;
+
+	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
+		if (cpu_mask & cpu_bit)
+			cpu_NMI_interrupt(cpu);
+	}
+	deadline = mach_absolute_time() + (LockTimeOut);
+	while (mach_absolute_time() < deadline)
+		
cpu_pause(); +} + +/* + * Called with pmap locked, we: + * - scan through per-cpu data to see which other cpus need to flush + * - send an IPI to each non-idle cpu to be flushed + * - wait for all to signal back that they are inactive or we see that + * they are in an interrupt handler or at a safe point + * - flush the local tlb is active for this pmap + * - return ... the caller will unlock the pmap + */ +void +pmap_flush_tlbs(pmap_t pmap) +{ + unsigned int cpu; + unsigned int cpu_bit; + cpu_set cpus_to_signal; + unsigned int my_cpu = cpu_number(); + pmap_paddr_t pmap_cr3 = pmap->pm_cr3; + boolean_t flush_self = FALSE; + uint64_t deadline; + + assert((processor_avail_count < 2) || + (ml_get_interrupts_enabled() && get_preemption_level() != 0)); + + /* + * Scan other cpus for matching active or task CR3. + * For idle cpus (with no active map) we mark them invalid but + * don't signal -- they'll check as they go busy. + * Note: for the kernel pmap we look for 64-bit shared address maps. + */ + cpus_to_signal = 0; + for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { + if (!cpu_datap(cpu)->cpu_running) + continue; + if ((cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) || + (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) || + (pmap->pm_shared) || + ((pmap == kernel_pmap) && + (!CPU_CR3_IS_ACTIVE(cpu) || + cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) { + if (cpu == my_cpu) { + flush_self = TRUE; + continue; + } + cpu_datap(cpu)->cpu_tlb_invalid = TRUE; + __asm__ volatile("mfence"); + + if (CPU_CR3_IS_ACTIVE(cpu)) { + cpus_to_signal |= cpu_bit; + i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC); + } + } + } + + PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START, + (int) pmap, cpus_to_signal, flush_self, 0, 0); + if (cpus_to_signal) { + cpu_set cpus_to_respond = cpus_to_signal; + + deadline = mach_absolute_time() + LockTimeOut; /* - * The temporary windows used for copy/zero - see loose_ends.c + * Wait for those other cpus to acknowledge */ - for (i = 0; i < PMAP_NWINDOWS; i++) { - ret = vm_map_find_space(kernel_map, - &address, PAGE_SIZE, 0, &entry); - if (ret != KERN_SUCCESS) { - printf("pmap_cpu_alloc() " - "vm_map_find_space ret=%d\n", ret); - pmap_cpu_free(cp); - return NULL; + while (cpus_to_respond != 0) { + if (mach_absolute_time() > deadline) { + if (mp_recent_debugger_activity()) + continue; + if (!panic_active()) { + pmap_tlb_flush_timeout = TRUE; + pmap_cpuset_NMIPI(cpus_to_respond); + } + panic("pmap_flush_tlbs() timeout: " + "cpu(s) failing to respond to interrupts, pmap=%p cpus_to_respond=0x%lx", + pmap, cpus_to_respond); } - vm_map_unlock(kernel_map); - - cp->mapwindow[i].prv_CADDR = (caddr_t) address; - cp->mapwindow[i].prv_CMAP = vtopte(address); - * (int *) cp->mapwindow[i].prv_CMAP = 0; - kprintf("pmap_cpu_alloc() " - "window=%d CADDR=0x%x CMAP=0x%x\n", - i, address, vtopte(address)); + for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { + if ((cpus_to_respond & cpu_bit) != 0) { + if (!cpu_datap(cpu)->cpu_running || + cpu_datap(cpu)->cpu_tlb_invalid == FALSE || + !CPU_CR3_IS_ACTIVE(cpu)) { + cpus_to_respond &= ~cpu_bit; + } + cpu_pause(); + } + if (cpus_to_respond == 0) + break; + } } } - /* - * Set up the pmap request list + * Flush local tlb if required. + * We need this flush even if the pmap being changed + * is the user map... in case we do a copyin/out + * before returning to user mode. 
*/ - cp->update_list = up; - simple_lock_init(&up->lock, 0); - up->count = 0; + if (flush_self) + flush_tlb(); - return cp; + if ((pmap == kernel_pmap) && (flush_self != TRUE)) { + panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map); + } + + PMAP_TRACE(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END, + (int) pmap, cpus_to_signal, flush_self, 0, 0); } void -pmap_cpu_free(struct cpu_pmap *cp) +process_pmap_updates(void) { - if (cp != NULL && cp != &cpu_pmap_master) { - if (cp->update_list != NULL) - kfree((void *) cp->update_list, - sizeof(*cp->update_list)); - kfree((void *) cp, sizeof(cpu_pmap_t)); + assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); + + flush_tlb(); + + current_cpu_datap()->cpu_tlb_invalid = FALSE; + __asm__ volatile("mfence"); +} + +void +pmap_update_interrupt(void) +{ + PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START, + 0, 0, 0, 0, 0); + + process_pmap_updates(); + + PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END, + 0, 0, 0, 0, 0); +} + + +unsigned int pmap_cache_attributes(ppnum_t pn) { + + if (!managed_page(ppn_to_pai(pn))) + return (VM_WIMG_IO); + + return (VM_WIMG_COPYBACK); +} + +#ifdef PMAP_DEBUG +void +pmap_dump(pmap_t p) +{ + int i; + + kprintf("pmap 0x%x\n",p); + + kprintf(" pm_cr3 0x%llx\n",p->pm_cr3); + kprintf(" pm_pml4 0x%x\n",p->pm_pml4); + kprintf(" pm_pdpt 0x%x\n",p->pm_pdpt); + + kprintf(" pml4[0] 0x%llx\n",*p->pm_pml4); + for (i=0;i<8;i++) + kprintf(" pdpt[%d] 0x%llx\n",i, p->pm_pdpt[i]); +} + +void pmap_dump_wrap(void) +{ + pmap_dump(current_cpu_datap()->cpu_active_thread->task->map->pmap); +} + +void +dump_4GB_pdpt(pmap_t p) +{ + int spl; + pdpt_entry_t *user_pdptp; + pdpt_entry_t *kern_pdptp; + pdpt_entry_t *pml4p; + + spl = splhigh(); + while ((user_pdptp = pmap64_pdpt(p, 0x0)) == PDPT_ENTRY_NULL) { + splx(spl); + pmap_expand_pml4(p, 0x0); + spl = splhigh(); } + kern_pdptp = kernel_pmap->pm_pdpt; + if (kern_pdptp == NULL) + panic("kern_pdptp == NULL"); + kprintf("dump_4GB_pdpt(%p)\n" + "kern_pdptp=%p (phys=0x%016llx)\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "user_pdptp=%p (phys=0x%016llx)\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n", + p, kern_pdptp, kvtophys(kern_pdptp), + kern_pdptp+0, *(kern_pdptp+0), + kern_pdptp+1, *(kern_pdptp+1), + kern_pdptp+2, *(kern_pdptp+2), + kern_pdptp+3, *(kern_pdptp+3), + kern_pdptp+4, *(kern_pdptp+4), + user_pdptp, kvtophys(user_pdptp), + user_pdptp+0, *(user_pdptp+0), + user_pdptp+1, *(user_pdptp+1), + user_pdptp+2, *(user_pdptp+2), + user_pdptp+3, *(user_pdptp+3), + user_pdptp+4, *(user_pdptp+4)); + kprintf("user pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n", + p->pm_cr3, p->pm_hold, p->pm_pml4); + pml4p = (pdpt_entry_t *)p->pm_hold; + if (pml4p == NULL) + panic("user pml4p == NULL"); + kprintf("\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 0x%016llx\n", + pml4p+0, *(pml4p), + pml4p+KERNEL_UBER_PML4_INDEX, *(pml4p+KERNEL_UBER_PML4_INDEX)); + kprintf("kern pm_cr3=0x%016llx pm_hold=0x%08x pm_pml4=0x%08x\n", + kernel_pmap->pm_cr3, kernel_pmap->pm_hold, kernel_pmap->pm_pml4); + pml4p = (pdpt_entry_t *)kernel_pmap->pm_hold; + if (pml4p == NULL) + panic("kern pml4p == NULL"); + kprintf("\t 0x%08x: 0x%016llx\n" + "\t 0x%08x: 
0x%016llx\n", + pml4p+0, *(pml4p), + pml4p+511, *(pml4p+511)); + splx(spl); +} + +void dump_4GB_pdpt_thread(thread_t tp) +{ + dump_4GB_pdpt(tp->map->pmap); } + + +#endif +