diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c
index b6a6372e00124258b03f804f4b61ada5e493dcb9..7224204c91069e46c7e0570465dcb6cbdc6a2140 100644
--- a/osfmk/i386/pmap.c
+++ b/osfmk/i386/pmap.c
@@ -1076,7 +1076,7 @@ pmap_bootstrap(
          ml_set_interrupts_enabled(istate);
 
        }
-       kernel_pmap->pm_hold = kernel_pmap->pm_pml4;
+       kernel_pmap->pm_hold = (vm_offset_t)kernel_pmap->pm_pml4;
 
        kprintf("Kernel virtual space from 0x%x to 0x%x.\n",
                        VADDR(KPTDI,0), virtual_end);
@@ -1267,7 +1267,6 @@ pmap_create(
        vm_size_t       size;
        pdpt_entry_t    *pdpt;
        pml4_entry_t    *pml4p;
-       vm_page_t       m;
        int template;
        pd_entry_t      *pdp;
        spl_t s;
@@ -1294,6 +1293,7 @@ pmap_create(
        p->nx_enabled = 1;
        p->pm_64bit = is_64bit;
        p->pm_kernel_cr3 = FALSE;
+       p->pm_shared = FALSE;
 
        if (!cpu_64bit) {
          /* legacy 32 bit setup */
@@ -1428,7 +1428,6 @@ pmap_clear_4GB_pagezero(pmap_t p)
 {
        int             spl;
        pdpt_entry_t    *user_pdptp;
-       uint32_t        cr3;
 
        if (!p->pm_kernel_cr3)
                return;
@@ -1591,24 +1590,29 @@ pmap_reference(
 static void
 pmap_remove_range(
        pmap_t                  pmap,
-       vm_map_offset_t         vaddr,
+       vm_map_offset_t         start_vaddr,
        pt_entry_t              *spte,
        pt_entry_t              *epte)
 {
        register pt_entry_t     *cpte;
-       int                     num_removed, num_unwired;
+       int                     num_removed, num_unwired, num_found;
        int                     pai;
        pmap_paddr_t            pa;
+       vm_map_offset_t         vaddr;
 
        num_removed = 0;
        num_unwired = 0;
+       num_found = 0;
 
-       for (cpte = spte; cpte < epte;
-            cpte++, vaddr += PAGE_SIZE) {
+       /* invalidate the PTEs first to "freeze" them */
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
 
            pa = pte_to_pa(*cpte);
            if (pa == 0)
                continue;
+           num_found++;
 
            if (iswired(*cpte))
                num_unwired++;
@@ -1619,28 +1623,46 @@ pmap_remove_range(
                 *      Outside range of managed physical memory.
                 *      Just remove the mappings.
                 */
-               register pt_entry_t     *lpte = cpte;
-
-               pmap_store_pte(lpte, 0);
+               pmap_store_pte(cpte, 0);
                continue;
            }
-           num_removed++;
+
+           /* invalidate the PTE */
+           pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
+       }
+
+       if (0 == num_found) {
+         /* nothing was changed, we're done */
+         goto update_counts;
+       }
+
+       /* propagate the invalidates to other CPUs */
+
+       PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
+
+       for (cpte = spte, vaddr = start_vaddr;
+            cpte < epte;
+            cpte++, vaddr += PAGE_SIZE_64) {
+
+           pa = pte_to_pa(*cpte);
+           if (pa == 0)
+               continue;
 
            pai = pa_index(pa);
+
            LOCK_PVH(pai);
 
+           num_removed++;
+
            /*
-            *  Get the modify and reference bits.
+            *  Get the modify and reference bits, then
+            *  nuke the entry in the page table
             */
-           {
-               register pt_entry_t     *lpte;
-
-               lpte = cpte;
-               pmap_phys_attributes[pai] |=
-                       *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-               pmap_store_pte(lpte, 0);
-
-           }
+           /* remember reference and change */
+           pmap_phys_attributes[pai] |=
+             (char)(*cpte & (PHYS_MODIFIED|PHYS_REFERENCED));
+           /* completely invalidate the PTE */
+           pmap_store_pte(cpte, 0);
 
            /*
             *  Remove the mapping from the pvlist for
@@ -1683,6 +1705,7 @@ pmap_remove_range(
            }
        }
 
+ update_counts:
        /*
         *      Update the counts
         */
@@ -1690,6 +1713,7 @@ pmap_remove_range(
        pmap->stats.resident_count -= num_removed;
        assert(pmap->stats.wired_count >= num_unwired);
        pmap->stats.wired_count -= num_unwired;
+       return;
 }
 
 /*
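
The hunks above restructure pmap_remove_range() from a single pass that zeroed each PTE into a two-pass scheme: first every live PTE has INTEL_PTE_VALID cleared ("frozen") so no CPU can load a fresh translation, then one ranged PMAP_UPDATE_TLBS() covers the whole span, and only afterwards are the referenced/modified bits harvested, the PTEs zeroed, and the pv-list entries removed. The sketch below is a condensed restatement of that control flow for readability, not the committed code; the function name is invented, and the pv-list surgery, PV locking, the unmanaged-page shortcut, and the statistics updates shown in the hunks are deliberately elided.

/*
 * Sketch of the new two-pass flow in pmap_remove_range() above.
 * Hypothetical name; pv-list removal, PV locking, the unmanaged-page
 * shortcut and the resident/wired statistics are intentionally omitted.
 */
static void
remove_range_sketch(pmap_t pmap, vm_map_offset_t start_vaddr,
                    pt_entry_t *spte, pt_entry_t *epte)
{
        pt_entry_t      *cpte;
        vm_map_offset_t vaddr;
        int             num_found = 0;

        /* pass 1: drop the valid bit so no CPU can take a new TLB fill */
        for (cpte = spte, vaddr = start_vaddr; cpte < epte;
             cpte++, vaddr += PAGE_SIZE_64) {
                if (pte_to_pa(*cpte) == 0)
                        continue;
                num_found++;
                pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
        }
        if (num_found == 0)
                return;

        /* one ranged shootdown for the whole span, not one per page */
        PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);

        /*
         * pass 2: no CPU can set ref/mod through a stale TLB entry any
         * longer, so the bits can be sampled and the PTEs cleared safely.
         */
        for (cpte = spte; cpte < epte; cpte++) {
                pmap_paddr_t pa = pte_to_pa(*cpte);

                if (pa == 0)
                        continue;
                pmap_phys_attributes[pa_index(pa)] |=
                    (char)(*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
                pmap_store_pte(cpte, 0);
        }
}
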
@@ -1847,7 +1871,7 @@ pmap_page_protect(
                                /*
                                 * Remove the mapping, collecting any modify bits.
                                 */
-                               pmap_store_pte(pte, *pte & ~INTEL_PTE_VALID);
+                               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
 
                                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
 
@@ -1879,8 +1903,7 @@ pmap_page_protect(
                                /*
                                 * Write-protect.
                                 */
-                               pmap_store_pte(pte, *pte & ~INTEL_PTE_WRITE);
-
+                               pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WRITE));
                                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
                                /*
                                 * Advance prev.
@@ -1941,6 +1964,7 @@ pmap_protect(
        vm_map_offset_t         orig_sva;
        spl_t           spl;
        boolean_t       set_NX;
+       int num_found = 0;
 
        if (map == PMAP_NULL)
                return;
@@ -1973,23 +1997,25 @@ pmap_protect(
                    if (*spte & INTEL_PTE_VALID) {
                      
                        if (prot & VM_PROT_WRITE)
-                           pmap_store_pte(spte, *spte | INTEL_PTE_WRITE);
+                         pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_WRITE));
                        else
-                           pmap_store_pte(spte, *spte & ~INTEL_PTE_WRITE);
+                         pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_WRITE));
 
                        if (set_NX == TRUE)
-                           pmap_store_pte(spte, *spte | INTEL_PTE_NX);
+                         pmap_update_pte(spte, *spte, (*spte | INTEL_PTE_NX));
                        else
-                           pmap_store_pte(spte, *spte & ~INTEL_PTE_NX);
+                         pmap_update_pte(spte, *spte, (*spte & ~INTEL_PTE_NX));
+
+                       num_found++;
 
                    }
                    spte++;
                }
            }
            sva = lva;
-           pde++;
        }
-       PMAP_UPDATE_TLBS(map, orig_sva, eva);
+       if (num_found)
+         PMAP_UPDATE_TLBS(map, orig_sva, eva);
 
        simple_unlock(&map->lock);
        SPLX(spl);
@@ -2047,6 +2073,7 @@ pmap_enter(
        pmap_paddr_t            pa = (pmap_paddr_t)i386_ptob(pn);
        boolean_t               need_tlbflush = FALSE;
        boolean_t               set_NX;
+       char                    oattr;
 
        XPR(0x80000000, "%x/%x: pmap_enter %x/%qx/%x\n",
            current_thread(),
@@ -2129,13 +2156,9 @@ pmap_enter(
                }
            }
 
-               if (*pte & INTEL_PTE_MOD)
-                   template |= INTEL_PTE_MOD;
-
-               pmap_store_pte(pte, template);
-               pte++;
-
-               need_tlbflush = TRUE;
+           /* store modified PTE and preserve RC bits */
+           pmap_update_pte(pte, *pte, template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
+           need_tlbflush = TRUE;
            goto Done;
        }
 
@@ -2166,6 +2189,15 @@ pmap_enter(
             *  to overwrite the old one.
             */
 
+         /* invalidate the PTE */
+         pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
+         /* propagate the invalidate everywhere */
+         PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
+         /* remember reference and change */
+         oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
+         /* completely invalidate the PTE */
+         pmap_store_pte(pte, 0);
+
            if (valid_page(i386_btop(old_pa))) {
 
                pai = pa_index(old_pa);
@@ -2178,10 +2210,7 @@ pmap_enter(
                    pmap->stats.wired_count--;
                }
 
-                   pmap_phys_attributes[pai] |=
-                       *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-
-               pmap_store_pte(pte, 0);
+               pmap_phys_attributes[pai] |= oattr;
                /*
                 *      Remove the mapping from the pvlist for
                 *      this physical page.
@@ -2193,6 +2222,7 @@ pmap_enter(
                    if (pv_h->pmap == PMAP_NULL) {
                        panic("pmap_enter: null pv_list!");
                    }
+
                    if (pv_h->va == vaddr && pv_h->pmap == pmap) {
                        /*
                         * Header is the pv_entry.  Copy the next one
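
The pmap_enter() hunks above apply the same discipline to the path that replaces an existing mapping for the same page: the old PTE is made invalid and shot down on every CPU before its referenced/modified bits are sampled into oattr, closing the window in which another CPU could still set those bits through a cached translation. A condensed restatement, using the surrounding function's own locals rather than a standalone routine, would look roughly like this:

/*
 * Sketch: ordering now used by pmap_enter() when tearing down an old
 * mapping. 'pte', 'vaddr', 'old_pa' and 'pmap' are the surrounding
 * function's locals; pv-list and wired-count handling are elided.
 */
char    oattr;

/* 1. invalidate so no CPU can take a fresh TLB fill for this page */
pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));

/* 2. propagate the invalidate to every CPU using this pmap */
PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);

/* 3. the ref/mod bits are now stable; snapshot them */
oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));

/* 4. clear the entry completely */
pmap_store_pte(pte, 0);

/* 5. record the snapshot for the managed page, if any */
if (valid_page(i386_btop(old_pa)))
        pmap_phys_attributes[pa_index(old_pa)] |= oattr;
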
@@ -2475,7 +2505,7 @@ pmap_change_wiring(
             *  wiring down mapping
             */
            map->stats.wired_count++;
-           pmap_store_pte(pte, *pte | INTEL_PTE_WIRED);
+           pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
            pte++;
        }
        else if (!wired && iswired(*pte)) {
@@ -2484,7 +2514,7 @@ pmap_change_wiring(
             */
            assert(map->stats.wired_count >= 1);
            map->stats.wired_count--;
-           pmap_store_pte(pte, *pte & ~INTEL_PTE_WIRED);
+           pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
            pte++;
        }
 
@@ -3073,23 +3103,21 @@ phys_attribute_clear(
                    register vm_map_offset_t va;
 
                    va = pv_e->va;
-                   pte = pmap_pte(pmap, va);
 
-#if    0
                    /*
-                    * Consistency checks.
+                    * first make sure any processor actively
+                    * using this pmap flushes its TLB state
                     */
-                   assert(*pte & INTEL_PTE_VALID);
-                   /* assert(pte_to_phys(*pte) == phys); */
-#endif
+
+                   PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
 
                /*
                 * Clear modify or reference bits.
                 */
 
-                       pmap_store_pte(pte, *pte & ~bits);
-                       pte++;
-                       PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+                   pte = pmap_pte(pmap, va);
+                   pmap_update_pte(pte, *pte, (*pte & ~bits));
+
                }
                simple_unlock(&pmap->lock);
 
@@ -3795,6 +3823,8 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t n
        }
        if ((size >> 28) != 1) panic("pmap_nest: size 0x%llx must be 0x%x", size, NBPDE);
 
+       subord->pm_shared = TRUE;
+
        // prepopulate subord pmap pde's if necessary
 
        if (cpu_64bit) {
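
pmap_nest() now tags the subordinate pmap as shared (pm_shared is initialized to FALSE in pmap_create() earlier in the patch), and the pmap_flush_tlbs() hunk below widens its CPU-selection test to match: a pmap with pm_shared set can be reached through some other pmap's CR3, so it cannot be ruled out by comparing CR3 values alone. Pulled out as a hypothetical helper purely for illustration (the committed code keeps this inline, and the types are approximate), the test reads roughly:

/*
 * Sketch only: the per-CPU test pmap_flush_tlbs() applies below,
 * expressed as a hypothetical helper.
 */
static inline boolean_t
needs_shootdown_sketch(int cpu, pmap_t pmap, pmap_paddr_t pmap_cr3)
{
        if (!cpu_datap(cpu)->cpu_running)
                return FALSE;
        return (cpu_datap(cpu)->cpu_task_cr3 == pmap_cr3) ||
               (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
               pmap->pm_shared ||
               ((pmap == kernel_pmap) &&
                (!CPU_CR3_IS_ACTIVE(cpu) ||
                 cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED));
}
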
@@ -3997,7 +4027,8 @@ pmap_flush_tlbs(pmap_t    pmap)
                if (!cpu_datap(cpu)->cpu_running)
                        continue;
                if ((cpu_datap(cpu)->cpu_task_cr3   == pmap_cr3) ||
-                   (cpu_datap(cpu)->cpu_active_cr3 == pmap_cr3) ||
+                   (CPU_GET_ACTIVE_CR3(cpu) == pmap_cr3) ||
+                   (pmap->pm_shared) ||
                    ((pmap == kernel_pmap) &&
                     (!CPU_CR3_IS_ACTIVE(cpu) ||
                      cpu_datap(cpu)->cpu_task_map == TASK_MAP_64BIT_SHARED))) {