-void
-pmap_reusable(
-        pmap_t          pmap,
-        addr64_t        s64,
-        addr64_t        e64,
-        boolean_t       reusable)
-{
-        pt_entry_t     *pde;
-        pt_entry_t     *spte, *epte;
-        addr64_t        l64;
-        uint64_t        deadline;
-
-        pmap_intr_assert();
-
-        if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64)
-                return;
-
-        PMAP_TRACE(PMAP_CODE(PMAP__REUSABLE) | DBG_FUNC_START,
-                   pmap,
-                   (uint32_t) (s64 >> 32), s64,
-                   (uint32_t) (e64 >> 32), e64);
-
-        PMAP_LOCK(pmap);
-
-        deadline = rdtsc64() + max_preemption_latency_tsc;
-
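-        /* walk the range one page-directory span (pde_mapped_size) at a time */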
-        while (s64 < e64) {
-                l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);
-                if (l64 > e64)
-                        l64 = e64;
-                pde = pmap_pde(pmap, s64);
-
-                if (pde && (*pde & INTEL_PTE_VALID)) {
-                        if (*pde & INTEL_PTE_PS) {
-                                /* superpage: not supported */
-                        } else {
-                                spte = pmap_pte(pmap,
-                                                (s64 & ~(pde_mapped_size - 1)));
-                                spte = &spte[ptenum(s64)];
-                                epte = &spte[intel_btop(l64 - s64)];
-                                pmap_reusable_range(pmap, s64, spte, epte,
-                                                    reusable);
-                        }
-                }
-                s64 = l64;
-
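-                /*
-                 * If we have held the pmap lock past the latency deadline,
-                 * briefly drop and retake it so pending work can get in.
-                 */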
-                if (s64 < e64 && rdtsc64() >= deadline) {
-                        PMAP_UNLOCK(pmap);
-                        PMAP_LOCK(pmap);
-                        deadline = rdtsc64() + max_preemption_latency_tsc;
-                }
-        }
-
-        PMAP_UNLOCK(pmap);
-
-        PMAP_TRACE(PMAP_CODE(PMAP__REUSABLE) | DBG_FUNC_END,
-                   pmap, reusable, 0, 0, 0);
-}
-
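-/*
- * pmap_reusable_range: set or clear PHYS_REUSABLE on the physical pages
- * mapped by the PTEs in [spte, epte), and adjust the pmap's "internal",
- * "external" and "reusable" page counts to match.
- */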
-void
-pmap_reusable_range(
-        pmap_t          pmap,
-        vm_map_offset_t start_vaddr,
-        pt_entry_t      *spte,
-        pt_entry_t      *epte,
-        boolean_t       reusable)
-{
-        pt_entry_t     *cpte;
-        int             num_external, num_internal, num_reusable;
-        ppnum_t         pai;
-        pmap_paddr_t    pa;
-        vm_map_offset_t vaddr;
-
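-        /* stat deltas are accumulated locally and applied atomically below */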
-        num_external = 0;
-        num_internal = 0;
-        num_reusable = 0;
-
-        for (cpte = spte, vaddr = start_vaddr;
-             cpte < epte;
-             cpte++, vaddr += PAGE_SIZE_64) {
-
-                pa = pte_to_pa(*cpte);
-                if (pa == 0)
-                        continue;
-
-                pai = pa_index(pa);
-
-                LOCK_PVH(pai);
-
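-                /* re-check the PTE now that the PV lock is held: it may have been cleared */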
-                pa = pte_to_pa(*cpte);
-                if (pa == 0) {
-                        UNLOCK_PVH(pai);
-                        continue;
-                }
-                if (reusable) {
-                        /* we want to set "reusable" */
-                        if (IS_REUSABLE_PAGE(pai)) {
-                                /* already reusable: no change */
-                        } else {
-                                pmap_phys_attributes[pai] |= PHYS_REUSABLE;
-                                /* one more "reusable" */
-                                num_reusable++;
-                                if (IS_INTERNAL_PAGE(pai)) {
-                                        /* one less "internal" */
-                                        num_internal--;
-                                } else {
-                                        /* one less "external" */
-                                        num_external--;
-                                }
-                        }
-                } else {
-                        /* we want to clear "reusable" */
-                        if (IS_REUSABLE_PAGE(pai)) {
-                                pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
-                                /* one less "reusable" */
-                                num_reusable--;
-                                if (IS_INTERNAL_PAGE(pai)) {
-                                        /* one more "internal" */
-                                        num_internal++;
-                                } else {
-                                        /* one more "external" */
-                                        num_external++;
-                                }
-                        } else {
-                                /* already not reusable: no change */
-                        }
-                }
-
-                UNLOCK_PVH(pai);
-
-        } /* for loop */
-
-        /*
-         * Update the counts
-         */
-        if (pmap != kernel_pmap) {
-                if (num_external) {
-                        OSAddAtomic(num_external, &pmap->stats.external);
-                        PMAP_STATS_PEAK(pmap->stats.external);
-                }
-                assert(pmap->stats.external >= 0);
-                if (num_internal) {
-                        OSAddAtomic(num_internal, &pmap->stats.internal);
-                        PMAP_STATS_PEAK(pmap->stats.internal);
-                }
-                assert(pmap->stats.internal >= 0);
-                if (num_reusable) {
-                        OSAddAtomic(num_reusable, &pmap->stats.reusable);
-                        PMAP_STATS_PEAK(pmap->stats.reusable);
-                }
-                assert(pmap->stats.reusable >= 0);
-        }
-
-        return;
-}
-