/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/ledger.h>
#include <i386/pmap_internal.h>

void pmap_remove_range(
void pmap_remove_range_options(
void pmap_reusable_range(

uint32_t pmap_update_clear_pte_count;
/*
 * The Intel platform can nest at the PDE level, so NBPDE (i.e. 2MB) at a time,
 * on a NBPDE boundary.
 */
/* These symbols may be referenced directly by VM */
uint64_t pmap_nesting_size_min = NBPDE;
uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE;
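
/*
 * Illustrative sketch (not compiled): the nesting entry points below only
 * accept ranges that are aligned to pmap_nesting_size_min (NBPDE, 2MiB) and
 * no larger than 16TB. A caller-side pre-check might look like the predicate
 * sketched here; the helper name is hypothetical and exists only for
 * illustration.
 */
#if 0
static inline boolean_t
pmap_nest_range_is_valid(addr64_t va_start, addr64_t nstart, uint64_t size)
{
    if ((size & (pmap_nesting_size_min - 1)) ||
        (va_start & (pmap_nesting_size_min - 1)) ||
        (nstart & (pmap_nesting_size_min - 1)))
        return FALSE;       /* must be NBPDE (2MiB) aligned */
    if ((size >> 28) > 65536)
        return FALSE;       /* larger than the 16TB nesting limit */
    return TRUE;
}
#endif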
/*
 * kern_return_t pmap_nest(grand, subord, va_start, nstart, size)
 *
 * grand    = the pmap that we will nest subord into
 * subord   = the pmap that goes into the grand
 * va_start = start of range in pmap to be inserted
 * nstart   = start of range in the nested pmap
 * size     = Size of nest area (up to 16TB)
 *
 * Inserts a pmap into another. This is used to implement shared segments.
 *
 * Note that we depend upon higher level VM locks to ensure that things don't change while
 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
 * or do 2 nests at once.
 *
 * This routine can nest subtrees either at the PDPT level (1GiB) or at the
 * PDE level (2MiB). We currently disallow disparate offsets for the "subord"
 * container and the "grand" parent. A minor optimization to consider for the
 * future: make the "subord" truly a container rather than a full-fledged
 * pagetable hierarchy which can be unnecessarily sparse (DRK).
 */
kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint64_t size) {
    vm_map_offset_t vaddr, nvaddr;
    pd_entry_t *pde, *npde;

    assert(!is_ept_pmap(grand));
    assert(!is_ept_pmap(subord));

    if ((size & (pmap_nesting_size_min - 1)) ||
        (va_start & (pmap_nesting_size_min - 1)) ||
        (nstart & (pmap_nesting_size_min - 1)) ||
        ((size >> 28) > 65536))     /* Max size we can nest is 16TB */
        return KERN_INVALID_VALUE;

        panic("pmap_nest: size is invalid - %016llX\n", size);

    if (va_start != nstart)
        panic("pmap_nest: va_start(0x%llx) != nstart(0x%llx)\n", va_start, nstart);

    PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
               (uintptr_t) grand, (uintptr_t) subord,
               (uintptr_t) (va_start >> 32), (uintptr_t) va_start, 0);

    nvaddr = (vm_map_offset_t)nstart;
    num_pde = size >> PDESHIFT;

    subord->pm_shared = TRUE;

    for (i = 0; i < num_pde;) {
        if (((nvaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG && cpu_64bit) {

            npde = pmap64_pdpt(subord, nvaddr);

            while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
                pmap_expand_pdpt(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE);
                npde = pmap64_pdpt(subord, nvaddr);

            *npde |= INTEL_PDPTE_NESTED;
            i += (uint32_t)NPDEPG;

            npde = pmap_pde(subord, nvaddr);

            while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
                pmap_expand(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE);
                npde = pmap_pde(subord, nvaddr);

    vaddr = (vm_map_offset_t)va_start;

    for (i = 0; i < num_pde;) {
        if (((vaddr & PDPTMASK) == 0) && ((num_pde - i) >= NPDEPG) && cpu_64bit) {
            npde = pmap64_pdpt(subord, vaddr);
                panic("pmap_nest: no PDPT, subord %p nstart 0x%llx", subord, vaddr);

            pde = pmap64_pdpt(grand, vaddr);
                pmap_expand_pml4(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
                pde = pmap64_pdpt(grand, vaddr);
                panic("pmap_nest: no PDPT, grand %p vaddr 0x%llx", grand, vaddr);
            pmap_store_pte(pde, tpde);
            i += (uint32_t) NPDEPG;

            npde = pmap_pde(subord, vaddr);
                panic("pmap_nest: no npde, subord %p vaddr 0x%llx", subord, vaddr);

            pde = pmap_pde(grand, vaddr);
            if ((0 == pde) && cpu_64bit) {
                pmap_expand_pdpt(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
                pde = pmap_pde(grand, vaddr);
                panic("pmap_nest: no pde, grand %p vaddr 0x%llx", grand, vaddr);
            pmap_store_pte(pde, tpde);

    PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
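
/*
 * Illustrative sketch (not compiled): how a higher-level VM caller might pair
 * pmap_nest() and pmap_unnest() for a shared region. The pmaps and the base
 * address used here are hypothetical; as the comment above notes, the caller
 * is responsible for holding the higher-level VM locks around these calls.
 */
#if 0
static void
shared_region_nest_example(pmap_t grand, pmap_t subord)
{
    addr64_t base = 0x7fff80000000ULL;  /* hypothetical, NBPDE-aligned */
    uint64_t size = 256 * NBPDE;        /* hypothetical nested extent */

    /* Current code requires the grand and nested offsets to match. */
    (void) pmap_nest(grand, subord, base, base, size);

    /* ... shared segment in use ... */

    (void) pmap_unnest(grand, base, size);
}
#endif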
/*
 * kern_return_t pmap_unnest(grand, vaddr)
 *
 * grand = the pmap that we will un-nest subord from
 * vaddr = start of range in pmap to be unnested
 *
 * Removes a pmap from another. This is used to implement shared segments.
 */
kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
    addr64_t va_start, va_end;
    uint64_t npdpt = PMAP_INVALID_PDPTNUM;

    PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START,
               (uintptr_t) (vaddr >> 32), (uintptr_t) vaddr, 0, 0);

    if ((size & (pmap_nesting_size_min - 1)) ||
        (vaddr & (pmap_nesting_size_min - 1))) {
        panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n",

    assert(!is_ept_pmap(grand));

    /* align everything to PDE boundaries */
    va_start = vaddr & ~(NBPDE - 1);
    va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE - 1);
    size = va_end - va_start;

    num_pde = size >> PDESHIFT;

    for (i = 0; i < num_pde; ) {
        if ((pdptnum(grand, vaddr) != npdpt) && cpu_64bit) {
            npdpt = pdptnum(grand, vaddr);
            pde = pmap64_pdpt(grand, vaddr);
            if (pde && (*pde & INTEL_PDPTE_NESTED)) {
                pmap_store_pte(pde, (pd_entry_t)0);
                i += (uint32_t) NPDEPG;

        pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
            panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr);
        pmap_store_pte(pde, (pd_entry_t)0);

    PMAP_UPDATE_TLBS(grand, va_start, va_end);

    PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    __unused uint64_t size,
    __unused unsigned int options) {
    return pmap_unnest(grand, vaddr, size);
/* Invoked by the Mach VM to determine the platform specific unnest region */

boolean_t pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) {
    boolean_t rval = FALSE;

    pdpte = pmap64_pdpt(p, *s);
    if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {

    pdpte = pmap64_pdpt(p, *e);
    if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
        *e = ((*e + NBPDPT) & ~(NBPDPT - 1));
/*
 * pmap_find_phys returns the (4K) physical page number containing a
 * given virtual address in a given pmap.
 * Note that pmap_pte may return a pde if this virtual address is
 * mapped by a large page and this is taken into account in order
 * to return the correct page number in this case.
 */
pmap_find_phys(pmap_t pmap, addr64_t va)

    is_ept = is_ept_pmap(pmap);

    mp_disable_preemption();

    /* This refcount test is a band-aid--several infrastructural changes
     * are necessary to eliminate invocation of this routine from arbitrary
     */
    if (!pmap->ref_count)

    pdep = pmap_pde(pmap, va);

    if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) {
            ppn = (ppnum_t) i386_btop(pte_to_pa(pde));
            ppn += (ppnum_t) ptenum(va);

            ptp = pmap_pte(pmap, va);
            if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) {
                ppn = (ppnum_t) i386_btop(pte_to_pa(pte));

    mp_enable_preemption();
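
/*
 * Illustrative sketch (not compiled): translating a virtual address to its
 * physical page number with pmap_find_phys(). The address below is
 * hypothetical; a result of 0 is taken to mean "no valid translation", which
 * is how callers of this routine typically treat it.
 */
#if 0
static ppnum_t
lookup_example(pmap_t pmap)
{
    addr64_t va = 0x100000000ULL;           /* hypothetical address */
    ppnum_t ppn = pmap_find_phys(pmap, va);

    return ppn;                             /* 0 => not mapped */
}
#endif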
/*
 * Update cache attributes for all extant managed mappings.
 * Assumes PV for this page is locked, and that the page
 * is managed. We assume that this physical page may be mapped in
 * both EPT and normal Intel PTEs, so we convert the attributes
 * to the corresponding format for each pmap.
 *
 * We assert that the passed set of attributes is a subset of the
 * PHYS_CACHEABILITY_MASK.
 */
pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
    pv_rooted_entry_t pv_h, pv_e;
    pv_hashed_entry_t pvh_e, nexth;
    vm_map_offset_t vaddr;
    unsigned ept_attributes;

    assert(IS_MANAGED_PAGE(pn));
    assert(((~PHYS_CACHEABILITY_MASK) & attributes) == 0);

    /* We don't support the PTA bit for EPT PTEs */
    if (attributes & INTEL_PTE_NCACHE)
        ept_attributes = INTEL_EPT_NCACHE;
        ept_attributes = INTEL_EPT_WB;

    pv_h = pai_to_pvh(pn);
    /* TODO: translate the PHYS_* bits to PTE bits, while they're
     * currently identical, they may not remain so
     * Potential optimization (here and in page_protect),
     * parallel shootdowns, check for redundant
     * attribute modifications.
     */

    /*
     * Alter attributes on all mappings
     */
    if (pv_h->pmap != PMAP_NULL) {
            pvh_e = (pv_hashed_entry_t)pv_e;

            vaddr = PVE_VA(pv_e);
            ptep = pmap_pte(pmap, vaddr);

                panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);

            is_ept = is_ept_pmap(pmap);

            nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);

                pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);

                pmap_update_pte(ptep, INTEL_EPT_CACHE_MASK, ept_attributes);

            PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);

        } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
    assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);

        CPU_CR3_MARK_INACTIVE();
        CPU_CR3_MARK_ACTIVE();
        if (current_cpu_datap()->cpu_tlb_invalid)
            process_pmap_updates();
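
/*
 * Illustrative sketch (not compiled): this routine brackets a window in which
 * TLB coherency interrupts are filtered for the current CPU. Marking CR3
 * inactive suppresses shootdown IPIs; when filtering is turned back off, any
 * TLB invalidation posted in the meantime is processed. The critical section
 * shown here is hypothetical.
 */
#if 0
static void
filtered_section_example(void)
{
    x86_filter_TLB_coherency_interrupts(TRUE);
    /* ... brief window that must not take TLB shootdown IPIs ... */
    x86_filter_TLB_coherency_interrupts(FALSE);
}
#endif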
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte cannot be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
    vm_map_offset_t vaddr,
    vm_prot_t fault_type,

    (void) pmap_enter_options(pmap, vaddr, pn, prot, fault_type, flags, wired, PMAP_EXPAND_OPTIONS_NONE, NULL);
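
/*
 * Illustrative sketch (not compiled): pmap_enter() is the thin wrapper shown
 * above; callers that need the extra knobs use pmap_enter_options() directly.
 * The pmap, address and page number here are hypothetical.
 */
#if 0
static void
enter_example(pmap_t pmap, vm_map_offset_t va, ppnum_t pn)
{
    /* Wired, cacheable, read/write mapping with no special flags. */
    pmap_enter(pmap, va, pn, VM_PROT_READ | VM_PROT_WRITE,
               VM_PROT_NONE, 0, TRUE);
}
#endif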
    vm_map_offset_t vaddr,
    __unused vm_prot_t fault_type,
    unsigned int options,

    pv_rooted_entry_t pv_h;
    pv_hashed_entry_t pvh_e;
    pv_hashed_entry_t pvh_new;
    pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn);
    boolean_t need_tlbflush = FALSE;
    boolean_t old_pa_locked;
    /* 2MiB mappings are confined to x86_64 by VM */
    boolean_t superpage = flags & VM_MEM_SUPERPAGE;
    vm_object_t delpage_pm_obj = NULL;
    uint64_t delpage_pde_index = 0;
    kern_return_t kr_expand;
    boolean_t is_altacct;
    if (pmap == PMAP_NULL)
        return KERN_INVALID_ARGUMENT;

    is_ept = is_ept_pmap(pmap);

    /* N.B. We can be supplied a zero page frame in the NOENTER case, it's an
     * unused value for that scenario.
     */
    assert(pn != vm_page_fictitious_addr);

    if (pn == vm_page_guard_addr)
        return KERN_INVALID_ARGUMENT;

    PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
               (uint32_t) (vaddr >> 32), (uint32_t) vaddr,

    if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)

    if (__improbable(set_NX && (pmap == kernel_pmap) &&
        ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) ||
         (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) {
    /*
     * Must allocate a new pvlist entry while we're unlocked;
     * zalloc may cause pageout (which will lock the pmap system).
     * If we determine we need a pvlist entry, we will unlock
     * and allocate one. Then we will retry, throwing away
     * the allocated entry later (if we no longer need it).
     */
    pvh_new = PV_HASHED_ENTRY_NULL;

    pvh_e = PV_HASHED_ENTRY_NULL;
    /*
     * Expand pmap to include this pte.  Assume that
     * pmap is always expanded to include enough hardware
     * pages to map one VM page.
     */
    while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
        /* need room for another pde entry */
        kr_expand = pmap_expand_pdpt(pmap, vaddr, options);
        if (kr_expand != KERN_SUCCESS)

    while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
        /*
         * Must unlock to expand the pmap
         * going to grow pde level page(s)
         */
        kr_expand = pmap_expand(pmap, vaddr, options);
        if (kr_expand != KERN_SUCCESS)

    if (options & PMAP_EXPAND_OPTIONS_NOENTER) {
    if (superpage && *pte && !(*pte & PTE_PS)) {
        /*
         * There is still an empty page table mapped that
         * was used for a previous base page mapping.
         * Remember the PDE and the PDE index, so that we
         * can free the page at the end of this function.
         */
        delpage_pde_index = pdeidx(pmap, vaddr);
        delpage_pm_obj = pmap->pm_obj;

    old_pa = pte_to_pa(*pte);
    pai = pa_index(old_pa);
    old_pa_locked = FALSE;

        PTE_IS_COMPRESSED(*pte)) {
        /*
         * "pmap" should be locked at this point, so this should
         * not race with another pmap_enter() or pmap_remove_range().
         */
        assert(pmap != kernel_pmap);

        /* one less "compressed" */
        OSAddAtomic64(-1, &pmap->stats.compressed);
        pmap_ledger_debit(pmap, task_ledgers.internal_compressed,
        if (*pte & PTE_COMPRESSED_ALT) {
                task_ledgers.alternate_accounting_compressed,
            /* was part of the footprint */
            pmap_ledger_debit(pmap, task_ledgers.phys_footprint,
        /* marker will be cleared below */

    /*
     * if we have a previous managed page, lock the pv entry now. after
     * we lock it, check to see if someone beat us to the lock and if so
     */
    if ((0 != old_pa) && IS_MANAGED_PAGE(pai)) {
        old_pa_locked = TRUE;
        old_pa = pte_to_pa(*pte);
            UNLOCK_PVH(pai);    /* another path beat us to it */
            old_pa_locked = FALSE;
    /*
     * Special case if the incoming physical page is already mapped
     */
        pt_entry_t old_attributes =
            *pte & ~(PTE_REF(is_ept) | PTE_MOD(is_ept));

        /*
         * May be changing its wired attribute or protection
         */
        template = pa_to_pte(pa);

        /* ?: WORTH ASSERTING THAT AT LEAST ONE RWX (implicit valid) PASSED FOR EPT? */
            template |= INTEL_PTE_VALID;
            template |= INTEL_EPT_IPTA;

        template |= pmap_get_cache_attributes(pa_index(pa), is_ept);

        /*
         * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
         */
        if (!is_ept && (VM_MEM_NOT_CACHEABLE ==
            (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)))) {
            if (!(flags & VM_MEM_GUARDED))
                template |= INTEL_PTE_PTA;
            template |= INTEL_PTE_NCACHE;

        if (pmap != kernel_pmap && !is_ept)
            template |= INTEL_PTE_USER;

        if (prot & VM_PROT_READ)
            template |= PTE_READ(is_ept);

        if (prot & VM_PROT_WRITE) {
            template |= PTE_WRITE(is_ept);
            if (is_ept && !pmap_ept_support_ad) {
                template |= PTE_MOD(is_ept);
                    assert(IS_MANAGED_PAGE(pai));
                    pmap_phys_attributes[pai] |= PHYS_MODIFIED;

        if (prot & VM_PROT_EXECUTE) {
                template = pte_set_ex(template, is_ept);

                template = pte_remove_ex(template, is_ept);

            template |= PTE_WIRED;
            if (!iswired(old_attributes)) {
                OSAddAtomic(+1, &pmap->stats.wired_count);
                pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);

            if (iswired(old_attributes)) {
                assert(pmap->stats.wired_count >= 1);
                OSAddAtomic(-1, &pmap->stats.wired_count);
                pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);

        if (superpage)              /* this path can not be used */
            template |= PTE_PS;     /* to change the page size! */

        if (old_attributes == template)
            goto dont_update_pte;
        /* Determine delta, PV locked */
            ((old_attributes ^ template) != PTE_WIRED);

        if (need_tlbflush == TRUE && !(old_attributes & PTE_WRITE(is_ept))) {
            if ((old_attributes ^ template) == PTE_WRITE(is_ept))
                need_tlbflush = FALSE;

        /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
        if (is_ept && !pmap_ept_support_ad) {
            template |= PTE_REF(is_ept);
                assert(IS_MANAGED_PAGE(pai));
                pmap_phys_attributes[pai] |= PHYS_REFERENCED;
        /* store modified PTE and preserve RC bits */
        pt_entry_t npte, opte;

            npte = template | (opte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
        } while (!pmap_cmpx_pte(pte, opte, npte));
        old_pa_locked = FALSE;

    /*
     * Outline of code from here:
     * 1) If va was mapped, update TLBs, remove the mapping
     *    and remove old pvlist entry.
     * 2) Add pvlist entry for new mapping
     * 3) Enter new mapping.
     *
     * If the old physical page is not managed step 1) is skipped
     * (except for updating the TLBs), and the mapping is
     * overwritten at step 3).  If the new physical page is not
     * managed, step 2) is skipped.
     */
    if (old_pa != (pmap_paddr_t) 0) {
        boolean_t was_altacct;

        /*
         * Don't do anything to pages outside valid memory here.
         * Instead convince the code that enters a new mapping
         * to overwrite the old one.
         */
            /* invalidate the PTE */
            pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
            /* propagate invalidate everywhere */
            PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
            /* remember reference and change */
            oattr = (char) (old_pte & (PTE_MOD(is_ept) | PTE_REF(is_ept)));
            /* completely invalidate the PTE */
            pmap_store_pte(pte, 0);

        if (IS_MANAGED_PAGE(pai)) {
            /*
             * Remove the mapping from the pvlist for
             * this physical page.
             * We'll end up with either a rooted pv or a
             */
            pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte, &was_altacct);

        if (IS_MANAGED_PAGE(pai)) {
            pmap_assert(old_pa_locked == TRUE);
            pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
            assert(pmap->stats.resident_count >= 1);
            OSAddAtomic(-1, &pmap->stats.resident_count);
            if (pmap != kernel_pmap) {
                /* update pmap stats */
                if (IS_REUSABLE_PAGE(pai)) {
                        (pmap->stats.reusable > 0,
                         pmap->stats.reusable));
                    OSAddAtomic(-1, &pmap->stats.reusable);
                } else if (IS_INTERNAL_PAGE(pai)) {
                        (pmap->stats.internal > 0,
                         pmap->stats.internal));
                    OSAddAtomic(-1, &pmap->stats.internal);
                        (pmap->stats.external > 0,
                         pmap->stats.external));
                    OSAddAtomic(-1, &pmap->stats.external);

                    assert(IS_INTERNAL_PAGE(pai));
                    pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
                    pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
                } else if (IS_REUSABLE_PAGE(pai)) {
                    assert(!was_altacct);
                    assert(IS_INTERNAL_PAGE(pai));
                    /* was already not in phys_footprint */
                } else if (IS_INTERNAL_PAGE(pai)) {
                    assert(!was_altacct);
                    assert(!IS_REUSABLE_PAGE(pai));
                    pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
                    pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
                    /* not an internal page */

                assert(pmap->stats.wired_count >= 1);
                OSAddAtomic(-1, &pmap->stats.wired_count);
                pmap_ledger_debit(pmap, task_ledgers.wired_mem,

                pmap_phys_attributes[pai] |= oattr;

                pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr);
            /*
             * old_pa is not managed.
             * Do removal part of accounting.
             */
            if (pmap != kernel_pmap) {
                    assert(pmap->stats.device > 0);
                    OSAddAtomic(-1, &pmap->stats.device);

                assert(pmap->stats.wired_count >= 1);
                OSAddAtomic(-1, &pmap->stats.wired_count);
                pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);

    /*
     * if we had a previously managed page locked, unlock it now
     */
        old_pa_locked = FALSE;
    pai = pa_index(pa);     /* now working with new incoming phys page */
    if (IS_MANAGED_PAGE(pai)) {

        /*
         * Step 2) Enter the mapping in the PV list for this
         */
        pv_h = pai_to_pvh(pai);

        if (pv_h->pmap == PMAP_NULL) {
            /*
             * No mappings yet, use rooted pv
             */
            pv_h->va_and_flags = vaddr;
            queue_init(&pv_h->qlink);

            if (options & PMAP_OPTIONS_INTERNAL) {
                pmap_phys_attributes[pai] |= PHYS_INTERNAL;
                pmap_phys_attributes[pai] &= ~PHYS_INTERNAL;
            if (options & PMAP_OPTIONS_REUSABLE) {
                pmap_phys_attributes[pai] |= PHYS_REUSABLE;
                pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
            if ((options & PMAP_OPTIONS_ALT_ACCT) &&
                IS_INTERNAL_PAGE(pai)) {
                pv_h->va_and_flags |= PVE_IS_ALTACCT;
                pv_h->va_and_flags &= ~PVE_IS_ALTACCT;

            /*
             * Add new pv_hashed_entry after header.
             */
            if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
                pvh_new = PV_HASHED_ENTRY_NULL;
            } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
                PV_HASHED_ALLOC(&pvh_e);
                if (PV_HASHED_ENTRY_NULL == pvh_e) {
                    /*
                     * the pv list is empty. if we are on
                     * the kernel pmap we'll use one of
                     * the special private kernel pv_e's,
                     * else, we need to unlock
                     * everything, zalloc a pv_e, and
                     * restart bringing in the pv_e with
                     */
                    if (kernel_pmap == pmap) {
                        PV_HASHED_KERN_ALLOC(&pvh_e);
                        pmap_pv_throttle(pmap);
                        pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

            if (PV_HASHED_ENTRY_NULL == pvh_e)
                panic("Mapping alias chain exhaustion, possibly induced by numerous kernel virtual double mappings");

            pvh_e->va_and_flags = vaddr;
            if ((options & PMAP_OPTIONS_ALT_ACCT) &&
                IS_INTERNAL_PAGE(pai)) {
                pvh_e->va_and_flags |= PVE_IS_ALTACCT;
                pvh_e->va_and_flags &= ~PVE_IS_ALTACCT;
            pv_hash_add(pvh_e, pv_h);

            /*
             * Remember that we used the pvlist entry.
             */
            pvh_e = PV_HASHED_ENTRY_NULL;

        /*
         * only count the mapping
         * for 'managed memory'
         */
        pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
        OSAddAtomic(+1, &pmap->stats.resident_count);
        if (pmap->stats.resident_count > pmap->stats.resident_max) {
            pmap->stats.resident_max = pmap->stats.resident_count;
        if (pmap != kernel_pmap) {
            /* update pmap stats */
            if (IS_REUSABLE_PAGE(pai)) {
                OSAddAtomic(+1, &pmap->stats.reusable);
                PMAP_STATS_PEAK(pmap->stats.reusable);
            } else if (IS_INTERNAL_PAGE(pai)) {
                OSAddAtomic(+1, &pmap->stats.internal);
                PMAP_STATS_PEAK(pmap->stats.internal);
                OSAddAtomic(+1, &pmap->stats.external);
                PMAP_STATS_PEAK(pmap->stats.external);

                /* internal but also alternate accounting */
                assert(IS_INTERNAL_PAGE(pai));
                pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
                pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
                /* alternate accounting, so not in footprint */
            } else if (IS_REUSABLE_PAGE(pai)) {
                assert(IS_INTERNAL_PAGE(pai));
                /* internal but reusable: not in footprint */
            } else if (IS_INTERNAL_PAGE(pai)) {
                assert(!IS_REUSABLE_PAGE(pai));
                /* internal: add to footprint */
                pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
                pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
                /* not internal: not in footprint */

    } else if (last_managed_page == 0) {
        /* Account for early mappings created before "managed pages"
         * are determined. Consider consulting the available DRAM map.
         */
        pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
        OSAddAtomic(+1, &pmap->stats.resident_count);
        if (pmap != kernel_pmap) {
            OSAddAtomic(+1, &pmap->stats.device);
            PMAP_STATS_PEAK(pmap->stats.device);
    /*
     * Step 3) Enter the mapping.
     *
     * Build a template to speed up entering -
     * only the pfn changes.
     */
    template = pa_to_pte(pa);

        template |= INTEL_PTE_VALID;
        template |= INTEL_EPT_IPTA;

    /*
     * DRK: It may be worth asserting on cache attribute flags that diverge
     * from the existing physical page attributes.
     */
    template |= pmap_get_cache_attributes(pa_index(pa), is_ept);

    /*
     * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
     */
    if (!is_ept && (flags & VM_MEM_NOT_CACHEABLE)) {
        if (!(flags & VM_MEM_GUARDED))
            template |= INTEL_PTE_PTA;
        template |= INTEL_PTE_NCACHE;

    if (pmap != kernel_pmap && !is_ept)
        template |= INTEL_PTE_USER;
    if (prot & VM_PROT_READ)
        template |= PTE_READ(is_ept);
    if (prot & VM_PROT_WRITE) {
        template |= PTE_WRITE(is_ept);
        if (is_ept && !pmap_ept_support_ad) {
            template |= PTE_MOD(is_ept);
            if (IS_MANAGED_PAGE(pai))
                pmap_phys_attributes[pai] |= PHYS_MODIFIED;

    if (prot & VM_PROT_EXECUTE) {
        assert(set_NX == 0);
        template = pte_set_ex(template, is_ept);

        template = pte_remove_ex(template, is_ept);

        template |= INTEL_PTE_WIRED;
        OSAddAtomic(+1, &pmap->stats.wired_count);
        pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);

        template |= INTEL_PTE_PS;

    /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
    if (is_ept && !pmap_ept_support_ad) {
        template |= PTE_REF(is_ept);
        if (IS_MANAGED_PAGE(pai))
            pmap_phys_attributes[pai] |= PHYS_REFERENCED;

    pmap_store_pte(pte, template);

    /*
     * if this was a managed page we delayed unlocking the pv until here
     * to prevent pmap_page_protect et al from finding it until the pte
     */
    if (IS_MANAGED_PAGE(pai)) {

    if (need_tlbflush == TRUE) {
        if (options & PMAP_OPTIONS_NOFLUSH)
            PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
            PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);

    if (pvh_e != PV_HASHED_ENTRY_NULL) {
        PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
    if (pvh_new != PV_HASHED_ENTRY_NULL) {
        PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);

    if (delpage_pm_obj) {
        vm_object_lock(delpage_pm_obj);
        m = vm_page_lookup(delpage_pm_obj, (delpage_pde_index * PAGE_SIZE));
        if (m == VM_PAGE_NULL)
            panic("pmap_enter: pte page not in object");
        vm_object_unlock(delpage_pm_obj);
        OSAddAtomic(-1, &inuse_ptepages_count);
        PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);

    PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return KERN_SUCCESS;
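
/*
 * Illustrative sketch (not compiled): batching TLB invalidations across many
 * pmap_enter_options() calls with PMAP_OPTIONS_NOFLUSH. The flush-context
 * helpers named here (pmap_flush_context_init/pmap_flush) are assumed from
 * the wider pmap interface and are not defined in this file.
 */
#if 0
static void
batched_enter_example(pmap_t pmap, vm_map_offset_t va, ppnum_t *pages, int cnt)
{
    pmap_flush_context pfc;
    int i;

    pmap_flush_context_init(&pfc);
    for (i = 0; i < cnt; i++) {
        (void) pmap_enter_options(pmap, va + ptoa(i), pages[i],
                                  VM_PROT_READ, VM_PROT_NONE, 0, FALSE,
                                  PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
    }
    pmap_flush(&pfc);   /* issue the deferred shootdowns once */
}
#endif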
/*
 * Remove a range of hardware page-table entries.
 * The entries given are the first (inclusive)
 * and last (exclusive) entries for the VM pages.
 * The virtual address is the va for the first pte.
 *
 * The pmap must be locked.
 * If the pmap is not the kernel pmap, the range must lie
 * entirely within one pte-page.  This is NOT checked.
 * Assumes that the pte-page exists.
 */
    vm_map_offset_t start_vaddr,

    pmap_remove_range_options(pmap, start_vaddr, spte, epte,
                              PMAP_OPTIONS_REMOVE);
pmap_remove_range_options(
    vm_map_offset_t start_vaddr,

    pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
    pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
    pv_hashed_entry_t pvh_e;
    int num_removed, num_unwired, num_found, num_invalid;
    int stats_external, stats_internal, stats_reusable;
    uint64_t stats_compressed;
    int ledgers_internal, ledgers_alt_internal;
    uint64_t ledgers_compressed, ledgers_alt_compressed;
    vm_map_offset_t vaddr;
    boolean_t is_ept = is_ept_pmap(pmap);
    boolean_t was_altacct;

    stats_compressed = 0;
    ledgers_internal = 0;
    ledgers_compressed = 0;
    ledgers_alt_internal = 0;
    ledgers_alt_compressed = 0;
    /* invalidate the PTEs first to "freeze" them */
    for (cpte = spte, vaddr = start_vaddr;
         cpte++, vaddr += PAGE_SIZE_64) {
        pt_entry_t p = *cpte;

        if ((options & PMAP_OPTIONS_REMOVE) &&
            (PTE_IS_COMPRESSED(p))) {
            assert(pmap != kernel_pmap);
            /* one less "compressed"... */
            ledgers_compressed++;
            if (p & PTE_COMPRESSED_ALT) {
                /* ... but it used to be "ALTACCT" */
                ledgers_alt_compressed++;
            /* clear marker(s) */
            /* XXX probably does not need to be atomic! */
            pmap_update_pte(cpte, INTEL_PTE_COMPRESSED_MASK, 0);

        if (!IS_MANAGED_PAGE(pai)) {
            /*
             * Outside range of managed physical memory.
             * Just remove the mappings.
             */
            pmap_store_pte(cpte, 0);

        if ((p & PTE_VALID_MASK(is_ept)) == 0)

        /* invalidate the PTE */
        pmap_update_pte(cpte, PTE_VALID_MASK(is_ept), 0);

    if (num_found == 0) {
        /* nothing was changed: we're done */

    /* propagate the invalidates to other CPUs */
    PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);

    for (cpte = spte, vaddr = start_vaddr;
         cpte++, vaddr += PAGE_SIZE_64) {

        pa = pte_to_pa(*cpte);

check_pte_for_compressed_marker:
        /*
         * This PTE could have been replaced with a
         * "compressed" marker after our first "freeze"
         * loop above, so check again.
         */
        if ((options & PMAP_OPTIONS_REMOVE) &&
            (PTE_IS_COMPRESSED(*cpte))) {
            assert(pmap != kernel_pmap);
            /* one less "compressed"... */
            ledgers_compressed++;
            if (*cpte & PTE_COMPRESSED_ALT) {
                /* ... but it used to be "ALTACCT" */
                ledgers_alt_compressed++;
            pmap_store_pte(cpte, 0);

            pa = pte_to_pa(*cpte);
                goto check_pte_for_compressed_marker;
        /*
         * Remove the mapping from the pvlist for this physical page.
         */
        pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte, &was_altacct);

        /* update pmap stats */
        if (IS_REUSABLE_PAGE(pai)) {
        } else if (IS_INTERNAL_PAGE(pai)) {

        /* update ledgers */
            /* internal and alternate accounting */
            assert(IS_INTERNAL_PAGE(pai));
            ledgers_alt_internal++;
        } else if (IS_REUSABLE_PAGE(pai)) {
            /* internal but reusable */
            assert(!was_altacct);
            assert(IS_INTERNAL_PAGE(pai));
        } else if (IS_INTERNAL_PAGE(pai)) {
            assert(!was_altacct);
            assert(!IS_REUSABLE_PAGE(pai));

        /*
         * Get the modify and reference bits, then
         * nuke the entry in the page table
         */
        /* remember reference and change */
            pmap_phys_attributes[pai] |=
                *cpte & (PHYS_MODIFIED | PHYS_REFERENCED);

            pmap_phys_attributes[pai] |=
                ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);

        /* completely invalidate the PTE */
        pmap_store_pte(cpte, 0);

        if (pvh_e != PV_HASHED_ENTRY_NULL) {
            pvh_e->qlink.next = (queue_entry_t) pvh_eh;
            if (pvh_et == PV_HASHED_ENTRY_NULL) {

    if (pvh_eh != PV_HASHED_ENTRY_NULL) {
        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
    if (pmap->stats.resident_count < num_removed)
        panic("pmap_remove_range: resident_count");
    pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed));
    PMAP_STATS_ASSERTF((pmap->stats.resident_count >= num_removed,
                        "pmap=%p num_removed=%d stats.resident_count=%d",
                        pmap, num_removed, pmap->stats.resident_count));
    OSAddAtomic(-num_removed, &pmap->stats.resident_count);

    if (pmap != kernel_pmap) {
        PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external,
                            "pmap=%p stats_external=%d stats.external=%d",
                            pmap, stats_external, pmap->stats.external));
        PMAP_STATS_ASSERTF((pmap->stats.internal >= stats_internal,
                            "pmap=%p stats_internal=%d stats.internal=%d",
                            pmap, stats_internal, pmap->stats.internal));
        PMAP_STATS_ASSERTF((pmap->stats.reusable >= stats_reusable,
                            "pmap=%p stats_reusable=%d stats.reusable=%d",
                            pmap, stats_reusable, pmap->stats.reusable));
        PMAP_STATS_ASSERTF((pmap->stats.compressed >= stats_compressed,
                            "pmap=%p stats_compressed=%lld, stats.compressed=%lld",
                            pmap, stats_compressed, pmap->stats.compressed));

        /* update pmap stats */
        if (stats_external) {
            OSAddAtomic(-stats_external, &pmap->stats.external);
        if (stats_internal) {
            OSAddAtomic(-stats_internal, &pmap->stats.internal);
            OSAddAtomic(-stats_reusable, &pmap->stats.reusable);
        if (stats_compressed)
            OSAddAtomic64(-stats_compressed, &pmap->stats.compressed);
        /* update ledgers */
        if (ledgers_internal) {
            pmap_ledger_debit(pmap,
                              task_ledgers.internal,
                              machine_ptob(ledgers_internal));
        if (ledgers_compressed) {
            pmap_ledger_debit(pmap,
                              task_ledgers.internal_compressed,
                              machine_ptob(ledgers_compressed));
        if (ledgers_alt_internal) {
            pmap_ledger_debit(pmap,
                              task_ledgers.alternate_accounting,
                              machine_ptob(ledgers_alt_internal));
        if (ledgers_alt_compressed) {
            pmap_ledger_debit(pmap,
                              task_ledgers.alternate_accounting_compressed,
                              machine_ptob(ledgers_alt_compressed));
        pmap_ledger_debit(pmap,
                          task_ledgers.phys_footprint,
                          machine_ptob((ledgers_internal -
                                        ledgers_alt_internal) +
                                       (ledgers_compressed -
                                        ledgers_alt_compressed)));

    if (pmap->stats.wired_count < num_unwired)
        panic("pmap_remove_range: wired_count");
    PMAP_STATS_ASSERTF((pmap->stats.wired_count >= num_unwired,
                        "pmap=%p num_unwired=%d stats.wired_count=%d",
                        pmap, num_unwired, pmap->stats.wired_count));
    OSAddAtomic(-num_unwired, &pmap->stats.wired_count);
    pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired));
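
/*
 * Note on the ledger arithmetic above: only internal pages that are not under
 * alternate accounting contribute to phys_footprint, so the final footprint
 * debit is (internal - alt_internal) + (compressed - alt_compressed) pages.
 */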
/*
 * Remove the given range of addresses
 * from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the hardware page size.
 */
    pmap_remove_options(map, s64, e64, PMAP_OPTIONS_REMOVE);
pmap_remove_options(
    pt_entry_t *spte, *epte;

    if (map == PMAP_NULL || s64 == e64)

    is_ept = is_ept_pmap(map);

    PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
               (uint32_t) (s64 >> 32), s64,
               (uint32_t) (e64 >> 32), e64);

    /*
     * Check that address range in the kernel does not overlap the stacks.
     * We initialize local static min/max variables once to avoid making
     * 2 function calls for every remove. Note also that these functions
     * both return 0 before kernel stacks have been initialized, and hence
     * the panic is not triggered in this case.
     */
    if (map == kernel_pmap) {
        static vm_offset_t kernel_stack_min = 0;
        static vm_offset_t kernel_stack_max = 0;

        if (kernel_stack_min == 0) {
            kernel_stack_min = min_valid_stack_address();
            kernel_stack_max = max_valid_stack_address();
        if ((kernel_stack_min <= s64 && s64 < kernel_stack_max) ||
            (kernel_stack_min < e64 && e64 <= kernel_stack_max))
            panic("pmap_remove() attempted in kernel stack");
    /*
     * The values of kernel_stack_min and kernel_stack_max are no longer
     * relevant now that we allocate kernel stacks in the kernel map,
     * so the old code above no longer applies. If we wanted to check that
     * we weren't removing a mapping of a page in a kernel stack we'd
     * mark the PTE with an unused bit and check that here.
     */
    deadline = rdtsc64() + max_preemption_latency_tsc;

        l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);

        pde = pmap_pde(map, s64);

        if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
            if (*pde & PTE_PS) {
                /*
                 * If we're removing a superpage, pmap_remove_range()
                 * must work on level 2 instead of level 1; and we're
                 * only passing a single level 2 entry instead of a
                 */
                epte = spte + 1; /* excluded */

                spte = pmap_pte(map, (s64 & ~(pde_mapped_size - 1)));
                spte = &spte[ptenum(s64)];
                epte = &spte[intel_btop(l64 - s64)];

            pmap_remove_range_options(map, s64, spte, epte,

        if (s64 < e64 && rdtsc64() >= deadline) {
            /* TODO: Rapid release/reacquisition can defeat
             * the "backoff" intent here; either consider a
             * fair spinlock, or a scheme whereby each lock
             * attempt marks the processor as within a spinlock
             * acquisition, and scan CPUs here to determine
             * if a backoff is necessary, to avoid sacrificing
             * performance in the common case.
             */
            deadline = rdtsc64() + max_preemption_latency_tsc;

    PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END,
    pmap_page_protect_options(pn, prot, 0, NULL);

/*
 * Routine:	pmap_page_protect_options
 *
 * Function:
 *	Lower the permission for all mappings to a given page.
 */
pmap_page_protect_options(
    unsigned int options,

    pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
    pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
    pv_hashed_entry_t nexth;
    pv_rooted_entry_t pv_h;
    pv_rooted_entry_t pv_e;
    pv_hashed_entry_t pvh_e;
    pt_entry_t new_pte_value;

    assert(pn != vm_page_fictitious_addr);
    if (pn == vm_page_guard_addr)

    pai = ppn_to_pai(pn);

    if (!IS_MANAGED_PAGE(pai)) {
        /*
         * Not a managed page.
         */

    PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START,

    /*
     * Determine the new protection.
     */
    case VM_PROT_READ | VM_PROT_EXECUTE:
        return;     /* nothing to do */

    pv_h = pai_to_pvh(pai);

    /*
     * Walk down PV list, if any, changing or removing all mappings.
     */
    if (pv_h->pmap == PMAP_NULL)

    pvh_e = (pv_hashed_entry_t) pv_e;   /* cheat */
        vm_map_offset_t vaddr;

        if ((options & PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
            (pmap_phys_attributes[pai] & PHYS_MODIFIED)) {
            /* page was modified, so it will be compressed */
            options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
            options |= PMAP_OPTIONS_COMPRESSOR;

        is_ept = is_ept_pmap(pmap);
        vaddr = PVE_VA(pv_e);
        pte = pmap_pte(pmap, vaddr);

        pmap_assert2((pa_index(pte_to_pa(*pte)) == pn),
                     "pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);

            panic("pmap_page_protect() "
                  "pmap=%p pn=0x%x vaddr=0x%llx\n",

        nexth = (pv_hashed_entry_t) queue_next(&pvh_e->qlink);

        /*
         * Remove the mapping if new protection is NONE
         */
            /* Remove per-pmap wired count */
            if (iswired(*pte)) {
                OSAddAtomic(-1, &pmap->stats.wired_count);
                pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);

            if (pmap != kernel_pmap &&
                (options & PMAP_OPTIONS_COMPRESSOR) &&
                IS_INTERNAL_PAGE(pai)) {
                assert(!PTE_IS_COMPRESSED(*pte));
                /* mark this PTE as having been "compressed" */
                new_pte_value = PTE_COMPRESSED;
                if (IS_ALTACCT_PAGE(pai, pv_e)) {
                    new_pte_value |= PTE_COMPRESSED_ALT;

            if (options & PMAP_OPTIONS_NOREFMOD) {
                pmap_store_pte(pte, new_pte_value);

                if (options & PMAP_OPTIONS_NOFLUSH)
                    PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
                    PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
                /*
                 * Remove the mapping, collecting dirty bits.
                 */
                pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);

                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);

                    pmap_phys_attributes[pai] |=
                        *pte & (PHYS_MODIFIED | PHYS_REFERENCED);

                    pmap_phys_attributes[pai] |=
                        ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);

                    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
                    IS_INTERNAL_PAGE(pai) &&
                    (pmap_phys_attributes[pai] &
                    /*
                     * Page is actually "modified" and
                     * will be compressed. Start
                     * accounting for it as "compressed".
                     */
                    assert(!(options & PMAP_OPTIONS_COMPRESSOR));
                    options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
                    options |= PMAP_OPTIONS_COMPRESSOR;
                    assert(new_pte_value == 0);
                    if (pmap != kernel_pmap) {
                        new_pte_value = PTE_COMPRESSED;
                        if (IS_ALTACCT_PAGE(pai, pv_e)) {
                            new_pte_value |= PTE_COMPRESSED_ALT;

                pmap_store_pte(pte, new_pte_value);

            if (pmap->stats.resident_count < 1)
                panic("pmap_page_protect: resident_count");

            pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
            assert(pmap->stats.resident_count >= 1);
            OSAddAtomic(-1, &pmap->stats.resident_count);
            /*
             * We only ever compress internal pages.
             */
            if (options & PMAP_OPTIONS_COMPRESSOR) {
                assert(IS_INTERNAL_PAGE(pai));

            if (pmap != kernel_pmap) {
                /* update pmap stats */
                if (IS_REUSABLE_PAGE(pai)) {
                    assert(pmap->stats.reusable > 0);
                    OSAddAtomic(-1, &pmap->stats.reusable);
                } else if (IS_INTERNAL_PAGE(pai)) {
                    assert(pmap->stats.internal > 0);
                    OSAddAtomic(-1, &pmap->stats.internal);
                    assert(pmap->stats.external > 0);
                    OSAddAtomic(-1, &pmap->stats.external);
                if ((options & PMAP_OPTIONS_COMPRESSOR) &&
                    IS_INTERNAL_PAGE(pai)) {
                    /* adjust "compressed" stats */
                    OSAddAtomic64(+1, &pmap->stats.compressed);
                    PMAP_STATS_PEAK(pmap->stats.compressed);
                    pmap->stats.compressed_lifetime++;

                /* update ledgers */
                if (IS_ALTACCT_PAGE(pai, pv_e)) {
                    assert(IS_INTERNAL_PAGE(pai));
                    pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
                    pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
                    if (options & PMAP_OPTIONS_COMPRESSOR) {
                        pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
                        pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE);
                } else if (IS_REUSABLE_PAGE(pai)) {
                    assert(!IS_ALTACCT_PAGE(pai, pv_e));
                    assert(IS_INTERNAL_PAGE(pai));
                    if (options & PMAP_OPTIONS_COMPRESSOR) {
                        pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
                        /* was not in footprint, but is now */
                        pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
                } else if (IS_INTERNAL_PAGE(pai)) {
                    assert(!IS_ALTACCT_PAGE(pai, pv_e));
                    assert(!IS_REUSABLE_PAGE(pai));
                    pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
                    /*
                     * Update all stats related to physical
                     * footprint, which only deals with
                     */
                    if (options & PMAP_OPTIONS_COMPRESSOR) {
                        /*
                         * This removal is only being
                         * done so we can send this page
                         * to the compressor; therefore
                         * it mustn't affect total task
                         */
                        pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
                        /*
                         * This internal page isn't
                         * going to the compressor,
                         * so adjust stats to keep
                         * phys_footprint up to date.
                         */
                        pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
            /*
             * Deal with the pv_rooted_entry.
             */
                /*
                 * Fix up head later.
                 */
                pv_h->pmap = PMAP_NULL;

                /*
                 * Delete this entry.
                 */
                pv_hash_remove(pvh_e);
                pvh_e->qlink.next = (queue_entry_t) pvh_eh;

                if (pvh_et == PV_HASHED_ENTRY_NULL)

            /*
             * Write-protect, after opportunistic refmod collect
             */
                pmap_phys_attributes[pai] |=
                    *pte & (PHYS_MODIFIED | PHYS_REFERENCED);

                pmap_phys_attributes[pai] |=
                    ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);

            pmap_update_pte(pte, PTE_WRITE(is_ept), 0);

            if (options & PMAP_OPTIONS_NOFLUSH)
                PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
                PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);

    } while ((pv_e = (pv_rooted_entry_t) nexth) != pv_h);

    /*
     * If pv_head mapping was removed, fix it up.
     */
    if (pv_h->pmap == PMAP_NULL) {
        pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);

        if (pvh_e != (pv_hashed_entry_t) pv_h) {
            pv_hash_remove(pvh_e);
            pv_h->pmap = pvh_e->pmap;
            pv_h->va_and_flags = pvh_e->va_and_flags;
            pvh_e->qlink.next = (queue_entry_t) pvh_eh;

            if (pvh_et == PV_HASHED_ENTRY_NULL)

    if (pvh_eh != PV_HASHED_ENTRY_NULL) {
        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);

    PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END,
/*
 * Clear specified attribute bits.
 */
phys_attribute_clear(
    unsigned int options,

    pv_rooted_entry_t pv_h;
    pv_hashed_entry_t pv_e;
    char attributes = 0;
    boolean_t is_internal, is_reusable, is_altacct, is_ept;
    int ept_bits_to_clear;
    boolean_t ept_keep_global_mod = FALSE;

    if ((bits & PHYS_MODIFIED) &&
        (options & PMAP_OPTIONS_NOFLUSH) &&
        panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): "
              "should not clear 'modified' without flushing TLBs\n",
              pn, bits, options, arg);

    /* We only support converting MOD and REF bits for EPT PTEs in this function */
    assert((bits & ~(PHYS_REFERENCED | PHYS_MODIFIED)) == 0);

    ept_bits_to_clear = (unsigned)physmap_refmod_to_ept(bits & (PHYS_MODIFIED | PHYS_REFERENCED));
    assert(pn != vm_page_fictitious_addr);
    if (pn == vm_page_guard_addr)

    pai = ppn_to_pai(pn);

    if (!IS_MANAGED_PAGE(pai)) {
        /*
         * Not a managed page.
         */

    PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START,

    pv_h = pai_to_pvh(pai);

    /*
     * Walk down PV list, clearing all modify or reference bits.
     * We do not have to lock the pv_list because we have
     */
    if (pv_h->pmap != PMAP_NULL) {
        /*
         * There are some mappings.
         */

        is_internal = IS_INTERNAL_PAGE(pai);
        is_reusable = IS_REUSABLE_PAGE(pai);

        pv_e = (pv_hashed_entry_t)pv_h;

            is_ept = is_ept_pmap(pmap);
            is_altacct = IS_ALTACCT_PAGE(pai, pv_e);

            pte = pmap_pte(pmap, va);
            /* grab ref/mod bits from this PTE */
            pte_bits = (*pte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
            /* propagate to page's global attributes */
                attributes |= pte_bits;
                attributes |= ept_refmod_to_physmap(pte_bits);
                if (!pmap_ept_support_ad && (pte_bits & INTEL_EPT_MOD)) {
                    ept_keep_global_mod = TRUE;
            /* which bits to clear for this PTE? */
                pte_bits &= ept_bits_to_clear;

            /*
             * Clear modify and/or reference bits.
             */
                pmap_update_pte(pte, bits, 0);

                /* Ensure all processors using this translation
                 * invalidate this TLB entry. The invalidation
                 * *must* follow the PTE update, to ensure that
                 * the TLB shadow of the 'D' bit (in particular)
                 * is synchronized with the updated PTE.
                 */
                if (!(options & PMAP_OPTIONS_NOFLUSH)) {
                    /* flush TLBS now */
                    PMAP_UPDATE_TLBS(pmap,
                    /* delayed TLB flush: add "pmap" info */
                    PMAP_UPDATE_TLBS_DELAYED(
                        (pmap_flush_context *)arg);
                    /* no TLB flushing at all */
            /* update pmap "reusable" stats */
            if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) &&
                pmap != kernel_pmap) {
                /* one less "reusable" */
                assert(pmap->stats.reusable > 0);
                OSAddAtomic(-1, &pmap->stats.reusable);
                /* one more "internal" */
                OSAddAtomic(+1, &pmap->stats.internal);
                PMAP_STATS_PEAK(pmap->stats.internal);
                assert(pmap->stats.internal > 0);
                    /* no impact on ledgers */
                    pmap_ledger_credit(pmap,
                        task_ledgers.internal,
                        task_ledgers.phys_footprint,
                /* one more "external" */
                OSAddAtomic(+1, &pmap->stats.external);
                PMAP_STATS_PEAK(pmap->stats.external);
                assert(pmap->stats.external > 0);
            } else if ((options & PMAP_OPTIONS_SET_REUSABLE) &&
                       pmap != kernel_pmap) {
                /* one more "reusable" */
                OSAddAtomic(+1, &pmap->stats.reusable);
                PMAP_STATS_PEAK(pmap->stats.reusable);
                assert(pmap->stats.reusable > 0);
                /* one less "internal" */
                assert(pmap->stats.internal > 0);
                OSAddAtomic(-1, &pmap->stats.internal);
                /* no impact on footprint */
                    pmap_ledger_debit(pmap,
                        task_ledgers.internal,
                        task_ledgers.phys_footprint,
                /* one less "external" */
                assert(pmap->stats.external > 0);
                OSAddAtomic(-1, &pmap->stats.external);

            pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);

        } while (pv_e != (pv_hashed_entry_t)pv_h);
    /* Opportunistic refmod collection, annulled
     * if both REF and MOD are being cleared.
     */
    pmap_phys_attributes[pai] |= attributes;

    if (ept_keep_global_mod) {
        /*
         * If the hardware doesn't support AD bits for EPT PTEs and someone is
         * requesting that we clear the modified bit for a phys page, we need
         * to ensure that there are no EPT mappings for the page with the
         * modified bit set. If there are, we cannot clear the global modified bit.
         */
        bits &= ~PHYS_MODIFIED;
    pmap_phys_attributes[pai] &= ~(bits);

    /* update this page's "reusable" status */
    if (options & PMAP_OPTIONS_CLEAR_REUSABLE) {
        pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
    } else if (options & PMAP_OPTIONS_SET_REUSABLE) {
        pmap_phys_attributes[pai] |= PHYS_REUSABLE;

    PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END,
/*
 * Check specified attribute bits.
 */
phys_attribute_test(
    pv_rooted_entry_t pv_h;
    pv_hashed_entry_t pv_e;

    assert(pn != vm_page_fictitious_addr);
    assert((bits & ~(PHYS_MODIFIED | PHYS_REFERENCED)) == 0);
    if (pn == vm_page_guard_addr)

    pai = ppn_to_pai(pn);

    if (!IS_MANAGED_PAGE(pai)) {
        /*
         * Not a managed page.
         */

    /*
     * Fast check... if bits already collected
     * no need to take any locks...
     * if not set, we need to recheck after taking
     * the lock in case they got pulled in while
     * we were waiting for the lock
     */
    if ((pmap_phys_attributes[pai] & bits) == bits)

    pv_h = pai_to_pvh(pai);

    attributes = pmap_phys_attributes[pai] & bits;

    /*
     * Walk down PV list, checking the mappings until we
     * reach the end or we've found the desired attributes.
     */
    if (attributes != bits &&
        pv_h->pmap != PMAP_NULL) {
        /*
         * There are some mappings.
         */
        pv_e = (pv_hashed_entry_t)pv_h;

            is_ept = is_ept_pmap(pmap);

            /*
             * pick up modify and/or reference bits from mapping
             */
            pte = pmap_pte(pmap, va);
                attributes |= (int)(*pte & bits);
                attributes |= (int)(ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED));

            pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);

        } while ((attributes != bits) &&
                 (pv_e != (pv_hashed_entry_t)pv_h));

    pmap_phys_attributes[pai] |= attributes;

    return (attributes);
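
/*
 * Illustrative sketch (not compiled): phys_attribute_test() returns the
 * subset of the requested bits that are set for the page, so callers can
 * phrase "is this page modified?" checks as below. The page number is
 * hypothetical.
 */
#if 0
static boolean_t
page_is_dirty_example(ppnum_t pn)
{
    /* non-zero iff at least one mapping (or the global attribute) has MOD set */
    return (phys_attribute_test(pn, PHYS_MODIFIED) != 0);
}
#endif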
/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address pair.
 *
 * In/out conditions:
 *	The mapping must already exist in the pmap.
 */
    vm_map_offset_t vaddr,

    if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
        panic("pmap_change_wiring(%p,0x%llx,%d): pte missing",

    if (wired && !iswired(*pte)) {
        /*
         * wiring down mapping
         */
        pmap_ledger_credit(map, task_ledgers.wired_mem, PAGE_SIZE);
        OSAddAtomic(+1, &map->stats.wired_count);
        pmap_update_pte(pte, 0, PTE_WIRED);

    else if (!wired && iswired(*pte)) {
        assert(map->stats.wired_count >= 1);
        OSAddAtomic(-1, &map->stats.wired_count);
        pmap_ledger_debit(map, task_ledgers.wired_mem, PAGE_SIZE);
        pmap_update_pte(pte, PTE_WIRED, 0);
2227 * "Backdoor" direct map routine for early mappings.
2228 * Useful for mapping memory outside the range
2229 * Sets A, D and NC if requested
    vm_map_offset_t start_addr,
    vm_map_offset_t end_addr,

    pt_entry_t template;

    vm_offset_t base = virt;
    template = pa_to_pte(start_addr)

    if ((flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) == VM_MEM_NOT_CACHEABLE) {
        template |= INTEL_PTE_NCACHE;
        if (!(flags & (VM_MEM_GUARDED)))
            template |= INTEL_PTE_PTA;

#if defined(__x86_64__)
    if ((prot & VM_PROT_EXECUTE) == 0)
        template |= INTEL_PTE_NX;

    if (prot & VM_PROT_WRITE)
        template |= INTEL_PTE_WRITE;

    while (start_addr < end_addr) {
        pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
        if (pte == PT_ENTRY_NULL) {
            panic("pmap_map_bd: Invalid kernel address\n");
        pmap_store_pte(pte, template);

        pte_increment_pa(template);
        start_addr += PAGE_SIZE;

    PMAP_UPDATE_TLBS(kernel_pmap, base, base + end_addr - start_addr);
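
/*
 * Illustrative sketch (not compiled): pmap_map_bd() is the early "backdoor"
 * mapper, typically used to map device memory into a pre-allocated kernel
 * virtual range before the normal VM machinery is available. The parameter
 * order (virt, start, end, prot, flags) is inferred from the body above; the
 * addresses below are hypothetical.
 */
#if 0
static void
map_device_example(vm_offset_t kva)
{
    vm_map_offset_t phys_start = 0xfed00000ULL;     /* hypothetical MMIO base */
    vm_map_offset_t phys_end   = phys_start + PAGE_SIZE;

    (void) pmap_map_bd(kva, phys_start, phys_end,
                       VM_PROT_READ | VM_PROT_WRITE,
                       VM_MEM_NOT_CACHEABLE);
}
#endif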
pmap_query_resident(
    mach_vm_size_t *compressed_bytes_p)

    pt_entry_t *spte, *epte;
    mach_vm_size_t resident_bytes;
    mach_vm_size_t compressed_bytes;

    if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64) {
        if (compressed_bytes_p) {
            *compressed_bytes_p = 0;

    is_ept = is_ept_pmap(pmap);

    PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START,
               (uint32_t) (s64 >> 32), s64,
               (uint32_t) (e64 >> 32), e64);

    compressed_bytes = 0;

    deadline = rdtsc64() + max_preemption_latency_tsc;

        l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);

        pde = pmap_pde(pmap, s64);

        if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
            if (*pde & PTE_PS) {
                /* superpage: not supported */

                spte = pmap_pte(pmap,
                                (s64 & ~(pde_mapped_size - 1)));
                spte = &spte[ptenum(s64)];
                epte = &spte[intel_btop(l64 - s64)];

                for (; spte < epte; spte++) {
                    if (pte_to_pa(*spte) != 0) {
                        resident_bytes += PAGE_SIZE;
                    } else if (*spte & PTE_COMPRESSED) {
                        compressed_bytes += PAGE_SIZE;

        if (s64 < e64 && rdtsc64() >= deadline) {
            deadline = rdtsc64() + max_preemption_latency_tsc;

    PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END,

    if (compressed_bytes_p) {
        *compressed_bytes_p = compressed_bytes;
    return resident_bytes;
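
/*
 * Illustrative sketch (not compiled): querying how much of a VA range is
 * resident vs. compressed. The exact parameter list of pmap_query_resident()
 * is partly elided above, so the call shape here is an assumption based on
 * how the body uses pmap, s64, e64 and compressed_bytes_p.
 */
#if 0
static mach_vm_size_t
resident_query_example(pmap_t pmap, addr64_t start_va, addr64_t end_va)
{
    mach_vm_size_t compressed = 0;
    mach_vm_size_t resident;

    /* assumed parameter order: pmap, start, end, &compressed */
    resident = pmap_query_resident(pmap, start_va, end_va, &compressed);
    return resident;    /* byte counts, accumulated in PAGE_SIZE units */
}
#endif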
pmap_query_page_info(

    if (pmap == PMAP_NULL || pmap == kernel_pmap) {
        return KERN_INVALID_ARGUMENT;

    is_ept = is_ept_pmap(pmap);

    pde = pmap_pde(pmap, va);
        !(*pde & PTE_VALID_MASK(is_ept)) ||

    pte = pmap_pte(pmap, va);
    if (pte == PT_ENTRY_NULL) {

    pa = pte_to_pa(*pte);
        if (PTE_IS_COMPRESSED(*pte)) {
            disp |= PMAP_QUERY_PAGE_COMPRESSED;
            if (*pte & PTE_COMPRESSED_ALT) {
                disp |= PMAP_QUERY_PAGE_COMPRESSED_ALTACCT;

        disp |= PMAP_QUERY_PAGE_PRESENT;
        if (!IS_MANAGED_PAGE(pai)) {
        } else if (pmap_pv_is_altacct(pmap, va, pai)) {
            assert(IS_INTERNAL_PAGE(pai));
            disp |= PMAP_QUERY_PAGE_INTERNAL;
            disp |= PMAP_QUERY_PAGE_ALTACCT;
        } else if (IS_REUSABLE_PAGE(pai)) {
            disp |= PMAP_QUERY_PAGE_REUSABLE;
        } else if (IS_INTERNAL_PAGE(pai)) {
            disp |= PMAP_QUERY_PAGE_INTERNAL;

    return KERN_SUCCESS;
#if DEBUG || DEVELOPMENT
kernel_pmap_lock(void)
{
    PMAP_LOCK(kernel_pmap);
}

kernel_pmap_unlock(void)
{
    PMAP_UNLOCK(kernel_pmap);
}
#endif /* DEBUG || DEVELOPMENT */