[apple/xnu.git] xnu-3248.60.10 / osfmk / i386 / pmap_x86_common.c
1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30
31 #include <vm/pmap.h>
32 #include <vm/vm_map.h>
33 #include <kern/ledger.h>
34 #include <i386/pmap_internal.h>
35
36 void pmap_remove_range(
37 pmap_t pmap,
38 vm_map_offset_t va,
39 pt_entry_t *spte,
40 pt_entry_t *epte);
41
42 void pmap_remove_range_options(
43 pmap_t pmap,
44 vm_map_offset_t va,
45 pt_entry_t *spte,
46 pt_entry_t *epte,
47 int options);
48
49 void pmap_reusable_range(
50 pmap_t pmap,
51 vm_map_offset_t va,
52 pt_entry_t *spte,
53 pt_entry_t *epte,
54 boolean_t reusable);
55
56 uint32_t pmap_update_clear_pte_count;
57
58 /*
59 * The Intel platform can nest at the PDE level, i.e. NBPDE (2MB) at a time,
60 * on an NBPDE boundary.
61 */
62
63 /* These symbols may be referenced directly by VM */
64 uint64_t pmap_nesting_size_min = NBPDE;
65 uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE;
66
67 /*
68 * kern_return_t pmap_nest(grand, subord, va_start, nstart, size)
69 *
70 * grand = the pmap into which subord will be nested
71 * subord = the pmap that is nested into grand
72 * va_start = start of the range in grand to be nested
73 * nstart = start of the corresponding range in the nested (subord) pmap
74 * size = size of the nested area (up to 16TB)
75 *
76 * Inserts a pmap into another. This is used to implement shared segments.
77 *
78 * Note that we depend upon higher-level VM locks to ensure that things don't change while
79 * we are doing this. For example, the VM should not perform any pmap enters while it is nesting,
80 * nor perform two nests at once.
81 */
82
83 /*
84 * This routine can nest subtrees either at the PDPT level (1GiB) or at the
85 * PDE level (2MiB). We currently disallow disparate offsets for the "subord"
86 * container and the "grand" parent. A minor optimization to consider for the
87 * future: make the "subord" truly a container rather than a full-fledged
88 * pagetable hierarchy which can be unnecessarily sparse (DRK).
89 */
90
91 kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint64_t size) {
92 vm_map_offset_t vaddr, nvaddr;
93 pd_entry_t *pde,*npde;
94 unsigned int i;
95 uint64_t num_pde;
96
97 assert(!is_ept_pmap(grand));
98 assert(!is_ept_pmap(subord));
99
100 if ((size & (pmap_nesting_size_min-1)) ||
101 (va_start & (pmap_nesting_size_min-1)) ||
102 (nstart & (pmap_nesting_size_min-1)) ||
103 ((size >> 28) > 65536)) /* Max size we can nest is 16TB */
104 return KERN_INVALID_VALUE;
105
106 if(size == 0) {
107 panic("pmap_nest: size is invalid - %016llX\n", size);
108 }
109
110 if (va_start != nstart)
111 panic("pmap_nest: va_start(0x%llx) != nstart(0x%llx)\n", va_start, nstart);
112
113 PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
114 (uintptr_t) grand, (uintptr_t) subord,
115 (uintptr_t) (va_start>>32), (uintptr_t) va_start, 0);
116
117 nvaddr = (vm_map_offset_t)nstart;
118 num_pde = size >> PDESHIFT;
119
120 PMAP_LOCK(subord);
121
122 subord->pm_shared = TRUE;
123
124 for (i = 0; i < num_pde;) {
125 if (((nvaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG && cpu_64bit) {
126
127 npde = pmap64_pdpt(subord, nvaddr);
128
129 while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
130 PMAP_UNLOCK(subord);
131 pmap_expand_pdpt(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE);
132 PMAP_LOCK(subord);
133 npde = pmap64_pdpt(subord, nvaddr);
134 }
135 *npde |= INTEL_PDPTE_NESTED;
136 nvaddr += NBPDPT;
137 i += (uint32_t)NPDEPG;
138 }
139 else {
140 npde = pmap_pde(subord, nvaddr);
141
142 while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
143 PMAP_UNLOCK(subord);
144 pmap_expand(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE);
145 PMAP_LOCK(subord);
146 npde = pmap_pde(subord, nvaddr);
147 }
148 nvaddr += NBPDE;
149 i++;
150 }
151 }
152
153 PMAP_UNLOCK(subord);
154
155 vaddr = (vm_map_offset_t)va_start;
156
157 PMAP_LOCK(grand);
158
159 for (i = 0;i < num_pde;) {
160 pd_entry_t tpde;
161
162 if (((vaddr & PDPTMASK) == 0) && ((num_pde - i) >= NPDEPG) && cpu_64bit) {
163 npde = pmap64_pdpt(subord, vaddr);
164 if (npde == 0)
165 panic("pmap_nest: no PDPT, subord %p nstart 0x%llx", subord, vaddr);
166 tpde = *npde;
167 pde = pmap64_pdpt(grand, vaddr);
168 if (0 == pde) {
169 PMAP_UNLOCK(grand);
170 pmap_expand_pml4(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
171 PMAP_LOCK(grand);
172 pde = pmap64_pdpt(grand, vaddr);
173 }
174 if (pde == 0)
175 panic("pmap_nest: no PDPT, grand %p vaddr 0x%llx", grand, vaddr);
176 pmap_store_pte(pde, tpde);
177 vaddr += NBPDPT;
178 i += (uint32_t) NPDEPG;
179 }
180 else {
181 npde = pmap_pde(subord, nstart);
182 if (npde == 0)
183 panic("pmap_nest: no npde, subord %p nstart 0x%llx", subord, nstart);
184 tpde = *npde;
185 nstart += NBPDE;
186 pde = pmap_pde(grand, vaddr);
187 if ((0 == pde) && cpu_64bit) {
188 PMAP_UNLOCK(grand);
189 pmap_expand_pdpt(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
190 PMAP_LOCK(grand);
191 pde = pmap_pde(grand, vaddr);
192 }
193
194 if (pde == 0)
195 panic("pmap_nest: no pde, grand %p vaddr 0x%llx", grand, vaddr);
196 vaddr += NBPDE;
197 pmap_store_pte(pde, tpde);
198 i++;
199 }
200 }
201
202 PMAP_UNLOCK(grand);
203
204 PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
205
206 return KERN_SUCCESS;
207 }
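/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a caller such as the shared-region code might nest and later un-nest
 * a submap pmap. The pmap handles and the 2MiB-aligned addresses are
 * hypothetical; the alignment checks mirror the ones enforced above.
 * Guarded out so it does not affect compilation.
 */
#if 0
static kern_return_t
nest_shared_region_example(pmap_t task_pmap, pmap_t shared_pmap,
    addr64_t base, uint64_t size)
{
	kern_return_t kr;

	/* pmap_nest() requires NBPDE (2MiB) alignment of address and size */
	assert((base & (pmap_nesting_size_min - 1)) == 0);
	assert((size & (pmap_nesting_size_min - 1)) == 0);

	/* identical start offsets in grand and subord are required (va_start == nstart) */
	kr = pmap_nest(task_pmap, shared_pmap, base, base, size);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... later, tear the nesting down; pmap_unnest() rounds to PDE boundaries */
	return pmap_unnest(task_pmap, base, size);
}
#endif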
208
209 /*
210 * kern_return_t pmap_unnest(grand, vaddr, size)
211 *
212 * grand = the pmap from which subord will be un-nested
213 * vaddr = start of the range in grand to be unnested; size = size of that range
214 *
215 * Removes a pmap from another. This is used to implement shared segments.
216 */
217
218 kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
219
220 pd_entry_t *pde;
221 unsigned int i;
222 uint64_t num_pde;
223 addr64_t va_start, va_end;
224 uint64_t npdpt = PMAP_INVALID_PDPTNUM;
225
226 PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START,
227 (uintptr_t) grand,
228 (uintptr_t) (vaddr>>32), (uintptr_t) vaddr, 0, 0);
229
230 if ((size & (pmap_nesting_size_min-1)) ||
231 (vaddr & (pmap_nesting_size_min-1))) {
232 panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n",
233 grand, vaddr, size);
234 }
235
236 assert(!is_ept_pmap(grand));
237
238 /* align everything to PDE boundaries */
239 va_start = vaddr & ~(NBPDE-1);
240 va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE-1);
241 size = va_end - va_start;
242
243 PMAP_LOCK(grand);
244
245 num_pde = size >> PDESHIFT;
246 vaddr = va_start;
247
248 for (i = 0; i < num_pde; ) {
249 if ((pdptnum(grand, vaddr) != npdpt) && cpu_64bit) {
250 npdpt = pdptnum(grand, vaddr);
251 pde = pmap64_pdpt(grand, vaddr);
252 if (pde && (*pde & INTEL_PDPTE_NESTED)) {
253 pmap_store_pte(pde, (pd_entry_t)0);
254 i += (uint32_t) NPDEPG;
255 vaddr += NBPDPT;
256 continue;
257 }
258 }
259 pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
260 if (pde == 0)
261 panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr);
262 pmap_store_pte(pde, (pd_entry_t)0);
263 i++;
264 vaddr += NBPDE;
265 }
266
267 PMAP_UPDATE_TLBS(grand, va_start, va_end);
268
269 PMAP_UNLOCK(grand);
270
271 PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
272
273 return KERN_SUCCESS;
274 }
275
276 kern_return_t
277 pmap_unnest_options(
278 pmap_t grand,
279 addr64_t vaddr,
280 __unused uint64_t size,
281 __unused unsigned int options) {
282 return pmap_unnest(grand, vaddr, size);
283 }
284
285 /* Invoked by the Mach VM to determine the platform-specific unnest region */
286
287 boolean_t pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) {
288 pd_entry_t *pdpte;
289 boolean_t rval = FALSE;
290
291 if (!cpu_64bit)
292 return rval;
293
294 PMAP_LOCK(p);
295
296 pdpte = pmap64_pdpt(p, *s);
297 if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
298 *s &= ~(NBPDPT -1);
299 rval = TRUE;
300 }
301
302 pdpte = pmap64_pdpt(p, *e);
303 if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
304 *e = ((*e + NBPDPT) & ~(NBPDPT -1));
305 rval = TRUE;
306 }
307
308 PMAP_UNLOCK(p);
309
310 return rval;
311 }
312
313 /*
314 * pmap_find_phys returns the (4K) physical page number containing a
315 * given virtual address in a given pmap.
316 * Note that pmap_pte may return a pde if this virtual address is
317 * mapped by a large page and this is taken into account in order
318 * to return the correct page number in this case.
319 */
320 ppnum_t
321 pmap_find_phys(pmap_t pmap, addr64_t va)
322 {
323 pt_entry_t *ptp;
324 pd_entry_t *pdep;
325 ppnum_t ppn = 0;
326 pd_entry_t pde;
327 pt_entry_t pte;
328 boolean_t is_ept;
329
330 is_ept = is_ept_pmap(pmap);
331
332 mp_disable_preemption();
333
334 /* This refcount test is a band-aid--several infrastructural changes
335 * are necessary to eliminate invocation of this routine from arbitrary
336 * contexts.
337 */
338
339 if (!pmap->ref_count)
340 goto pfp_exit;
341
342 pdep = pmap_pde(pmap, va);
343
344 if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) {
345 if (pde & PTE_PS) {
346 ppn = (ppnum_t) i386_btop(pte_to_pa(pde));
347 ppn += (ppnum_t) ptenum(va);
348 }
349 else {
350 ptp = pmap_pte(pmap, va);
351 if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) {
352 ppn = (ppnum_t) i386_btop(pte_to_pa(pte));
353 }
354 }
355 }
356 pfp_exit:
357 mp_enable_preemption();
358
359 return ppn;
360 }
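/*
 * Worked example (added commentary, not part of the original source): for a
 * virtual address covered by a 2MiB superpage, the PDE supplies the 2MiB
 * frame and ptenum(va), i.e. bits 12..20 of va, selects the 4K page inside
 * it, which is why the PTE_PS branch above adds ptenum(va) to the PDE's
 * frame. The address below is hypothetical; guarded out of compilation.
 */
#if 0
static void
example_superpage_ppn(void)
{
	addr64_t va = 0x200000ULL + 0x5000ULL;	/* 2MiB-aligned region plus 5 pages */

	/* ptenum() extracts the 4K-page index within the 2MiB superpage: 5 here */
	assert(ptenum(va) == 5);
}
#endif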
361
362 /*
363 * Update cache attributes for all extant managed mappings.
364 * Assumes PV for this page is locked, and that the page
365 * is managed. We assume that this physical page may be mapped in
366 * both EPT and normal Intel PTEs, so we convert the attributes
367 * to the corresponding format for each pmap.
368 *
369 * We assert that the passed set of attributes is a subset of the
370 * PHYS_CACHEABILITY_MASK.
371 */
372 void
373 pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) {
374 pv_rooted_entry_t pv_h, pv_e;
375 pv_hashed_entry_t pvh_e, nexth;
376 vm_map_offset_t vaddr;
377 pmap_t pmap;
378 pt_entry_t *ptep;
379 boolean_t is_ept;
380 unsigned ept_attributes;
381
382 assert(IS_MANAGED_PAGE(pn));
383 assert(((~PHYS_CACHEABILITY_MASK) & attributes) == 0);
384
385 /* We don't support the PTA bit for EPT PTEs */
386 if (attributes & INTEL_PTE_NCACHE)
387 ept_attributes = INTEL_EPT_NCACHE;
388 else
389 ept_attributes = INTEL_EPT_WB;
390
391 pv_h = pai_to_pvh(pn);
392 /* TODO: translate the PHYS_* bits to PTE bits, while they're
393 * currently identical, they may not remain so
394 * Potential optimization (here and in page_protect),
395 * parallel shootdowns, check for redundant
396 * attribute modifications.
397 */
398
399 /*
400 * Alter attributes on all mappings
401 */
402 if (pv_h->pmap != PMAP_NULL) {
403 pv_e = pv_h;
404 pvh_e = (pv_hashed_entry_t)pv_e;
405
406 do {
407 pmap = pv_e->pmap;
408 vaddr = pv_e->va;
409 ptep = pmap_pte(pmap, vaddr);
410
411 if (0 == ptep)
412 panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
413
414 is_ept = is_ept_pmap(pmap);
415
416 nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);
417 if (!is_ept) {
418 pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
419 } else {
420 pmap_update_pte(ptep, INTEL_EPT_CACHE_MASK, ept_attributes);
421 }
422 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
423 pvh_e = nexth;
424 } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
425 }
426 }
427
428 void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
429 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
430
431 if (dofilter) {
432 CPU_CR3_MARK_INACTIVE();
433 } else {
434 CPU_CR3_MARK_ACTIVE();
435 mfence();
436 if (current_cpu_datap()->cpu_tlb_invalid)
437 process_pmap_updates();
438 }
439 }
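/*
 * Usage sketch (added, not part of the original source): a caller that can
 * tolerate briefly deferred TLB invalidation brackets its critical region so
 * this CPU is not targeted by shootdown interrupts, then re-arms filtering
 * and catches up on any invalidation that arrived meanwhile. Preemption must
 * already be disabled, per the assertion above. Guarded out of compilation.
 */
#if 0
static void
example_tlb_filter_bracket(void)
{
	mp_disable_preemption();

	x86_filter_TLB_coherency_interrupts(TRUE);	/* mark this CPU's CR3 inactive */
	/* ... short window that tolerates deferred TLB invalidation ... */
	x86_filter_TLB_coherency_interrupts(FALSE);	/* re-activate and process updates */

	mp_enable_preemption();
}
#endif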
440
441
442 /*
443 * Insert the given physical page (p) at
444 * the specified virtual address (v) in the
445 * target physical map with the protection requested.
446 *
447 * If specified, the page will be wired down, meaning
448 * that the related pte cannot be reclaimed.
449 *
450 * NB: This is the only routine which MAY NOT lazy-evaluate
451 * or lose information. That is, this routine must actually
452 * insert this page into the given map NOW.
453 */
454
455 void
456 pmap_enter(
457 register pmap_t pmap,
458 vm_map_offset_t vaddr,
459 ppnum_t pn,
460 vm_prot_t prot,
461 vm_prot_t fault_type,
462 unsigned int flags,
463 boolean_t wired)
464 {
465 (void) pmap_enter_options(pmap, vaddr, pn, prot, fault_type, flags, wired, PMAP_EXPAND_OPTIONS_NONE, NULL);
466 }
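/*
 * Usage sketch (added, not part of the original source): entering a wired,
 * uncached kernel mapping for one physical page. The virtual address and
 * page number are hypothetical; the flags follow the VM_MEM_* conventions
 * handled below in pmap_enter_options(). Guarded out of compilation.
 */
#if 0
static void
example_enter_device_page(vm_map_offset_t kva, ppnum_t pn)
{
	pmap_enter(kernel_pmap,
	    kva,			/* target virtual address */
	    pn,				/* physical page number */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_NONE,		/* fault_type: unused by this path */
	    VM_MEM_NOT_CACHEABLE,	/* request an uncached mapping */
	    TRUE);			/* wired: the PTE may not be reclaimed */
}
#endif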
467
468
469 kern_return_t
470 pmap_enter_options(
471 register pmap_t pmap,
472 vm_map_offset_t vaddr,
473 ppnum_t pn,
474 vm_prot_t prot,
475 __unused vm_prot_t fault_type,
476 unsigned int flags,
477 boolean_t wired,
478 unsigned int options,
479 void *arg)
480 {
481 pt_entry_t *pte;
482 pv_rooted_entry_t pv_h;
483 ppnum_t pai;
484 pv_hashed_entry_t pvh_e;
485 pv_hashed_entry_t pvh_new;
486 pt_entry_t template;
487 pmap_paddr_t old_pa;
488 pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn);
489 boolean_t need_tlbflush = FALSE;
490 boolean_t set_NX;
491 char oattr;
492 boolean_t old_pa_locked;
493 /* 2MiB mappings are confined to x86_64 by VM */
494 boolean_t superpage = flags & VM_MEM_SUPERPAGE;
495 vm_object_t delpage_pm_obj = NULL;
496 uint64_t delpage_pde_index = 0;
497 pt_entry_t old_pte;
498 kern_return_t kr_expand;
499 boolean_t is_ept;
500
501 pmap_intr_assert();
502
503 if (pmap == PMAP_NULL)
504 return KERN_INVALID_ARGUMENT;
505
506 is_ept = is_ept_pmap(pmap);
507
508 /* N.B. We can be supplied a zero page frame in the NOENTER case; it's an
509 * unused value for that scenario.
510 */
511 assert(pn != vm_page_fictitious_addr);
512
513 if (pn == vm_page_guard_addr)
514 return KERN_INVALID_ARGUMENT;
515
516 PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
517 pmap,
518 (uint32_t) (vaddr >> 32), (uint32_t) vaddr,
519 pn, prot);
520
521 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
522 set_NX = FALSE;
523 else
524 set_NX = TRUE;
525
526 if (__improbable(set_NX && (pmap == kernel_pmap) && ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) || (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) {
527 set_NX = FALSE;
528 }
529
530 /*
531 * Must allocate a new pvlist entry while we're unlocked;
532 * zalloc may cause pageout (which will lock the pmap system).
533 * If we determine we need a pvlist entry, we will unlock
534 * and allocate one. Then we will retry, throwing away
535 * the allocated entry later (if we no longer need it).
536 */
537
538 pvh_new = PV_HASHED_ENTRY_NULL;
539 Retry:
540 pvh_e = PV_HASHED_ENTRY_NULL;
541
542 PMAP_LOCK(pmap);
543
544 /*
545 * Expand pmap to include this pte. Assume that
546 * pmap is always expanded to include enough hardware
547 * pages to map one VM page.
548 */
549 if(superpage) {
550 while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
551 /* need room for another pde entry */
552 PMAP_UNLOCK(pmap);
553 kr_expand = pmap_expand_pdpt(pmap, vaddr, options);
554 if (kr_expand != KERN_SUCCESS)
555 return kr_expand;
556 PMAP_LOCK(pmap);
557 }
558 } else {
559 while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
560 /*
561 * Must unlock to expand the pmap
562 * going to grow pde level page(s)
563 */
564 PMAP_UNLOCK(pmap);
565 kr_expand = pmap_expand(pmap, vaddr, options);
566 if (kr_expand != KERN_SUCCESS)
567 return kr_expand;
568 PMAP_LOCK(pmap);
569 }
570 }
571 if (options & PMAP_EXPAND_OPTIONS_NOENTER) {
572 PMAP_UNLOCK(pmap);
573 return KERN_SUCCESS;
574 }
575
576 if (superpage && *pte && !(*pte & PTE_PS)) {
577 /*
578 * There is still an empty page table mapped that
579 * was used for a previous base page mapping.
580 * Remember the PDE and the PDE index, so that we
581 * can free the page at the end of this function.
582 */
583 delpage_pde_index = pdeidx(pmap, vaddr);
584 delpage_pm_obj = pmap->pm_obj;
585 *pte = 0;
586 }
587
588 old_pa = pte_to_pa(*pte);
589 pai = pa_index(old_pa);
590 old_pa_locked = FALSE;
591
592 if (old_pa == 0 &&
593 (*pte & PTE_COMPRESSED)) {
594 /* one less "compressed" */
595 OSAddAtomic64(-1, &pmap->stats.compressed);
596 /* marker will be cleared below */
597 }
598
599 /*
600 * If we have a previous managed page, lock the pv entry now. After
601 * we lock it, check whether someone beat us to the lock and, if so,
602 * drop the lock.
603 */
604 if ((0 != old_pa) && IS_MANAGED_PAGE(pai)) {
605 LOCK_PVH(pai);
606 old_pa_locked = TRUE;
607 old_pa = pte_to_pa(*pte);
608 if (0 == old_pa) {
609 UNLOCK_PVH(pai); /* another path beat us to it */
610 old_pa_locked = FALSE;
611 }
612 }
613
614 /*
615 * Special case if the incoming physical page is already mapped
616 * at this address.
617 */
618 if (old_pa == pa) {
619 pt_entry_t old_attributes =
620 *pte & ~(PTE_REF(is_ept) | PTE_MOD(is_ept));
621
622 /*
623 * May be changing its wired attribute or protection
624 */
625
626 template = pa_to_pte(pa);
627
628 /* ?: WORTH ASSERTING THAT AT LEAST ONE RWX (implicit valid) PASSED FOR EPT? */
629 if (!is_ept) {
630 template |= INTEL_PTE_VALID;
631 } else {
632 template |= INTEL_EPT_IPTA;
633 }
634
635 template |= pmap_get_cache_attributes(pa_index(pa), is_ept);
636
637 /*
638 * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
639 */
640 if (!is_ept && (VM_MEM_NOT_CACHEABLE ==
641 (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)))) {
642 if (!(flags & VM_MEM_GUARDED))
643 template |= INTEL_PTE_PTA;
644 template |= INTEL_PTE_NCACHE;
645 }
646 if (pmap != kernel_pmap && !is_ept)
647 template |= INTEL_PTE_USER;
648
649 if (prot & VM_PROT_READ)
650 template |= PTE_READ(is_ept);
651
652 if (prot & VM_PROT_WRITE) {
653 template |= PTE_WRITE(is_ept);
654 if (is_ept && !pmap_ept_support_ad) {
655 template |= PTE_MOD(is_ept);
656 if (old_pa_locked) {
657 assert(IS_MANAGED_PAGE(pai));
658 pmap_phys_attributes[pai] |= PHYS_MODIFIED;
659 }
660 }
661 }
662 if (prot & VM_PROT_EXECUTE) {
663 assert(set_NX == 0);
664 template = pte_set_ex(template, is_ept);
665 }
666
667 if (set_NX)
668 template = pte_remove_ex(template, is_ept);
669
670 if (wired) {
671 template |= PTE_WIRED;
672 if (!iswired(old_attributes)) {
673 OSAddAtomic(+1, &pmap->stats.wired_count);
674 pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
675 }
676 } else {
677 if (iswired(old_attributes)) {
678 assert(pmap->stats.wired_count >= 1);
679 OSAddAtomic(-1, &pmap->stats.wired_count);
680 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
681 }
682 }
683
684 if (superpage) /* this path cannot be used */
685 template |= PTE_PS; /* to change the page size! */
686
687 if (old_attributes == template)
688 goto dont_update_pte;
689
690 /* Determine delta, PV locked */
691 need_tlbflush =
692 ((old_attributes ^ template) != PTE_WIRED);
693
694 if (need_tlbflush == TRUE && !(old_attributes & PTE_WRITE(is_ept))) {
695 if ((old_attributes ^ template) == PTE_WRITE(is_ept))
696 need_tlbflush = FALSE;
697 }
698
699 /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
700 if (is_ept && !pmap_ept_support_ad) {
701 template |= PTE_REF(is_ept);
702 if (old_pa_locked) {
703 assert(IS_MANAGED_PAGE(pai));
704 pmap_phys_attributes[pai] |= PHYS_REFERENCED;
705 }
706 }
707
708 /* store modified PTE and preserve RC bits */
709 pt_entry_t npte, opte;
710 do {
711 opte = *pte;
712 npte = template | (opte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
713 } while (!pmap_cmpx_pte(pte, opte, npte));
714 dont_update_pte:
715 if (old_pa_locked) {
716 UNLOCK_PVH(pai);
717 old_pa_locked = FALSE;
718 }
719 goto Done;
720 }
721
722 /*
723 * Outline of code from here:
724 * 1) If va was mapped, update TLBs, remove the mapping
725 * and remove old pvlist entry.
726 * 2) Add pvlist entry for new mapping
727 * 3) Enter new mapping.
728 *
729 * If the old physical page is not managed step 1) is skipped
730 * (except for updating the TLBs), and the mapping is
731 * overwritten at step 3). If the new physical page is not
732 * managed, step 2) is skipped.
733 */
734
735 if (old_pa != (pmap_paddr_t) 0) {
736
737 /*
738 * Don't do anything to pages outside valid memory here.
739 * Instead convince the code that enters a new mapping
740 * to overwrite the old one.
741 */
742
743 /* invalidate the PTE */
744 pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
745 /* propagate invalidate everywhere */
746 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
747 /* remember reference and change */
748 old_pte = *pte;
749 oattr = (char) (old_pte & (PTE_MOD(is_ept) | PTE_REF(is_ept)));
750 /* completely invalidate the PTE */
751 pmap_store_pte(pte, 0);
752
753 if (IS_MANAGED_PAGE(pai)) {
754 pmap_assert(old_pa_locked == TRUE);
755 pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
756 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
757 assert(pmap->stats.resident_count >= 1);
758 OSAddAtomic(-1, &pmap->stats.resident_count);
759 if (pmap != kernel_pmap) {
760 if (IS_REUSABLE_PAGE(pai)) {
761 assert(pmap->stats.reusable > 0);
762 OSAddAtomic(-1, &pmap->stats.reusable);
763 } else if (IS_INTERNAL_PAGE(pai)) {
764 assert(pmap->stats.internal > 0);
765 OSAddAtomic(-1, &pmap->stats.internal);
766 } else {
767 assert(pmap->stats.external > 0);
768 OSAddAtomic(-1, &pmap->stats.external);
769 }
770 }
771 if (iswired(*pte)) {
772 assert(pmap->stats.wired_count >= 1);
773 OSAddAtomic(-1, &pmap->stats.wired_count);
774 pmap_ledger_debit(pmap, task_ledgers.wired_mem,
775 PAGE_SIZE);
776 }
777
778 if (!is_ept) {
779 pmap_phys_attributes[pai] |= oattr;
780 } else {
781 pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr);
782 }
783
784 /*
785 * Remove the mapping from the pvlist for
786 * this physical page.
787 * We'll end up with either a rooted pv or a
788 * hashed pv
789 */
790 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte);
791
792 } else {
793
794 /*
795 * old_pa is not managed.
796 * Do removal part of accounting.
797 */
798
799 if (pmap != kernel_pmap) {
800 #if 00
801 assert(pmap->stats.device > 0);
802 OSAddAtomic(-1, &pmap->stats.device);
803 #endif
804 }
805 if (iswired(*pte)) {
806 assert(pmap->stats.wired_count >= 1);
807 OSAddAtomic(-1, &pmap->stats.wired_count);
808 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
809 }
810 }
811 }
812
813 /*
814 * if we had a previously managed page locked, unlock it now
815 */
816 if (old_pa_locked) {
817 UNLOCK_PVH(pai);
818 old_pa_locked = FALSE;
819 }
820
821 pai = pa_index(pa); /* now working with new incoming phys page */
822 if (IS_MANAGED_PAGE(pai)) {
823
824 /*
825 * Step 2) Enter the mapping in the PV list for this
826 * physical page.
827 */
828 pv_h = pai_to_pvh(pai);
829
830 LOCK_PVH(pai);
831
832 if (pv_h->pmap == PMAP_NULL) {
833 /*
834 * No mappings yet, use rooted pv
835 */
836 pv_h->va = vaddr;
837 pv_h->pmap = pmap;
838 queue_init(&pv_h->qlink);
839
840 if (options & PMAP_OPTIONS_INTERNAL) {
841 pmap_phys_attributes[pai] |= PHYS_INTERNAL;
842 } else {
843 pmap_phys_attributes[pai] &= ~PHYS_INTERNAL;
844 }
845 if (options & PMAP_OPTIONS_REUSABLE) {
846 pmap_phys_attributes[pai] |= PHYS_REUSABLE;
847 } else {
848 pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
849 }
850 } else {
851 /*
852 * Add new pv_hashed_entry after header.
853 */
854 if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
855 pvh_e = pvh_new;
856 pvh_new = PV_HASHED_ENTRY_NULL;
857 } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
858 PV_HASHED_ALLOC(&pvh_e);
859 if (PV_HASHED_ENTRY_NULL == pvh_e) {
860 /*
861 * the pv list is empty. if we are on
862 * the kernel pmap we'll use one of
863 * the special private kernel pv_e's,
864 * else, we need to unlock
865 * everything, zalloc a pv_e, and
866 * restart bringing in the pv_e with
867 * us.
868 */
869 if (kernel_pmap == pmap) {
870 PV_HASHED_KERN_ALLOC(&pvh_e);
871 } else {
872 UNLOCK_PVH(pai);
873 PMAP_UNLOCK(pmap);
874 pmap_pv_throttle(pmap);
875 pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
876 goto Retry;
877 }
878 }
879 }
880
881 if (PV_HASHED_ENTRY_NULL == pvh_e)
882 panic("Mapping alias chain exhaustion, possibly induced by numerous kernel virtual double mappings");
883
884 pvh_e->va = vaddr;
885 pvh_e->pmap = pmap;
886 pvh_e->ppn = pn;
887 pv_hash_add(pvh_e, pv_h);
888
889 /*
890 * Remember that we used the pvlist entry.
891 */
892 pvh_e = PV_HASHED_ENTRY_NULL;
893 }
894
895 /*
896 * only count the mapping
897 * for 'managed memory'
898 */
899 pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
900 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
901 OSAddAtomic(+1, &pmap->stats.resident_count);
902 if (pmap->stats.resident_count > pmap->stats.resident_max) {
903 pmap->stats.resident_max = pmap->stats.resident_count;
904 }
905 if (pmap != kernel_pmap) {
906 if (IS_REUSABLE_PAGE(pai)) {
907 OSAddAtomic(+1, &pmap->stats.reusable);
908 PMAP_STATS_PEAK(pmap->stats.reusable);
909 } else if (IS_INTERNAL_PAGE(pai)) {
910 OSAddAtomic(+1, &pmap->stats.internal);
911 PMAP_STATS_PEAK(pmap->stats.internal);
912 } else {
913 OSAddAtomic(+1, &pmap->stats.external);
914 PMAP_STATS_PEAK(pmap->stats.external);
915 }
916 }
917 } else if (last_managed_page == 0) {
918 /* Account for early mappings created before "managed pages"
919 * are determined. Consider consulting the available DRAM map.
920 */
921 pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
922 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
923 OSAddAtomic(+1, &pmap->stats.resident_count);
924 if (pmap != kernel_pmap) {
925 #if 00
926 OSAddAtomic(+1, &pmap->stats.device);
927 PMAP_STATS_PEAK(pmap->stats.device);
928 #endif
929 }
930 }
931 /*
932 * Step 3) Enter the mapping.
933 *
934 * Build a template to speed up entering -
935 * only the pfn changes.
936 */
937 template = pa_to_pte(pa);
938
939 if (!is_ept) {
940 template |= INTEL_PTE_VALID;
941 } else {
942 template |= INTEL_EPT_IPTA;
943 }
944
945
946 /*
947 * DRK: It may be worth asserting on cache attribute flags that diverge
948 * from the existing physical page attributes.
949 */
950
951 template |= pmap_get_cache_attributes(pa_index(pa), is_ept);
952
953 /*
954 * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
955 */
956 if (!is_ept && (flags & VM_MEM_NOT_CACHEABLE)) {
957 if (!(flags & VM_MEM_GUARDED))
958 template |= INTEL_PTE_PTA;
959 template |= INTEL_PTE_NCACHE;
960 }
961 if (pmap != kernel_pmap && !is_ept)
962 template |= INTEL_PTE_USER;
963 if (prot & VM_PROT_READ)
964 template |= PTE_READ(is_ept);
965 if (prot & VM_PROT_WRITE) {
966 template |= PTE_WRITE(is_ept);
967 if (is_ept && !pmap_ept_support_ad) {
968 template |= PTE_MOD(is_ept);
969 if (IS_MANAGED_PAGE(pai))
970 pmap_phys_attributes[pai] |= PHYS_MODIFIED;
971 }
972 }
973 if (prot & VM_PROT_EXECUTE) {
974 assert(set_NX == 0);
975 template = pte_set_ex(template, is_ept);
976 }
977
978 if (set_NX)
979 template = pte_remove_ex(template, is_ept);
980 if (wired) {
981 template |= INTEL_PTE_WIRED;
982 OSAddAtomic(+1, & pmap->stats.wired_count);
983 pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
984 }
985 if (superpage)
986 template |= INTEL_PTE_PS;
987
988 /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
989 if (is_ept && !pmap_ept_support_ad) {
990 template |= PTE_REF(is_ept);
991 if (IS_MANAGED_PAGE(pai))
992 pmap_phys_attributes[pai] |= PHYS_REFERENCED;
993 }
994
995 pmap_store_pte(pte, template);
996
997 /*
998 * if this was a managed page we delayed unlocking the pv until here
999 * to prevent pmap_page_protect et al from finding it until the pte
1000 * has been stored
1001 */
1002 if (IS_MANAGED_PAGE(pai)) {
1003 UNLOCK_PVH(pai);
1004 }
1005 Done:
1006 if (need_tlbflush == TRUE) {
1007 if (options & PMAP_OPTIONS_NOFLUSH)
1008 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
1009 else
1010 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
1011 }
1012 if (pvh_e != PV_HASHED_ENTRY_NULL) {
1013 PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
1014 }
1015 if (pvh_new != PV_HASHED_ENTRY_NULL) {
1016 PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
1017 }
1018 PMAP_UNLOCK(pmap);
1019
1020 if (delpage_pm_obj) {
1021 vm_page_t m;
1022
1023 vm_object_lock(delpage_pm_obj);
1024 m = vm_page_lookup(delpage_pm_obj, (delpage_pde_index * PAGE_SIZE));
1025 if (m == VM_PAGE_NULL)
1026 panic("pmap_enter: pte page not in object");
1027 VM_PAGE_FREE(m);
1028 vm_object_unlock(delpage_pm_obj);
1029 OSAddAtomic(-1, &inuse_ptepages_count);
1030 PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
1031 }
1032
1033 PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
1034 return KERN_SUCCESS;
1035 }
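/*
 * Usage sketch (added, not part of the original source): batching TLB
 * invalidation across several enters with PMAP_OPTIONS_NOFLUSH and a
 * pmap_flush_context, then issuing one shootdown at the end.
 * pmap_flush_context_init() and pmap_flush() are assumed to be the x86 pmap
 * batching interface; the local variable names are hypothetical. Guarded out
 * of compilation.
 */
#if 0
static void
example_batched_enters(pmap_t pmap, vm_map_offset_t va, ppnum_t first_pn, unsigned npages)
{
	pmap_flush_context pfc;
	unsigned i;

	pmap_flush_context_init(&pfc);

	for (i = 0; i < npages; i++) {
		(void) pmap_enter_options(pmap,
		    va + (uint64_t)i * PAGE_SIZE_64,
		    first_pn + i,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
		    0, FALSE, PMAP_OPTIONS_NOFLUSH, &pfc);
	}

	pmap_flush(&pfc);	/* one shootdown for the whole batch */
}
#endif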
1036
1037 /*
1038 * Remove a range of hardware page-table entries.
1039 * The entries given are the first (inclusive)
1040 * and last (exclusive) entries for the VM pages.
1041 * The virtual address is the va for the first pte.
1042 *
1043 * The pmap must be locked.
1044 * If the pmap is not the kernel pmap, the range must lie
1045 * entirely within one pte-page. This is NOT checked.
1046 * Assumes that the pte-page exists.
1047 */
1048
1049 void
1050 pmap_remove_range(
1051 pmap_t pmap,
1052 vm_map_offset_t start_vaddr,
1053 pt_entry_t *spte,
1054 pt_entry_t *epte)
1055 {
1056 pmap_remove_range_options(pmap, start_vaddr, spte, epte,
1057 PMAP_OPTIONS_REMOVE);
1058 }
1059
1060 void
1061 pmap_remove_range_options(
1062 pmap_t pmap,
1063 vm_map_offset_t start_vaddr,
1064 pt_entry_t *spte,
1065 pt_entry_t *epte,
1066 int options)
1067 {
1068 pt_entry_t *cpte;
1069 pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
1070 pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
1071 pv_hashed_entry_t pvh_e;
1072 int pvh_cnt = 0;
1073 int num_removed, num_unwired, num_found, num_invalid;
1074 int num_device, num_external, num_internal, num_reusable;
1075 uint64_t num_compressed;
1076 ppnum_t pai;
1077 pmap_paddr_t pa;
1078 vm_map_offset_t vaddr;
1079 boolean_t is_ept = is_ept_pmap(pmap);
1080
1081 num_removed = 0;
1082 num_unwired = 0;
1083 num_found = 0;
1084 num_invalid = 0;
1085 num_device = 0;
1086 num_external = 0;
1087 num_internal = 0;
1088 num_reusable = 0;
1089 num_compressed = 0;
1090 /* invalidate the PTEs first to "freeze" them */
1091 for (cpte = spte, vaddr = start_vaddr;
1092 cpte < epte;
1093 cpte++, vaddr += PAGE_SIZE_64) {
1094 pt_entry_t p = *cpte;
1095
1096 pa = pte_to_pa(p);
1097 if (pa == 0) {
1098 if (pmap != kernel_pmap &&
1099 (options & PMAP_OPTIONS_REMOVE) &&
1100 (p & PTE_COMPRESSED)) {
1101 /* one less "compressed" */
1102 num_compressed++;
1103 /* clear marker */
1104 /* XXX probably does not need to be atomic! */
1105 pmap_update_pte(cpte, PTE_COMPRESSED, 0);
1106 }
1107 continue;
1108 }
1109 num_found++;
1110
1111 if (iswired(p))
1112 num_unwired++;
1113
1114 pai = pa_index(pa);
1115
1116 if (!IS_MANAGED_PAGE(pai)) {
1117 /*
1118 * Outside range of managed physical memory.
1119 * Just remove the mappings.
1120 */
1121 pmap_store_pte(cpte, 0);
1122 num_device++;
1123 continue;
1124 }
1125
1126 if ((p & PTE_VALID_MASK(is_ept)) == 0)
1127 num_invalid++;
1128
1129 /* invalidate the PTE */
1130 pmap_update_pte(cpte, PTE_VALID_MASK(is_ept), 0);
1131 }
1132
1133 if (num_found == 0) {
1134 /* nothing was changed: we're done */
1135 goto update_counts;
1136 }
1137
1138 /* propagate the invalidates to other CPUs */
1139
1140 PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
1141
1142 for (cpte = spte, vaddr = start_vaddr;
1143 cpte < epte;
1144 cpte++, vaddr += PAGE_SIZE_64) {
1145
1146 pa = pte_to_pa(*cpte);
1147 if (pa == 0)
1148 continue;
1149
1150 pai = pa_index(pa);
1151
1152 LOCK_PVH(pai);
1153
1154 pa = pte_to_pa(*cpte);
1155 if (pa == 0) {
1156 UNLOCK_PVH(pai);
1157 continue;
1158 }
1159 num_removed++;
1160 if (IS_REUSABLE_PAGE(pai)) {
1161 num_reusable++;
1162 } else if (IS_INTERNAL_PAGE(pai)) {
1163 num_internal++;
1164 } else {
1165 num_external++;
1166 }
1167
1168 /*
1169 * Get the modify and reference bits, then
1170 * nuke the entry in the page table
1171 */
1172 /* remember reference and change */
1173 if (!is_ept) {
1174 pmap_phys_attributes[pai] |=
1175 *cpte & (PHYS_MODIFIED | PHYS_REFERENCED);
1176 } else {
1177 pmap_phys_attributes[pai] |=
1178 ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
1179 }
1180
1181 /*
1182 * Remove the mapping from the pvlist for this physical page.
1183 */
1184 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte);
1185
1186 /* completely invalidate the PTE */
1187 pmap_store_pte(cpte, 0);
1188
1189 UNLOCK_PVH(pai);
1190
1191 if (pvh_e != PV_HASHED_ENTRY_NULL) {
1192 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
1193 pvh_eh = pvh_e;
1194
1195 if (pvh_et == PV_HASHED_ENTRY_NULL) {
1196 pvh_et = pvh_e;
1197 }
1198 pvh_cnt++;
1199 }
1200 } /* for loop */
1201
1202 if (pvh_eh != PV_HASHED_ENTRY_NULL) {
1203 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
1204 }
1205 update_counts:
1206 /*
1207 * Update the counts
1208 */
1209 #if TESTING
1210 if (pmap->stats.resident_count < num_removed)
1211 panic("pmap_remove_range: resident_count");
1212 #endif
1213 pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed));
1214 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, machine_ptob(num_removed));
1215 assert(pmap->stats.resident_count >= num_removed);
1216 OSAddAtomic(-num_removed, &pmap->stats.resident_count);
1217
1218 if (pmap != kernel_pmap) {
1219 #if 00
1220 assert(pmap->stats.device >= num_device);
1221 if (num_device)
1222 OSAddAtomic(-num_device, &pmap->stats.device);
1223 #endif /* 00 */
1224 assert(pmap->stats.external >= num_external);
1225 if (num_external)
1226 OSAddAtomic(-num_external, &pmap->stats.external);
1227 assert(pmap->stats.internal >= num_internal);
1228 if (num_internal)
1229 OSAddAtomic(-num_internal, &pmap->stats.internal);
1230 assert(pmap->stats.reusable >= num_reusable);
1231 if (num_reusable)
1232 OSAddAtomic(-num_reusable, &pmap->stats.reusable);
1233 assert(pmap->stats.compressed >= num_compressed);
1234 if (num_compressed)
1235 OSAddAtomic64(-num_compressed, &pmap->stats.compressed);
1236 }
1237
1238 #if TESTING
1239 if (pmap->stats.wired_count < num_unwired)
1240 panic("pmap_remove_range: wired_count");
1241 #endif
1242 assert(pmap->stats.wired_count >= num_unwired);
1243 OSAddAtomic(-num_unwired, &pmap->stats.wired_count);
1244 pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired));
1245
1246 return;
1247 }
1248
1249
1250 /*
1251 * Remove the given range of addresses
1252 * from the specified map.
1253 *
1254 * It is assumed that the start and end are properly
1255 * rounded to the hardware page size.
1256 */
1257 void
1258 pmap_remove(
1259 pmap_t map,
1260 addr64_t s64,
1261 addr64_t e64)
1262 {
1263 pmap_remove_options(map, s64, e64, PMAP_OPTIONS_REMOVE);
1264 }
1265
1266 void
1267 pmap_remove_options(
1268 pmap_t map,
1269 addr64_t s64,
1270 addr64_t e64,
1271 int options)
1272 {
1273 pt_entry_t *pde;
1274 pt_entry_t *spte, *epte;
1275 addr64_t l64;
1276 uint64_t deadline;
1277 boolean_t is_ept;
1278
1279 pmap_intr_assert();
1280
1281 if (map == PMAP_NULL || s64 == e64)
1282 return;
1283
1284 is_ept = is_ept_pmap(map);
1285
1286 PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
1287 map,
1288 (uint32_t) (s64 >> 32), s64,
1289 (uint32_t) (e64 >> 32), e64);
1290
1291
1292 PMAP_LOCK(map);
1293
1294 #if 0
1295 /*
1296 * Check that address range in the kernel does not overlap the stacks.
1297 * We initialize local static min/max variables once to avoid making
1298 * 2 function calls for every remove. Note also that these functions
1299 * both return 0 before kernel stacks have been initialized, and hence
1300 * the panic is not triggered in this case.
1301 */
1302 if (map == kernel_pmap) {
1303 static vm_offset_t kernel_stack_min = 0;
1304 static vm_offset_t kernel_stack_max = 0;
1305
1306 if (kernel_stack_min == 0) {
1307 kernel_stack_min = min_valid_stack_address();
1308 kernel_stack_max = max_valid_stack_address();
1309 }
1310 if ((kernel_stack_min <= s64 && s64 < kernel_stack_max) ||
1311 (kernel_stack_min < e64 && e64 <= kernel_stack_max))
1312 panic("pmap_remove() attempted in kernel stack");
1313 }
1314 #else
1315
1316 /*
1317 * The values of kernel_stack_min and kernel_stack_max are no longer
1318 * relevant now that we allocate kernel stacks in the kernel map,
1319 * so the old code above no longer applies. If we wanted to check that
1320 * we weren't removing a mapping of a page in a kernel stack we'd
1321 * mark the PTE with an unused bit and check that here.
1322 */
1323
1324 #endif
1325
1326 deadline = rdtsc64() + max_preemption_latency_tsc;
1327
1328 while (s64 < e64) {
1329 l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);
1330 if (l64 > e64)
1331 l64 = e64;
1332 pde = pmap_pde(map, s64);
1333
1334 if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
1335 if (*pde & PTE_PS) {
1336 /*
1337 * If we're removing a superpage, pmap_remove_range()
1338 * must work on level 2 instead of level 1; and we're
1339 * only passing a single level 2 entry instead of a
1340 * level 1 range.
1341 */
1342 spte = pde;
1343 epte = spte+1; /* excluded */
1344 } else {
1345 spte = pmap_pte(map, (s64 & ~(pde_mapped_size - 1)));
1346 spte = &spte[ptenum(s64)];
1347 epte = &spte[intel_btop(l64 - s64)];
1348 }
1349 pmap_remove_range_options(map, s64, spte, epte,
1350 options);
1351 }
1352 s64 = l64;
1353
1354 if (s64 < e64 && rdtsc64() >= deadline) {
1355 PMAP_UNLOCK(map)
1356 /* TODO: Rapid release/reacquisition can defeat
1357 * the "backoff" intent here; either consider a
1358 * fair spinlock, or a scheme whereby each lock
1359 * attempt marks the processor as within a spinlock
1360 * acquisition, and scan CPUs here to determine
1361 * if a backoff is necessary, to avoid sacrificing
1362 * performance in the common case.
1363 */
1364 PMAP_LOCK(map)
1365 deadline = rdtsc64() + max_preemption_latency_tsc;
1366 }
1367 }
1368
1369 PMAP_UNLOCK(map);
1370
1371 PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END,
1372 map, 0, 0, 0, 0);
1373
1374 }
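/*
 * Worked example (added commentary, not part of the original source): the
 * chunking arithmetic above clips each pass at the next PDE boundary so the
 * pmap lock can be dropped between chunks. With pde_mapped_size == 2MiB and
 * a hypothetical start address, the first chunk ends at the next 2MiB
 * boundary. Guarded out of compilation.
 */
#if 0
static void
example_remove_chunking(void)
{
	addr64_t s64 = 0x00201000ULL;	/* hypothetical start, part-way into a 2MiB region */
	addr64_t l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);

	assert(l64 == 0x00400000ULL);	/* first chunk is clipped at the 2MiB boundary */
}
#endif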
1375
1376 void
1377 pmap_page_protect(
1378 ppnum_t pn,
1379 vm_prot_t prot)
1380 {
1381 pmap_page_protect_options(pn, prot, 0, NULL);
1382 }
1383
1384 /*
1385 * Routine: pmap_page_protect_options
1386 *
1387 * Function:
1388 * Lower the permission for all mappings to a given
1389 * page.
1390 */
1391 void
1392 pmap_page_protect_options(
1393 ppnum_t pn,
1394 vm_prot_t prot,
1395 unsigned int options,
1396 void *arg)
1397 {
1398 pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
1399 pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
1400 pv_hashed_entry_t nexth;
1401 int pvh_cnt = 0;
1402 pv_rooted_entry_t pv_h;
1403 pv_rooted_entry_t pv_e;
1404 pv_hashed_entry_t pvh_e;
1405 pt_entry_t *pte;
1406 int pai;
1407 pmap_t pmap;
1408 boolean_t remove;
1409 pt_entry_t new_pte_value;
1410 boolean_t is_ept;
1411
1412 pmap_intr_assert();
1413 assert(pn != vm_page_fictitious_addr);
1414 if (pn == vm_page_guard_addr)
1415 return;
1416
1417 pai = ppn_to_pai(pn);
1418
1419 if (!IS_MANAGED_PAGE(pai)) {
1420 /*
1421 * Not a managed page.
1422 */
1423 return;
1424 }
1425 PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START,
1426 pn, prot, 0, 0, 0);
1427
1428 /*
1429 * Determine the new protection.
1430 */
1431 switch (prot) {
1432 case VM_PROT_READ:
1433 case VM_PROT_READ | VM_PROT_EXECUTE:
1434 remove = FALSE;
1435 break;
1436 case VM_PROT_ALL:
1437 return; /* nothing to do */
1438 default:
1439 remove = TRUE;
1440 break;
1441 }
1442
1443 pv_h = pai_to_pvh(pai);
1444
1445 LOCK_PVH(pai);
1446
1447
1448 /*
1449 * Walk down PV list, if any, changing or removing all mappings.
1450 */
1451 if (pv_h->pmap == PMAP_NULL)
1452 goto done;
1453
1454 pv_e = pv_h;
1455 pvh_e = (pv_hashed_entry_t) pv_e; /* cheat */
1456
1457 do {
1458 vm_map_offset_t vaddr;
1459
1460 if ((options & PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
1461 (pmap_phys_attributes[pai] & PHYS_MODIFIED)) {
1462 /* page was modified, so it will be compressed */
1463 options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1464 options |= PMAP_OPTIONS_COMPRESSOR;
1465 }
1466
1467 pmap = pv_e->pmap;
1468 is_ept = is_ept_pmap(pmap);
1469 vaddr = pv_e->va;
1470 pte = pmap_pte(pmap, vaddr);
1471
1472 pmap_assert2((pa_index(pte_to_pa(*pte)) == pn),
1473 "pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);
1474
1475 if (0 == pte) {
1476 panic("pmap_page_protect() "
1477 "pmap=%p pn=0x%x vaddr=0x%llx\n",
1478 pmap, pn, vaddr);
1479 }
1480 nexth = (pv_hashed_entry_t) queue_next(&pvh_e->qlink);
1481
1482 /*
1483 * Remove the mapping if new protection is NONE
1484 */
1485 if (remove) {
1486
1487 /* Remove per-pmap wired count */
1488 if (iswired(*pte)) {
1489 OSAddAtomic(-1, &pmap->stats.wired_count);
1490 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
1491 }
1492
1493 if (pmap != kernel_pmap &&
1494 (options & PMAP_OPTIONS_COMPRESSOR) &&
1495 IS_INTERNAL_PAGE(pai)) {
1496 /* mark this PTE as having been "reclaimed" */
1497 new_pte_value = PTE_COMPRESSED;
1498 } else {
1499 new_pte_value = 0;
1500 }
1501
1502 if (options & PMAP_OPTIONS_NOREFMOD) {
1503 pmap_store_pte(pte, new_pte_value);
1504
1505 if (options & PMAP_OPTIONS_NOFLUSH)
1506 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
1507 else
1508 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
1509 } else {
1510 /*
1511 * Remove the mapping, collecting dirty bits.
1512 */
1513 pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
1514
1515 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
1516 if ((options &
1517 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
1518 ! (pmap_phys_attributes[pai] &
1519 PHYS_MODIFIED) &&
1520 (*pte & PHYS_MODIFIED)) {
1521 /*
1522 * Page is actually "modified" and
1523 * will be compressed. Start
1524 * accounting for it as "compressed".
1525 */
1526 options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1527 options |= PMAP_OPTIONS_COMPRESSOR;
1528 new_pte_value = PTE_COMPRESSED;
1529 }
1530 if (!is_ept) {
1531 pmap_phys_attributes[pai] |=
1532 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1533 } else {
1534 pmap_phys_attributes[pai] |=
1535 ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
1536 }
1537 pmap_store_pte(pte, new_pte_value);
1538 }
1539
1540 if (new_pte_value == PTE_COMPRESSED) {
1541 /* one more "compressed" page */
1542 OSAddAtomic64(+1, &pmap->stats.compressed);
1543 PMAP_STATS_PEAK(pmap->stats.compressed);
1544 pmap->stats.compressed_lifetime++;
1545 }
1546
1547 #if TESTING
1548 if (pmap->stats.resident_count < 1)
1549 panic("pmap_page_protect: resident_count");
1550 #endif
1551 pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
1552 assert(pmap->stats.resident_count >= 1);
1553 OSAddAtomic(-1, &pmap->stats.resident_count);
1554 if (options & PMAP_OPTIONS_COMPRESSOR) {
1555 /*
1556 * This removal is only being done so we can send this page to
1557 * the compressor; therefore it mustn't affect total task footprint.
1558 */
1559 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
1560 } else {
1561 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
1562 }
1563
1564 if (pmap != kernel_pmap) {
1565 if (IS_REUSABLE_PAGE(pai)) {
1566 assert(pmap->stats.reusable > 0);
1567 OSAddAtomic(-1, &pmap->stats.reusable);
1568 } else if (IS_INTERNAL_PAGE(pai)) {
1569 assert(pmap->stats.internal > 0);
1570 OSAddAtomic(-1, &pmap->stats.internal);
1571 } else {
1572 assert(pmap->stats.external > 0);
1573 OSAddAtomic(-1, &pmap->stats.external);
1574 }
1575 }
1576
1577 /*
1578 * Deal with the pv_rooted_entry.
1579 */
1580
1581 if (pv_e == pv_h) {
1582 /*
1583 * Fix up head later.
1584 */
1585 pv_h->pmap = PMAP_NULL;
1586 } else {
1587 /*
1588 * Delete this entry.
1589 */
1590 pv_hash_remove(pvh_e);
1591 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
1592 pvh_eh = pvh_e;
1593
1594 if (pvh_et == PV_HASHED_ENTRY_NULL)
1595 pvh_et = pvh_e;
1596 pvh_cnt++;
1597 }
1598 } else {
1599 /*
1600 * Write-protect, after opportunistic refmod collect
1601 */
1602 if (!is_ept) {
1603 pmap_phys_attributes[pai] |=
1604 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1605 } else {
1606 pmap_phys_attributes[pai] |=
1607 ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
1608 }
1609 pmap_update_pte(pte, PTE_WRITE(is_ept), 0);
1610
1611 if (options & PMAP_OPTIONS_NOFLUSH)
1612 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
1613 else
1614 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
1615 }
1616 pvh_e = nexth;
1617 } while ((pv_e = (pv_rooted_entry_t) nexth) != pv_h);
1618
1619
1620 /*
1621 * If pv_head mapping was removed, fix it up.
1622 */
1623 if (pv_h->pmap == PMAP_NULL) {
1624 pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
1625
1626 if (pvh_e != (pv_hashed_entry_t) pv_h) {
1627 pv_hash_remove(pvh_e);
1628 pv_h->pmap = pvh_e->pmap;
1629 pv_h->va = pvh_e->va;
1630 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
1631 pvh_eh = pvh_e;
1632
1633 if (pvh_et == PV_HASHED_ENTRY_NULL)
1634 pvh_et = pvh_e;
1635 pvh_cnt++;
1636 }
1637 }
1638 if (pvh_eh != PV_HASHED_ENTRY_NULL) {
1639 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
1640 }
1641 done:
1642 UNLOCK_PVH(pai);
1643
1644 PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END,
1645 0, 0, 0, 0, 0);
1646 }
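/*
 * Usage sketch (added, not part of the original source): the two common call
 * patterns for this routine. VM_PROT_READ downgrades every mapping of the
 * page to read-only (collecting ref/mod bits along the way), while
 * VM_PROT_NONE removes all mappings outright. Guarded out of compilation.
 */
#if 0
static void
example_page_protect(ppnum_t pn)
{
	/* write-protect all existing mappings of pn */
	pmap_page_protect(pn, VM_PROT_READ);

	/* ... or remove every mapping of pn entirely */
	pmap_page_protect(pn, VM_PROT_NONE);
}
#endif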
1647
1648
1649 /*
1650 * Clear specified attribute bits.
1651 */
1652 void
1653 phys_attribute_clear(
1654 ppnum_t pn,
1655 int bits,
1656 unsigned int options,
1657 void *arg)
1658 {
1659 pv_rooted_entry_t pv_h;
1660 pv_hashed_entry_t pv_e;
1661 pt_entry_t *pte;
1662 int pai;
1663 pmap_t pmap;
1664 char attributes = 0;
1665 boolean_t is_internal, is_reusable, is_ept;
1666 int ept_bits_to_clear;
1667 boolean_t ept_keep_global_mod = FALSE;
1668
1669 if ((bits & PHYS_MODIFIED) &&
1670 (options & PMAP_OPTIONS_NOFLUSH) &&
1671 arg == NULL) {
1672 panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): "
1673 "should not clear 'modified' without flushing TLBs\n",
1674 pn, bits, options, arg);
1675 }
1676
1677 /* We only support converting MOD and REF bits for EPT PTEs in this function */
1678 assert((bits & ~(PHYS_REFERENCED | PHYS_MODIFIED)) == 0);
1679
1680 ept_bits_to_clear = (unsigned)physmap_refmod_to_ept(bits & (PHYS_MODIFIED | PHYS_REFERENCED));
1681
1682 pmap_intr_assert();
1683 assert(pn != vm_page_fictitious_addr);
1684 if (pn == vm_page_guard_addr)
1685 return;
1686
1687 pai = ppn_to_pai(pn);
1688
1689 if (!IS_MANAGED_PAGE(pai)) {
1690 /*
1691 * Not a managed page.
1692 */
1693 return;
1694 }
1695
1696 PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START,
1697 pn, bits, 0, 0, 0);
1698
1699 pv_h = pai_to_pvh(pai);
1700
1701 LOCK_PVH(pai);
1702
1703
1704 /*
1705 * Walk down PV list, clearing all modify or reference bits.
1706 * We do not have to lock the pv_list because we have
1707 * the per-pmap lock
1708 */
1709 if (pv_h->pmap != PMAP_NULL) {
1710 /*
1711 * There are some mappings.
1712 */
1713
1714 is_internal = IS_INTERNAL_PAGE(pai);
1715 is_reusable = IS_REUSABLE_PAGE(pai);
1716
1717 pv_e = (pv_hashed_entry_t)pv_h;
1718
1719 do {
1720 vm_map_offset_t va;
1721 char pte_bits;
1722
1723 pmap = pv_e->pmap;
1724 is_ept = is_ept_pmap(pmap);
1725 va = pv_e->va;
1726 pte_bits = 0;
1727
1728 if (bits) {
1729 pte = pmap_pte(pmap, va);
1730 /* grab ref/mod bits from this PTE */
1731 pte_bits = (*pte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
1732 /* propagate to page's global attributes */
1733 if (!is_ept) {
1734 attributes |= pte_bits;
1735 } else {
1736 attributes |= ept_refmod_to_physmap(pte_bits);
1737 if (!pmap_ept_support_ad && (pte_bits & INTEL_EPT_MOD)) {
1738 ept_keep_global_mod = TRUE;
1739 }
1740 }
1741 /* which bits to clear for this PTE? */
1742 if (!is_ept) {
1743 pte_bits &= bits;
1744 } else {
1745 pte_bits &= ept_bits_to_clear;
1746 }
1747 }
1748
1749 /*
1750 * Clear modify and/or reference bits.
1751 */
1752 if (pte_bits) {
1753 pmap_update_pte(pte, bits, 0);
1754
1755 /* Ensure all processors using this translation
1756 * invalidate this TLB entry. The invalidation
1757 * *must* follow the PTE update, to ensure that
1758 * the TLB shadow of the 'D' bit (in particular)
1759 * is synchronized with the updated PTE.
1760 */
1761 if (! (options & PMAP_OPTIONS_NOFLUSH)) {
1762 /* flush TLBS now */
1763 PMAP_UPDATE_TLBS(pmap,
1764 va,
1765 va + PAGE_SIZE);
1766 } else if (arg) {
1767 /* delayed TLB flush: add "pmap" info */
1768 PMAP_UPDATE_TLBS_DELAYED(
1769 pmap,
1770 va,
1771 va + PAGE_SIZE,
1772 (pmap_flush_context *)arg);
1773 } else {
1774 /* no TLB flushing at all */
1775 }
1776 }
1777
1778 /* update pmap "reusable" stats */
1779 if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) &&
1780 is_reusable &&
1781 pmap != kernel_pmap) {
1782 /* one less "reusable" */
1783 assert(pmap->stats.reusable > 0);
1784 OSAddAtomic(-1, &pmap->stats.reusable);
1785 if (is_internal) {
1786 /* one more "internal" */
1787 OSAddAtomic(+1, &pmap->stats.internal);
1788 PMAP_STATS_PEAK(pmap->stats.internal);
1789 } else {
1790 /* one more "external" */
1791 OSAddAtomic(+1, &pmap->stats.external);
1792 PMAP_STATS_PEAK(pmap->stats.external);
1793 }
1794 } else if ((options & PMAP_OPTIONS_SET_REUSABLE) &&
1795 !is_reusable &&
1796 pmap != kernel_pmap) {
1797 /* one more "reusable" */
1798 OSAddAtomic(+1, &pmap->stats.reusable);
1799 PMAP_STATS_PEAK(pmap->stats.reusable);
1800 if (is_internal) {
1801 /* one less "internal" */
1802 assert(pmap->stats.internal > 0);
1803 OSAddAtomic(-1, &pmap->stats.internal);
1804 } else {
1805 /* one less "external" */
1806 assert(pmap->stats.external > 0);
1807 OSAddAtomic(-1, &pmap->stats.external);
1808 }
1809 }
1810
1811 pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
1812
1813 } while (pv_e != (pv_hashed_entry_t)pv_h);
1814 }
1815 /* Opportunistic refmod collection, annulled
1816 * if both REF and MOD are being cleared.
1817 */
1818
1819 pmap_phys_attributes[pai] |= attributes;
1820
1821 if (ept_keep_global_mod) {
1822 /*
1823 * If the hardware doesn't support AD bits for EPT PTEs and someone is
1824 * requesting that we clear the modified bit for a phys page, we need
1825 * to ensure that there are no EPT mappings for the page with the
1826 * modified bit set. If there are, we cannot clear the global modified bit.
1827 */
1828 bits &= ~PHYS_MODIFIED;
1829 }
1830 pmap_phys_attributes[pai] &= ~(bits);
1831
1832 /* update this page's "reusable" status */
1833 if (options & PMAP_OPTIONS_CLEAR_REUSABLE) {
1834 pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
1835 } else if (options & PMAP_OPTIONS_SET_REUSABLE) {
1836 pmap_phys_attributes[pai] |= PHYS_REUSABLE;
1837 }
1838
1839 UNLOCK_PVH(pai);
1840
1841 PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END,
1842 0, 0, 0, 0, 0);
1843 }
1844
1845 /*
1846 * Check specified attribute bits.
1847 */
1848 int
1849 phys_attribute_test(
1850 ppnum_t pn,
1851 int bits)
1852 {
1853 pv_rooted_entry_t pv_h;
1854 pv_hashed_entry_t pv_e;
1855 pt_entry_t *pte;
1856 int pai;
1857 pmap_t pmap;
1858 int attributes = 0;
1859 boolean_t is_ept;
1860
1861 pmap_intr_assert();
1862 assert(pn != vm_page_fictitious_addr);
1863 assert((bits & ~(PHYS_MODIFIED | PHYS_REFERENCED)) == 0);
1864 if (pn == vm_page_guard_addr)
1865 return 0;
1866
1867 pai = ppn_to_pai(pn);
1868
1869 if (!IS_MANAGED_PAGE(pai)) {
1870 /*
1871 * Not a managed page.
1872 */
1873 return 0;
1874 }
1875
1876 /*
1877 * Fast check... if bits already collected
1878 * no need to take any locks...
1879 * if not set, we need to recheck after taking
1880 * the lock in case they got pulled in while
1881 * we were waiting for the lock
1882 */
1883 if ((pmap_phys_attributes[pai] & bits) == bits)
1884 return bits;
1885
1886 pv_h = pai_to_pvh(pai);
1887
1888 LOCK_PVH(pai);
1889
1890 attributes = pmap_phys_attributes[pai] & bits;
1891
1892
1893 /*
1894 * Walk down PV list, checking the mappings until we
1895 * reach the end or we've found the desired attributes.
1896 */
1897 if (attributes != bits &&
1898 pv_h->pmap != PMAP_NULL) {
1899 /*
1900 * There are some mappings.
1901 */
1902 pv_e = (pv_hashed_entry_t)pv_h;
1903 do {
1904 vm_map_offset_t va;
1905
1906 pmap = pv_e->pmap;
1907 is_ept = is_ept_pmap(pmap);
1908 va = pv_e->va;
1909 /*
1910 * pick up modify and/or reference bits from mapping
1911 */
1912
1913 pte = pmap_pte(pmap, va);
1914 if (!is_ept) {
1915 attributes |= (int)(*pte & bits);
1916 } else {
1917 attributes |= (int)(ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED));
1918
1919 }
1920
1921 pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
1922
1923 } while ((attributes != bits) &&
1924 (pv_e != (pv_hashed_entry_t)pv_h));
1925 }
1926 pmap_phys_attributes[pai] |= attributes;
1927
1928 UNLOCK_PVH(pai);
1929 return (attributes);
1930 }
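/*
 * Usage sketch (added, not part of the original source): how higher-level
 * pmap interfaces are typically layered on these attribute primitives. The
 * wrapper names below are illustrative only; the real exported wrappers live
 * elsewhere in the pmap code. Guarded out of compilation.
 */
#if 0
static boolean_t
example_is_modified(ppnum_t pn)
{
	return (phys_attribute_test(pn, PHYS_MODIFIED) != 0);
}

static void
example_clear_refmod(ppnum_t pn)
{
	/* clear both bits, flushing TLBs immediately (no NOFLUSH, no flush context) */
	phys_attribute_clear(pn, PHYS_MODIFIED | PHYS_REFERENCED, 0, NULL);
}
#endif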
1931
1932 /*
1933 * Routine: pmap_change_wiring
1934 * Function: Change the wiring attribute for a map/virtual-address
1935 * pair.
1936 * In/out conditions:
1937 * The mapping must already exist in the pmap.
1938 */
1939 void
1940 pmap_change_wiring(
1941 pmap_t map,
1942 vm_map_offset_t vaddr,
1943 boolean_t wired)
1944 {
1945 pt_entry_t *pte;
1946
1947 PMAP_LOCK(map);
1948
1949 if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
1950 panic("pmap_change_wiring(%p,0x%llx,%d): pte missing",
1951 map, vaddr, wired);
1952
1953 if (wired && !iswired(*pte)) {
1954 /*
1955 * wiring down mapping
1956 */
1957 pmap_ledger_credit(map, task_ledgers.wired_mem, PAGE_SIZE);
1958 OSAddAtomic(+1, &map->stats.wired_count);
1959 pmap_update_pte(pte, 0, PTE_WIRED);
1960 }
1961 else if (!wired && iswired(*pte)) {
1962 /*
1963 * unwiring mapping
1964 */
1965 assert(map->stats.wired_count >= 1);
1966 OSAddAtomic(-1, &map->stats.wired_count);
1967 pmap_ledger_debit(map, task_ledgers.wired_mem, PAGE_SIZE);
1968 pmap_update_pte(pte, PTE_WIRED, 0);
1969 }
1970
1971 PMAP_UNLOCK(map);
1972 }
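/*
 * Usage sketch (added, not part of the original source): wiring and later
 * unwiring an existing mapping. The mapping must already exist in the pmap;
 * otherwise the routine panics on the missing PTE. Guarded out of
 * compilation.
 */
#if 0
static void
example_wire_unwire(pmap_t map, vm_map_offset_t va)
{
	pmap_change_wiring(map, va, TRUE);	/* pin: the PTE may not be reclaimed */
	/* ... */
	pmap_change_wiring(map, va, FALSE);	/* unpin */
}
#endif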
1973
1974 /*
1975 * "Backdoor" direct map routine for early mappings.
1976 * Useful for mapping memory outside the range of managed physical memory (e.g. device memory).
1977 * Always sets the A (accessed) and D (dirty) bits; sets NC (non-cacheable) if requested.
1978 */
1979
1980 vm_offset_t
1981 pmap_map_bd(
1982 vm_offset_t virt,
1983 vm_map_offset_t start_addr,
1984 vm_map_offset_t end_addr,
1985 vm_prot_t prot,
1986 unsigned int flags)
1987 {
1988 pt_entry_t template;
1989 pt_entry_t *pte;
1990 spl_t spl;
1991 vm_offset_t base = virt;
1992 template = pa_to_pte(start_addr)
1993 | INTEL_PTE_REF
1994 | INTEL_PTE_MOD
1995 | INTEL_PTE_WIRED
1996 | INTEL_PTE_VALID;
1997
1998 if ((flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) == VM_MEM_NOT_CACHEABLE) {
1999 template |= INTEL_PTE_NCACHE;
2000 if (!(flags & (VM_MEM_GUARDED)))
2001 template |= INTEL_PTE_PTA;
2002 }
2003
2004 #if defined(__x86_64__)
2005 if ((prot & VM_PROT_EXECUTE) == 0)
2006 template |= INTEL_PTE_NX;
2007 #endif
2008
2009 if (prot & VM_PROT_WRITE)
2010 template |= INTEL_PTE_WRITE;
2011
2012 while (start_addr < end_addr) {
2013 spl = splhigh();
2014 pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
2015 if (pte == PT_ENTRY_NULL) {
2016 panic("pmap_map_bd: Invalid kernel address\n");
2017 }
2018 pmap_store_pte(pte, template);
2019 splx(spl);
2020 pte_increment_pa(template);
2021 virt += PAGE_SIZE;
2022 start_addr += PAGE_SIZE;
2023 }
2024 flush_tlb_raw();
2025 PMAP_UPDATE_TLBS(kernel_pmap, base, base + end_addr - start_addr);
2026 return(virt);
2027 }
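/*
 * Usage sketch (added, not part of the original source): an early-boot
 * caller mapping a hypothetical device aperture uncached and read/write.
 * pmap_map_bd() returns the virtual address just past the mappings it
 * created, so the caller can use the return value as its next cursor.
 * Guarded out of compilation.
 */
#if 0
static vm_offset_t
example_map_device_aperture(vm_offset_t virt, vm_map_offset_t phys_base, vm_map_offset_t phys_end)
{
	return pmap_map_bd(virt, phys_base, phys_end,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_MEM_NOT_CACHEABLE);
}
#endif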
2028
2029 mach_vm_size_t
2030 pmap_query_resident(
2031 pmap_t pmap,
2032 addr64_t s64,
2033 addr64_t e64,
2034 mach_vm_size_t *compressed_bytes_p)
2035 {
2036 pt_entry_t *pde;
2037 pt_entry_t *spte, *epte;
2038 addr64_t l64;
2039 uint64_t deadline;
2040 mach_vm_size_t resident_bytes;
2041 mach_vm_size_t compressed_bytes;
2042 boolean_t is_ept;
2043
2044 pmap_intr_assert();
2045
2046 if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64) {
2047 if (compressed_bytes_p) {
2048 *compressed_bytes_p = 0;
2049 }
2050 return 0;
2051 }
2052
2053 is_ept = is_ept_pmap(pmap);
2054
2055 PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START,
2056 pmap,
2057 (uint32_t) (s64 >> 32), s64,
2058 (uint32_t) (e64 >> 32), e64);
2059
2060 resident_bytes = 0;
2061 compressed_bytes = 0;
2062
2063 PMAP_LOCK(pmap);
2064
2065 deadline = rdtsc64() + max_preemption_latency_tsc;
2066
2067 while (s64 < e64) {
2068 l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);
2069 if (l64 > e64)
2070 l64 = e64;
2071 pde = pmap_pde(pmap, s64);
2072
2073 if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
2074 if (*pde & PTE_PS) {
2075 /* superpage: not supported */
2076 } else {
2077 spte = pmap_pte(pmap,
2078 (s64 & ~(pde_mapped_size - 1)));
2079 spte = &spte[ptenum(s64)];
2080 epte = &spte[intel_btop(l64 - s64)];
2081
2082 for (; spte < epte; spte++) {
2083 if (pte_to_pa(*spte) != 0) {
2084 resident_bytes += PAGE_SIZE;
2085 } else if (*spte & PTE_COMPRESSED) {
2086 compressed_bytes += PAGE_SIZE;
2087 }
2088 }
2089
2090 }
2091 }
2092 s64 = l64;
2093
2094 if (s64 < e64 && rdtsc64() >= deadline) {
2095 PMAP_UNLOCK(pmap);
2096 PMAP_LOCK(pmap);
2097 deadline = rdtsc64() + max_preemption_latency_tsc;
2098 }
2099 }
2100
2101 PMAP_UNLOCK(pmap);
2102
2103 PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END,
2104 pmap, 0, 0, 0, 0);
2105
2106 if (compressed_bytes_p) {
2107 *compressed_bytes_p = compressed_bytes;
2108 }
2109 return resident_bytes;
2110 }
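/*
 * Usage sketch (added, not part of the original source): querying how much
 * of a user range is resident versus compressed. Byte counts come back in
 * multiples of PAGE_SIZE; the kernel pmap is deliberately not supported and
 * yields zero. Guarded out of compilation.
 */
#if 0
static void
example_query_resident(pmap_t user_pmap, addr64_t start, addr64_t end)
{
	mach_vm_size_t resident_bytes;
	mach_vm_size_t compressed_bytes;

	resident_bytes = pmap_query_resident(user_pmap, start, end, &compressed_bytes);

	(void) resident_bytes;
	(void) compressed_bytes;
}
#endif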
2111
2112 #if MACH_ASSERT
2113 void
2114 pmap_set_process(
2115 __unused pmap_t pmap,
2116 __unused int pid,
2117 __unused char *procname)
2118 {
2119 }
2120 #endif /* MACH_ASSERT */