/* osfmk/i386/pmap_x86_common.c (apple/xnu, xnu-1504.15.3) */
1/*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <vm/pmap.h>
29#include <vm/vm_map.h>
30#include <i386/pmap_internal.h>
31
32
33void pmap_remove_range(
34 pmap_t pmap,
35 vm_map_offset_t va,
36 pt_entry_t *spte,
37 pt_entry_t *epte);
38
39pv_rooted_entry_t pv_head_table; /* array of entries, one per
40 * page */
41thread_call_t mapping_adjust_call;
42static thread_call_data_t mapping_adjust_call_data;
43uint32_t mappingrecurse = 0;
44
45pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG];
46uint32_t pmap_pagetable_corruption_incidents;
47uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1);
48uint64_t pmap_pagetable_corruption_interval_abstime;
49thread_call_t pmap_pagetable_corruption_log_call;
50static thread_call_data_t pmap_pagetable_corruption_log_call_data;
51boolean_t pmap_pagetable_corruption_timeout = FALSE;
52
53/*
54 * The Intel platform can nest at the PDE level, i.e. NBPDE (2MB) at a time,
55 * on an NBPDE boundary.
56 */
57
58/* These symbols may be referenced directly by VM */
59uint64_t pmap_nesting_size_min = NBPDE;
60uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE;
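/*
 * Worked example of the constants above (illustrative note, not part of the
 * original source): with 2MB large pages, NBPDE == 1ULL << 21, so
 * pmap_nesting_size_min is 2MB and pmap_nesting_size_max is 2^64 - 2MB,
 * i.e. "anything", rounded down to a PDE boundary.  The size check in
 * pmap_nest() below, (size >> 28) > 65536, rejects sizes above
 * 65536 * 2^28 == 2^44 bytes == 16TB, matching its comment.
 */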
61
62/*
63 * kern_return_t pmap_nest(grand, subord, va_start, nstart, size)
64 *
65 * grand = the pmap that we will nest subord into
66 * subord = the pmap that goes into the grand
67 * va_start = start of the range in the grand pmap at which subord is nested
68 * nstart = start of the corresponding range in the subord (nested) pmap
69 * size = size of the nested area (up to 16TB)
70 *
71 * Inserts a pmap into another. This is used to implement shared segments.
72 *
73 * Note that we depend upon higher-level VM locks to ensure that things don't change while
74 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting,
75 * nor performing two nests at once.
76 */
77
78/*
79 * This routine can nest subtrees either at the PDPT level (1GiB) or at the
80 * PDE level (2MiB). We currently disallow disparate offsets for the "subord"
81 * container and the "grand" parent. A minor optimization to consider for the
82 * future: make the "subord" truly a container rather than a full-fledged
83 * pagetable hierarchy which can be unnecessarily sparse (DRK).
84 */
85
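/*
 * Illustrative call sequence (a sketch, not part of the original source):
 * the Mach VM layer nests a shared submap's pmap into a task's pmap at
 * identical, pmap_nesting_size_min-aligned offsets.  "task_pmap",
 * "shared_pmap", "base" and "size" are hypothetical placeholders.
 */
#if 0	/* example only */
        kern_return_t kr;

        assert((base & (pmap_nesting_size_min - 1)) == 0);
        assert((size & (pmap_nesting_size_min - 1)) == 0);

        kr = pmap_nest(task_pmap, shared_pmap,
                       (addr64_t) base,   /* va_start in grand */
                       (addr64_t) base,   /* nstart: must equal va_start */
                       (uint64_t) size);
        if (kr != KERN_SUCCESS)
                panic("example: pmap_nest failed");
#endif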
86kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint64_t size) {
87 vm_map_offset_t vaddr, nvaddr;
88 pd_entry_t *pde,*npde;
89 unsigned int i;
90 uint64_t num_pde;
91
92 if ((size & (pmap_nesting_size_min-1)) ||
93 (va_start & (pmap_nesting_size_min-1)) ||
94 (nstart & (pmap_nesting_size_min-1)) ||
95 ((size >> 28) > 65536)) /* Max size we can nest is 16TB */
96 return KERN_INVALID_VALUE;
97
98 if(size == 0) {
99 panic("pmap_nest: size is invalid - %016llX\n", size);
100 }
101
102 if (va_start != nstart)
103 panic("pmap_nest: va_start(0x%llx) != nstart(0x%llx)\n", va_start, nstart);
104
105 PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
106 (int) grand, (int) subord,
107 (int) (va_start>>32), (int) va_start, 0);
108
109 nvaddr = (vm_map_offset_t)nstart;
110 num_pde = size >> PDESHIFT;
111
112 PMAP_LOCK(subord);
113
114 subord->pm_shared = TRUE;
115
116 for (i = 0; i < num_pde;) {
117 if (((nvaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG && cpu_64bit) {
118
119 npde = pmap64_pdpt(subord, nvaddr);
120
121 while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
122 PMAP_UNLOCK(subord);
123 pmap_expand_pdpt(subord, nvaddr);
124 PMAP_LOCK(subord);
125 npde = pmap64_pdpt(subord, nvaddr);
126 }
127 *npde |= INTEL_PDPTE_NESTED;
128 nvaddr += NBPDPT;
129 i += (uint32_t)NPDEPG;
130 }
131 else {
132 npde = pmap_pde(subord, nvaddr);
133
134 while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
135 PMAP_UNLOCK(subord);
136 pmap_expand(subord, nvaddr);
137 PMAP_LOCK(subord);
138 npde = pmap_pde(subord, nvaddr);
139 }
140 nvaddr += NBPDE;
141 i++;
142 }
143 }
144
145 PMAP_UNLOCK(subord);
146
147 vaddr = (vm_map_offset_t)va_start;
148
149 PMAP_LOCK(grand);
150
151 for (i = 0;i < num_pde;) {
152 pd_entry_t tpde;
153
154 if (((vaddr & PDPTMASK) == 0) && ((num_pde - i) >= NPDEPG) && cpu_64bit) {
155 npde = pmap64_pdpt(subord, vaddr);
156 if (npde == 0)
157 panic("pmap_nest: no PDPT, subord %p nstart 0x%llx", subord, vaddr);
158 tpde = *npde;
159 pde = pmap64_pdpt(grand, vaddr);
160 if (0 == pde) {
161 PMAP_UNLOCK(grand);
162 pmap_expand_pml4(grand, vaddr);
163 PMAP_LOCK(grand);
164 pde = pmap64_pdpt(grand, vaddr);
165 }
166 if (pde == 0)
167 panic("pmap_nest: no PDPT, grand %p vaddr 0x%llx", grand, vaddr);
168 pmap_store_pte(pde, tpde);
169 vaddr += NBPDPT;
170 i += (uint32_t) NPDEPG;
171 }
172 else {
173 npde = pmap_pde(subord, nstart);
174 if (npde == 0)
175 panic("pmap_nest: no npde, subord %p nstart 0x%llx", subord, nstart);
176 tpde = *npde;
177 nstart += NBPDE;
178 pde = pmap_pde(grand, vaddr);
179 if ((0 == pde) && cpu_64bit) {
180 PMAP_UNLOCK(grand);
181 pmap_expand_pdpt(grand, vaddr);
182 PMAP_LOCK(grand);
183 pde = pmap_pde(grand, vaddr);
184 }
185
186 if (pde == 0)
187 panic("pmap_nest: no pde, grand %p vaddr 0x%llx", grand, vaddr);
188 vaddr += NBPDE;
189 pmap_store_pte(pde, tpde);
190 i++;
191 }
192 }
193
194 PMAP_UNLOCK(grand);
195
196 PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
197
198 return KERN_SUCCESS;
199}
200
201/*
202 * kern_return_t pmap_unnest(grand, vaddr, size)
203 * grand = the pmap that we will un-nest subord from
204 * vaddr = start of the range in grand to be unnested
205 * size = size of the range to be unnested
206 *
207 * Removes a pmap from another. This is used to implement shared segments.
208 */
209
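/*
 * Illustrative counterpart to the pmap_nest() sketch above (not part of the
 * original source), using the same hypothetical "task_pmap"/"base"/"size".
 * Note that pmap_unnest() itself rounds the range out to PDE boundaries.
 */
#if 0	/* example only */
        (void) pmap_unnest(task_pmap, (addr64_t) base, (uint64_t) size);
#endif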
210kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) {
211
212 pd_entry_t *pde;
213 unsigned int i;
214 uint64_t num_pde;
215 addr64_t va_start, va_end;
216 uint64_t npdpt = PMAP_INVALID_PDPTNUM;
217
218 PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START,
219 (int) grand,
220 (int) (vaddr>>32), (int) vaddr, 0, 0);
221
222 if ((size & (pmap_nesting_size_min-1)) ||
223 (vaddr & (pmap_nesting_size_min-1))) {
224 panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n",
225 grand, vaddr, size);
226 }
227
228 /* align everything to PDE boundaries */
229 va_start = vaddr & ~(NBPDE-1);
230 va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE-1);
231 size = va_end - va_start;
232
233 PMAP_LOCK(grand);
234
235 num_pde = size >> PDESHIFT;
236 vaddr = va_start;
237
238 for (i = 0; i < num_pde; ) {
239 if ((pdptnum(grand, vaddr) != npdpt) && cpu_64bit) {
240 npdpt = pdptnum(grand, vaddr);
241 pde = pmap64_pdpt(grand, vaddr);
242 if (pde && (*pde & INTEL_PDPTE_NESTED)) {
243 pmap_store_pte(pde, (pd_entry_t)0);
244 i += (uint32_t) NPDEPG;
245 vaddr += NBPDPT;
246 continue;
247 }
248 }
249 pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
250 if (pde == 0)
251 panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr);
252 pmap_store_pte(pde, (pd_entry_t)0);
253 i++;
254 vaddr += NBPDE;
255 }
256
257 PMAP_UPDATE_TLBS(grand, va_start, va_end);
258
259 PMAP_UNLOCK(grand);
260
261 PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_END, 0, 0, 0, 0, 0);
262
263 return KERN_SUCCESS;
264}
265
266/* Invoked by the Mach VM to determine the platform specific unnest region */
267
268boolean_t pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) {
269 pd_entry_t *pdpte;
270 boolean_t rval = FALSE;
271
272 if (!cpu_64bit)
273 return rval;
274
275 PMAP_LOCK(p);
276
277 pdpte = pmap64_pdpt(p, *s);
278 if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
279 *s &= ~(NBPDPT -1);
280 rval = TRUE;
281 }
282
283 pdpte = pmap64_pdpt(p, *e);
284 if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
285 *e = ((*e + NBPDPT) & ~(NBPDPT -1));
286 rval = TRUE;
287 }
288
289 PMAP_UNLOCK(p);
290
291 return rval;
292}
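/*
 * Sketch of the expected calling pattern (an assumption, not from the
 * original source): before unnesting, the VM passes its candidate range
 * through this routine so that a range nested at the PDPT (1GB) level is
 * widened to PDPT boundaries.  "pmap", "start" and "end" are hypothetical
 * placeholders.
 */
#if 0	/* example only */
        vm_map_offset_t s = start, e = end;

        (void) pmap_adjust_unnest_parameters(pmap, &s, &e);
        (void) pmap_unnest(pmap, (addr64_t) s, (uint64_t) (e - s));
#endif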
293
294/*
295 * pmap_find_phys returns the (4K) physical page number containing a
296 * given virtual address in a given pmap.
297 * Note that pmap_pte may return a pde if this virtual address is
298 * mapped by a large page; this is taken into account in order
299 * to return the correct page number in that case.
300 */
301ppnum_t
302pmap_find_phys(pmap_t pmap, addr64_t va)
303{
304 pt_entry_t *ptp;
305 pd_entry_t *pdep;
306 ppnum_t ppn = 0;
307 pd_entry_t pde;
308 pt_entry_t pte;
309
310 mp_disable_preemption();
311
312 /* This refcount test is a band-aid--several infrastructural changes
313 * are necessary to eliminate invocation of this routine from arbitrary
314 * contexts.
315 */
316
317 if (!pmap->ref_count)
318 goto pfp_exit;
319
320 pdep = pmap_pde(pmap, va);
321
322 if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & INTEL_PTE_VALID)) {
323 if (pde & INTEL_PTE_PS) {
324 ppn = (ppnum_t) i386_btop(pte_to_pa(pde));
325 ppn += (ppnum_t) ptenum(va);
326 }
327 else {
328 ptp = pmap_pte(pmap, va);
329 if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & INTEL_PTE_VALID) != 0)) {
330 ppn = (ppnum_t) i386_btop(pte_to_pa(pte));
331 }
332 }
333 }
334pfp_exit:
335 mp_enable_preemption();
336
337 return ppn;
338}
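/*
 * Example use (a sketch, not from the original source): translating a
 * virtual address into a physical address with the page offset re-applied.
 * "va" is a hypothetical placeholder; i386_ptob() and PAGE_MASK are the
 * conversions already used elsewhere in the pmap code.
 */
#if 0	/* example only */
        ppnum_t      ppn = pmap_find_phys(kernel_pmap, (addr64_t) va);
        pmap_paddr_t pa;

        if (ppn != 0) {
                pa = (pmap_paddr_t) i386_ptob(ppn) |
                    ((pmap_paddr_t) va & PAGE_MASK);
                /* ... use pa ... */
        }
#endif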
339
340/*
341 * Insert the given physical page (p) at
342 * the specified virtual address (v) in the
343 * target physical map with the protection requested.
344 *
345 * If specified, the page will be wired down, meaning
346 * that the related pte cannot be reclaimed.
347 *
348 * NB: This is the only routine which MAY NOT lazy-evaluate
349 * or lose information. That is, this routine must actually
350 * insert this page into the given map NOW.
351 */
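/*
 * Illustrative call (a sketch, not part of the original source): entering a
 * wired, read/write kernel mapping of page "pn" at "vaddr"; both names are
 * hypothetical placeholders.
 */
#if 0	/* example only */
        pmap_enter(kernel_pmap, (vm_map_offset_t) vaddr, pn,
                   VM_PROT_READ | VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, TRUE /* wired */);
#endif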
352void
353pmap_enter(
354 register pmap_t pmap,
355 vm_map_offset_t vaddr,
356 ppnum_t pn,
357 vm_prot_t prot,
358 unsigned int flags,
359 boolean_t wired)
360{
361 pt_entry_t *pte;
362 pv_rooted_entry_t pv_h;
363 int pai;
364 pv_hashed_entry_t pvh_e;
365 pv_hashed_entry_t pvh_new;
366 pt_entry_t template;
367 pmap_paddr_t old_pa;
368 pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn);
369 boolean_t need_tlbflush = FALSE;
370 boolean_t set_NX;
371 char oattr;
372 boolean_t old_pa_locked;
373 /* 2MiB mappings are confined to x86_64 by VM */
374 boolean_t superpage = flags & VM_MEM_SUPERPAGE;
375 vm_object_t delpage_pm_obj = NULL;
376 int delpage_pde_index = 0;
377 pt_entry_t old_pte;
378
379 pmap_intr_assert();
380 assert(pn != vm_page_fictitious_addr);
381
382 if (pmap == PMAP_NULL)
383 return;
384 if (pn == vm_page_guard_addr)
385 return;
386
387 PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
388 pmap,
389 (uint32_t) (vaddr >> 32), (uint32_t) vaddr,
390 pn, prot);
391
392 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
393 set_NX = FALSE;
394 else
395 set_NX = TRUE;
396
397 /*
398 * Must allocate a new pvlist entry while we're unlocked;
399 * zalloc may cause pageout (which will lock the pmap system).
400 * If we determine we need a pvlist entry, we will unlock
401 * and allocate one. Then we will retry, throwing away
402 * the allocated entry later (if we no longer need it).
403 */
404
405 pvh_new = PV_HASHED_ENTRY_NULL;
406Retry:
407 pvh_e = PV_HASHED_ENTRY_NULL;
408
409 PMAP_LOCK(pmap);
410
411 /*
412 * Expand pmap to include this pte. Assume that
413 * pmap is always expanded to include enough hardware
414 * pages to map one VM page.
415 */
416 if(superpage) {
417 while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
418 /* need room for another pde entry */
419 PMAP_UNLOCK(pmap);
420 pmap_expand_pdpt(pmap, vaddr);
421 PMAP_LOCK(pmap);
422 }
423 } else {
424 while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
425 /*
426 * Must unlock to expand the pmap
427 * going to grow pde level page(s)
428 */
429 PMAP_UNLOCK(pmap);
430 pmap_expand(pmap, vaddr);
431 PMAP_LOCK(pmap);
432 }
433 }
434
435 if (superpage && *pte && !(*pte & INTEL_PTE_PS)) {
436 /*
437 * There is still an empty page table mapped that
438 * was used for a previous base page mapping.
439 * Remember the PDE and the PDE index, so that we
440 * can free the page at the end of this function.
441 */
442 delpage_pde_index = (int)pdeidx(pmap, vaddr);
443 delpage_pm_obj = pmap->pm_obj;
444 *pte = 0;
445 }
446
447
448 old_pa = pte_to_pa(*pte);
449 pai = pa_index(old_pa);
450 old_pa_locked = FALSE;
451
452 /*
453 * If we have a previous managed page, lock the pv entry now. After
454 * we lock it, check to see if another path beat us to it and, if so,
455 * drop the lock.
456 */
457 if ((0 != old_pa) && IS_MANAGED_PAGE(pai)) {
458 LOCK_PVH(pai);
459 old_pa_locked = TRUE;
460 old_pa = pte_to_pa(*pte);
461 if (0 == old_pa) {
462 UNLOCK_PVH(pai); /* another path beat us to it */
463 old_pa_locked = FALSE;
464 }
465 }
466
467 /*
468 * Special case if the incoming physical page is already mapped
469 * at this address.
470 */
471 if (old_pa == pa) {
472
473 /*
474 * May be changing its wired attribute or protection
475 */
476
477 template = pa_to_pte(pa) | INTEL_PTE_VALID;
478
479 if (VM_MEM_NOT_CACHEABLE ==
480 (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT))) {
481 if (!(flags & VM_MEM_GUARDED))
482 template |= INTEL_PTE_PTA;
483 template |= INTEL_PTE_NCACHE;
484 }
485 if (pmap != kernel_pmap)
486 template |= INTEL_PTE_USER;
487 if (prot & VM_PROT_WRITE)
488 template |= INTEL_PTE_WRITE;
489
490 if (set_NX)
491 template |= INTEL_PTE_NX;
492
493 if (wired) {
494 template |= INTEL_PTE_WIRED;
495 if (!iswired(*pte))
496 OSAddAtomic(+1,
497 &pmap->stats.wired_count);
498 } else {
499 if (iswired(*pte)) {
500 assert(pmap->stats.wired_count >= 1);
501 OSAddAtomic(-1,
502 &pmap->stats.wired_count);
503 }
504 }
505 if (superpage) /* this path can not be used */
506 template |= INTEL_PTE_PS; /* to change the page size! */
507
508 /* store modified PTE and preserve RC bits */
509 pmap_update_pte(pte, *pte,
510 template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
511 if (old_pa_locked) {
512 UNLOCK_PVH(pai);
513 old_pa_locked = FALSE;
514 }
515 need_tlbflush = TRUE;
516 goto Done;
517 }
518
519 /*
520 * Outline of code from here:
521 * 1) If va was mapped, update TLBs, remove the mapping
522 * and remove old pvlist entry.
523 * 2) Add pvlist entry for new mapping
524 * 3) Enter new mapping.
525 *
526 * If the old physical page is not managed step 1) is skipped
527 * (except for updating the TLBs), and the mapping is
528 * overwritten at step 3). If the new physical page is not
529 * managed, step 2) is skipped.
530 */
531
532 if (old_pa != (pmap_paddr_t) 0) {
533
534 /*
535 * Don't do anything to pages outside valid memory here.
536 * Instead convince the code that enters a new mapping
537 * to overwrite the old one.
538 */
539
540 /* invalidate the PTE */
541 pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
542 /* propagate invalidate everywhere */
543 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
544 /* remember reference and change */
545 old_pte = *pte;
546 oattr = (char) (old_pte & (PHYS_MODIFIED | PHYS_REFERENCED));
547 /* completely invalidate the PTE */
548 pmap_store_pte(pte, 0);
549
550 if (IS_MANAGED_PAGE(pai)) {
551#if TESTING
552 if (pmap->stats.resident_count < 1)
553 panic("pmap_enter: resident_count");
554#endif
555 assert(pmap->stats.resident_count >= 1);
556 OSAddAtomic(-1,
557 &pmap->stats.resident_count);
558
559 if (iswired(old_pte)) {
560#if TESTING
561 if (pmap->stats.wired_count < 1)
562 panic("pmap_enter: wired_count");
563#endif
564 assert(pmap->stats.wired_count >= 1);
565 OSAddAtomic(-1,
566 &pmap->stats.wired_count);
567 }
568 pmap_phys_attributes[pai] |= oattr;
569
570 /*
571 * Remove the mapping from the pvlist for
572 * this physical page.
573 * We'll end up with either a rooted pv or a
574 * hashed pv
575 */
576 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte);
577
578 } else {
579
580 /*
581 * old_pa is not managed.
582 * Do removal part of accounting.
583 */
584
585 if (iswired(old_pte)) {
586 assert(pmap->stats.wired_count >= 1);
587 OSAddAtomic(-1,
588 &pmap->stats.wired_count);
589 }
590 }
591 }
592
593 /*
594 * If we had a previously managed page locked, unlock it now
595 */
596 if (old_pa_locked) {
597 UNLOCK_PVH(pai);
598 old_pa_locked = FALSE;
599 }
600
601 pai = pa_index(pa); /* now working with new incoming phys page */
602 if (IS_MANAGED_PAGE(pai)) {
603
604 /*
605 * Step 2) Enter the mapping in the PV list for this
606 * physical page.
607 */
608 pv_h = pai_to_pvh(pai);
609
610 LOCK_PVH(pai);
611
612 if (pv_h->pmap == PMAP_NULL) {
613 /*
614 * No mappings yet, use rooted pv
615 */
616 pv_h->va = vaddr;
617 pv_h->pmap = pmap;
618 queue_init(&pv_h->qlink);
619 } else {
620 /*
621 * Add new pv_hashed_entry after header.
622 */
623 if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
624 pvh_e = pvh_new;
625 pvh_new = PV_HASHED_ENTRY_NULL;
626 } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
627 PV_HASHED_ALLOC(pvh_e);
628 if (PV_HASHED_ENTRY_NULL == pvh_e) {
629 /*
630 * The pv free list is empty. If we are on
631 * the kernel pmap, we'll use one of
632 * the special private kernel pv_e's;
633 * otherwise, we need to unlock
634 * everything, zalloc a pv_e, and
635 * restart, bringing the pv_e in with
636 * us.
637 */
638 if (kernel_pmap == pmap) {
639 PV_HASHED_KERN_ALLOC(pvh_e);
640 } else {
641 UNLOCK_PVH(pai);
642 PMAP_UNLOCK(pmap);
643 pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
644 goto Retry;
645 }
646 }
647 }
648
649 if (PV_HASHED_ENTRY_NULL == pvh_e)
650 panic("Mapping alias chain exhaustion, possibly induced by numerous kernel virtual double mappings");
651
652 pvh_e->va = vaddr;
653 pvh_e->pmap = pmap;
654 pvh_e->ppn = pn;
655 pv_hash_add(pvh_e, pv_h);
656
657 /*
658 * Remember that we used the pvlist entry.
659 */
660 pvh_e = PV_HASHED_ENTRY_NULL;
661 }
662
663 /*
664 * only count the mapping
665 * for 'managed memory'
666 */
667 OSAddAtomic(+1, & pmap->stats.resident_count);
668 if (pmap->stats.resident_count > pmap->stats.resident_max) {
669 pmap->stats.resident_max = pmap->stats.resident_count;
670 }
671 } else if (last_managed_page == 0) {
672 /* Account for early mappings created before "managed pages"
673 * are determined. Consider consulting the available DRAM map.
674 */
675 OSAddAtomic(+1, &pmap->stats.resident_count);
676 }
677 /*
678 * Step 3) Enter the mapping.
679 *
680 * Build a template to speed up entering -
681 * only the pfn changes.
682 */
683 template = pa_to_pte(pa) | INTEL_PTE_VALID;
684
685 if (flags & VM_MEM_NOT_CACHEABLE) {
686 if (!(flags & VM_MEM_GUARDED))
687 template |= INTEL_PTE_PTA;
688 template |= INTEL_PTE_NCACHE;
689 }
690 if (pmap != kernel_pmap)
691 template |= INTEL_PTE_USER;
692 if (prot & VM_PROT_WRITE)
693 template |= INTEL_PTE_WRITE;
694 if (set_NX)
695 template |= INTEL_PTE_NX;
696 if (wired) {
697 template |= INTEL_PTE_WIRED;
698 OSAddAtomic(+1, & pmap->stats.wired_count);
699 }
700 if (superpage)
701 template |= INTEL_PTE_PS;
702 pmap_store_pte(pte, template);
703
704 /*
705 * if this was a managed page we delayed unlocking the pv until here
706 * to prevent pmap_page_protect et al from finding it until the pte
707 * has been stored
708 */
709 if (IS_MANAGED_PAGE(pai)) {
710 UNLOCK_PVH(pai);
711 }
712Done:
713 if (need_tlbflush == TRUE)
714 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
715
716 if (pvh_e != PV_HASHED_ENTRY_NULL) {
717 PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
718 }
719 if (pvh_new != PV_HASHED_ENTRY_NULL) {
720 PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
721 }
722 PMAP_UNLOCK(pmap);
723
724 if (delpage_pm_obj) {
725 vm_page_t m;
726
727 vm_object_lock(delpage_pm_obj);
728 m = vm_page_lookup(delpage_pm_obj, delpage_pde_index);
729 if (m == VM_PAGE_NULL)
730 panic("pmap_enter: pte page not in object");
731 VM_PAGE_FREE(m);
732 OSAddAtomic(-1, &inuse_ptepages_count);
733 vm_object_unlock(delpage_pm_obj);
734 }
735
736 PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, 0, 0, 0, 0, 0);
737}
738
739/*
740 * Remove a range of hardware page-table entries.
741 * The entries given are the first (inclusive)
742 * and last (exclusive) entries for the VM pages.
743 * The virtual address is the va for the first pte.
744 *
745 * The pmap must be locked.
746 * If the pmap is not the kernel pmap, the range must lie
747 * entirely within one pte-page. This is NOT checked.
748 * Assumes that the pte-page exists.
749 */
750
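/*
 * For how callers derive the [spte, epte) window, see pmap_remove() below;
 * in sketch form (hypothetical "va" and "len", assuming the range stays
 * within a single pte-page, as required above for non-kernel pmaps):
 */
#if 0	/* example only */
        pt_entry_t *spte = pmap_pte(map, va);
        pt_entry_t *epte = &spte[intel_btop(len)];

        pmap_remove_range(map, va, spte, epte);
#endif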
751void
752pmap_remove_range(
753 pmap_t pmap,
754 vm_map_offset_t start_vaddr,
755 pt_entry_t *spte,
756 pt_entry_t *epte)
757{
758 pt_entry_t *cpte;
759 pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
760 pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
761 pv_hashed_entry_t pvh_e;
762 int pvh_cnt = 0;
763 int num_removed, num_unwired, num_found, num_invalid;
764 int pai;
765 pmap_paddr_t pa;
766 vm_map_offset_t vaddr;
767
768 num_removed = 0;
769 num_unwired = 0;
770 num_found = 0;
771 num_invalid = 0;
772#if defined(__i386__)
773 if (pmap != kernel_pmap &&
774 pmap->pm_task_map == TASK_MAP_32BIT &&
775 start_vaddr >= HIGH_MEM_BASE) {
776 /*
777 * The range is in the "high_shared_pde" which is shared
778 * between the kernel and all 32-bit tasks. It holds
779 * the 32-bit commpage but also the trampolines, GDT, etc...
780 * so we can't let user tasks remove anything from it.
781 */
782 return;
783 }
784#endif
785 /* invalidate the PTEs first to "freeze" them */
786 for (cpte = spte, vaddr = start_vaddr;
787 cpte < epte;
788 cpte++, vaddr += PAGE_SIZE_64) {
789 pt_entry_t p = *cpte;
790
791 pa = pte_to_pa(p);
792 if (pa == 0)
793 continue;
794 num_found++;
795
796 if (iswired(p))
797 num_unwired++;
798
799 pai = pa_index(pa);
800
801 if (!IS_MANAGED_PAGE(pai)) {
802 /*
803 * Outside range of managed physical memory.
804 * Just remove the mappings.
805 */
806 pmap_store_pte(cpte, 0);
807 continue;
808 }
809
810 if ((p & INTEL_PTE_VALID) == 0)
811 num_invalid++;
812
813 /* invalidate the PTE */
814 pmap_update_pte(cpte, *cpte, (*cpte & ~INTEL_PTE_VALID));
815 }
816
817 if (num_found == 0) {
818 /* nothing was changed: we're done */
819 goto update_counts;
820 }
821
822 /* propagate the invalidates to other CPUs */
823
824 PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
825
826 for (cpte = spte, vaddr = start_vaddr;
827 cpte < epte;
828 cpte++, vaddr += PAGE_SIZE_64) {
829
830 pa = pte_to_pa(*cpte);
831 if (pa == 0)
832 continue;
833
834 pai = pa_index(pa);
835
836 LOCK_PVH(pai);
837
838 pa = pte_to_pa(*cpte);
839 if (pa == 0) {
840 UNLOCK_PVH(pai);
841 continue;
842 }
843 num_removed++;
844
845 /*
846 * Get the modify and reference bits, then
847 * nuke the entry in the page table
848 */
849 /* remember reference and change */
850 pmap_phys_attributes[pai] |=
851 (char) (*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
852
853 /*
854 * Remove the mapping from the pvlist for this physical page.
855 */
856 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte);
857
858 /* completely invalidate the PTE */
859 pmap_store_pte(cpte, 0);
860
861 UNLOCK_PVH(pai);
862
863 if (pvh_e != PV_HASHED_ENTRY_NULL) {
864 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
865 pvh_eh = pvh_e;
866
867 if (pvh_et == PV_HASHED_ENTRY_NULL) {
868 pvh_et = pvh_e;
869 }
870 pvh_cnt++;
871 }
872 } /* for loop */
873
874 if (pvh_eh != PV_HASHED_ENTRY_NULL) {
875 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
876 }
877update_counts:
878 /*
879 * Update the counts
880 */
881#if TESTING
882 if (pmap->stats.resident_count < num_removed)
883 panic("pmap_remove_range: resident_count");
884#endif
885 assert(pmap->stats.resident_count >= num_removed);
886 OSAddAtomic(-num_removed, &pmap->stats.resident_count);
887
888#if TESTING
889 if (pmap->stats.wired_count < num_unwired)
890 panic("pmap_remove_range: wired_count");
891#endif
892 assert(pmap->stats.wired_count >= num_unwired);
893 OSAddAtomic(-num_unwired, &pmap->stats.wired_count);
894
895 return;
896}
897
898
899/*
900 * Remove the given range of addresses
901 * from the specified map.
902 *
903 * It is assumed that the start and end are properly
904 * rounded to the hardware page size.
905 */
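/*
 * Illustrative counterpart to the pmap_enter() sketch earlier (not part of
 * the original source): tearing down a single-page kernel mapping at the
 * hypothetical "vaddr".
 */
#if 0	/* example only */
        pmap_remove(kernel_pmap, (addr64_t) vaddr,
                    (addr64_t) vaddr + PAGE_SIZE);
#endif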
906void
907pmap_remove(
908 pmap_t map,
909 addr64_t s64,
910 addr64_t e64)
911{
912 pt_entry_t *pde;
913 pt_entry_t *spte, *epte;
914 addr64_t l64;
915 uint64_t deadline;
916
917 pmap_intr_assert();
918
919 if (map == PMAP_NULL || s64 == e64)
920 return;
921
922 PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
923 map,
924 (uint32_t) (s64 >> 32), s64,
925 (uint32_t) (e64 >> 32), e64);
926
927
928 PMAP_LOCK(map);
929
930#if 0
931 /*
932 * Check that address range in the kernel does not overlap the stacks.
933 * We initialize local static min/max variables once to avoid making
934 * 2 function calls for every remove. Note also that these functions
935 * both return 0 before kernel stacks have been initialized, and hence
936 * the panic is not triggered in this case.
937 */
938 if (map == kernel_pmap) {
939 static vm_offset_t kernel_stack_min = 0;
940 static vm_offset_t kernel_stack_max = 0;
941
942 if (kernel_stack_min == 0) {
943 kernel_stack_min = min_valid_stack_address();
944 kernel_stack_max = max_valid_stack_address();
945 }
946 if ((kernel_stack_min <= s64 && s64 < kernel_stack_max) ||
947 (kernel_stack_min < e64 && e64 <= kernel_stack_max))
948 panic("pmap_remove() attempted in kernel stack");
949 }
950#else
951
952 /*
953 * The values of kernel_stack_min and kernel_stack_max are no longer
954 * relevant now that we allocate kernel stacks in the kernel map,
955 * so the old code above no longer applies. If we wanted to check that
956 * we weren't removing a mapping of a page in a kernel stack we'd
957 * mark the PTE with an unused bit and check that here.
958 */
959
960#endif
961
962 deadline = rdtsc64() + max_preemption_latency_tsc;
963
964 while (s64 < e64) {
965 l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1);
966 if (l64 > e64)
967 l64 = e64;
968 pde = pmap_pde(map, s64);
969
970 if (pde && (*pde & INTEL_PTE_VALID)) {
971 if (*pde & INTEL_PTE_PS) {
972 /*
973 * If we're removing a superpage, pmap_remove_range()
974 * must work on level 2 instead of level 1; and we're
975 * only passing a single level 2 entry instead of a
976 * level 1 range.
977 */
978 spte = pde;
979 epte = spte+1; /* excluded */
980 } else {
981 spte = pmap_pte(map, (s64 & ~(pde_mapped_size - 1)));
982 spte = &spte[ptenum(s64)];
983 epte = &spte[intel_btop(l64 - s64)];
984 }
985 pmap_remove_range(map, s64, spte, epte);
986 }
987 s64 = l64;
988
989 if (s64 < e64 && rdtsc64() >= deadline) {
990 PMAP_UNLOCK(map)
991 PMAP_LOCK(map)
992 deadline = rdtsc64() + max_preemption_latency_tsc;
993 }
994 }
995
996 PMAP_UNLOCK(map);
997
998 PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END,
999 map, 0, 0, 0, 0);
1000
1001}
1002
1003/*
1004 * Routine: pmap_page_protect
1005 *
1006 * Function:
1007 * Lower the permission for all mappings to a given
1008 * page.
1009 */
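/*
 * Illustrative calls (a sketch, not from the original source): VM_PROT_READ
 * write-protects every mapping of the page, while VM_PROT_NONE (or any
 * protection not handled explicitly below) removes all of them.
 */
#if 0	/* example only */
        pmap_page_protect(pn, VM_PROT_READ);	/* write-protect all mappings */
        pmap_page_protect(pn, VM_PROT_NONE);	/* remove all mappings */
#endif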
1010void
1011pmap_page_protect(
1012 ppnum_t pn,
1013 vm_prot_t prot)
1014{
1015 pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
1016 pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
1017 pv_hashed_entry_t nexth;
1018 int pvh_cnt = 0;
1019 pv_rooted_entry_t pv_h;
1020 pv_rooted_entry_t pv_e;
1021 pv_hashed_entry_t pvh_e;
1022 pt_entry_t *pte;
1023 int pai;
1024 pmap_t pmap;
1025 boolean_t remove;
1026
1027 pmap_intr_assert();
1028 assert(pn != vm_page_fictitious_addr);
1029 if (pn == vm_page_guard_addr)
1030 return;
1031
1032 pai = ppn_to_pai(pn);
1033
1034 if (!IS_MANAGED_PAGE(pai)) {
1035 /*
1036 * Not a managed page.
1037 */
1038 return;
1039 }
1040 PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START,
1041 pn, prot, 0, 0, 0);
1042
1043 /*
1044 * Determine the new protection.
1045 */
1046 switch (prot) {
1047 case VM_PROT_READ:
1048 case VM_PROT_READ | VM_PROT_EXECUTE:
1049 remove = FALSE;
1050 break;
1051 case VM_PROT_ALL:
1052 return; /* nothing to do */
1053 default:
1054 remove = TRUE;
1055 break;
1056 }
1057
1058 pv_h = pai_to_pvh(pai);
1059
1060 LOCK_PVH(pai);
1061
1062
1063 /*
1064 * Walk down PV list, if any, changing or removing all mappings.
1065 */
1066 if (pv_h->pmap == PMAP_NULL)
1067 goto done;
1068
1069 pv_e = pv_h;
1070 pvh_e = (pv_hashed_entry_t) pv_e; /* cheat */
1071
1072 do {
1073 vm_map_offset_t vaddr;
1074
1075 pmap = pv_e->pmap;
1076 vaddr = pv_e->va;
1077 pte = pmap_pte(pmap, vaddr);
1078
1079 if (0 == pte) {
1080 panic("pmap_page_protect() "
1081 "pmap=%p pn=0x%x vaddr=0x%llx\n",
1082 pmap, pn, vaddr);
1083 }
1084#if DEBUG
1085 if (pa_index(pte_to_pa(*pte)) != pn)
1086 panic("pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);
1087#endif
1088 nexth = (pv_hashed_entry_t) queue_next(&pvh_e->qlink);
1089
1090 /*
1091 * Remove the mapping if new protection is NONE
1092 * or if write-protecting a kernel mapping.
1093 */
1094 if (remove || pmap == kernel_pmap) {
1095 /*
1096 * Remove the mapping, collecting dirty bits.
1097 */
1098 pmap_update_pte(pte, *pte, *pte & ~INTEL_PTE_VALID);
1099 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
1100 pmap_phys_attributes[pai] |=
1101 *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
1102 pmap_store_pte(pte, 0);
1103
1104#if TESTING
1105 if (pmap->stats.resident_count < 1)
1106 panic("pmap_page_protect: resident_count");
1107#endif
1108 assert(pmap->stats.resident_count >= 1);
1109 OSAddAtomic(-1, &pmap->stats.resident_count);
1110
1111 /*
1112 * Deal with the pv_rooted_entry.
1113 */
1114
1115 if (pv_e == pv_h) {
1116 /*
1117 * Fix up head later.
1118 */
1119 pv_h->pmap = PMAP_NULL;
1120
1121 pmap_phys_attributes[pai] &= ~PHYS_NOENCRYPT;
1122 } else {
1123 /*
1124 * Delete this entry.
1125 */
1126 pv_hash_remove(pvh_e);
1127 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
1128 pvh_eh = pvh_e;
1129
1130 if (pvh_et == PV_HASHED_ENTRY_NULL)
1131 pvh_et = pvh_e;
1132 pvh_cnt++;
1133 }
1134 } else {
1135 /*
1136 * Write-protect.
1137 */
1138 pmap_update_pte(pte, *pte, *pte & ~INTEL_PTE_WRITE);
1139 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
1140 }
1141 pvh_e = nexth;
1142 } while ((pv_e = (pv_rooted_entry_t) nexth) != pv_h);
1143
1144
1145 /*
1146 * If pv_head mapping was removed, fix it up.
1147 */
1148 if (pv_h->pmap == PMAP_NULL) {
1149 pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
1150
1151 if (pvh_e != (pv_hashed_entry_t) pv_h) {
1152 pv_hash_remove(pvh_e);
1153 pv_h->pmap = pvh_e->pmap;
1154 pv_h->va = pvh_e->va;
1155 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
1156 pvh_eh = pvh_e;
1157
1158 if (pvh_et == PV_HASHED_ENTRY_NULL)
1159 pvh_et = pvh_e;
1160 pvh_cnt++;
1161 }
1162 }
1163 if (pvh_eh != PV_HASHED_ENTRY_NULL) {
1164 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
1165 }
1166done:
1167 UNLOCK_PVH(pai);
1168
1169 PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END,
1170 0, 0, 0, 0, 0);
1171}
1172
1173__private_extern__ void
1174pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1,2)) {
1175 if (pmap_pagetable_corruption_incidents > 0) {
1176 int i, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG);
1177 (*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout);
1178 for (i = 0; i < e; i++) {
1179 (*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n", pmap_pagetable_corruption_records[i].incident, pmap_pagetable_corruption_records[i].reason, pmap_pagetable_corruption_records[i].action, pmap_pagetable_corruption_records[i].abstime);
1180 }
1181 }
1182}
1183
1184void
1185mapping_free_prime(void)
1186{
1187 int i;
1188 pv_hashed_entry_t pvh_e;
1189 pv_hashed_entry_t pvh_eh;
1190 pv_hashed_entry_t pvh_et;
1191 int pv_cnt;
1192
1193 pv_cnt = 0;
1194 pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
1195 for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK); i++) {
1196 pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
1197
1198 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
1199 pvh_eh = pvh_e;
1200
1201 if (pvh_et == PV_HASHED_ENTRY_NULL)
1202 pvh_et = pvh_e;
1203 pv_cnt++;
1204 }
1205 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
1206
1207 pv_cnt = 0;
1208 pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
1209 for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
1210 pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
1211
1212 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
1213 pvh_eh = pvh_e;
1214
1215 if (pvh_et == PV_HASHED_ENTRY_NULL)
1216 pvh_et = pvh_e;
1217 pv_cnt++;
1218 }
1219 PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
1220
1221}
1222
1223static inline void
1224pmap_pagetable_corruption_log_setup(void) {
1225 if (pmap_pagetable_corruption_log_call == NULL) {
1226 nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime);
1227 thread_call_setup(&pmap_pagetable_corruption_log_call_data,
1228 (thread_call_func_t) pmap_pagetable_corruption_msg_log,
1229 (thread_call_param_t) &printf);
1230 pmap_pagetable_corruption_log_call = &pmap_pagetable_corruption_log_call_data;
1231 }
1232}
1233
1234void
1235mapping_adjust(void)
1236{
1237 pv_hashed_entry_t pvh_e;
1238 pv_hashed_entry_t pvh_eh;
1239 pv_hashed_entry_t pvh_et;
1240 int pv_cnt;
1241 int i;
1242
1243 if (mapping_adjust_call == NULL) {
1244 thread_call_setup(&mapping_adjust_call_data,
1245 (thread_call_func_t) mapping_adjust,
1246 (thread_call_param_t) NULL);
1247 mapping_adjust_call = &mapping_adjust_call_data;
1248 }
1249
1250 pmap_pagetable_corruption_log_setup();
1251
1252 pv_cnt = 0;
1253 pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
1254 if (pv_hashed_kern_free_count < PV_HASHED_KERN_LOW_WATER_MARK) {
1255 for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK; i++) {
1256 pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
1257
1258 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
1259 pvh_eh = pvh_e;
1260
1261 if (pvh_et == PV_HASHED_ENTRY_NULL)
1262 pvh_et = pvh_e;
1263 pv_cnt++;
1264 }
1265 PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
1266 }
1267
1268 pv_cnt = 0;
1269 pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
1270 if (pv_hashed_free_count < PV_HASHED_LOW_WATER_MARK) {
1271 for (i = 0; i < PV_HASHED_ALLOC_CHUNK; i++) {
1272 pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
1273
1274 pvh_e->qlink.next = (queue_entry_t)pvh_eh;
1275 pvh_eh = pvh_e;
1276
1277 if (pvh_et == PV_HASHED_ENTRY_NULL)
1278 pvh_et = pvh_e;
1279 pv_cnt++;
1280 }
1281 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
1282 }
1283 mappingrecurse = 0;
1284}
1285
1286
1287boolean_t
1288pmap_is_noencrypt(ppnum_t pn)
1289{
1290 int pai;
1291
1292 pai = ppn_to_pai(pn);
1293
1294 if (!IS_MANAGED_PAGE(pai))
1295 return (TRUE);
1296
1297 if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT)
1298 return (TRUE);
1299
1300 return (FALSE);
1301}
1302
1303
1304void
1305pmap_set_noencrypt(ppnum_t pn)
1306{
1307 int pai;
1308
1309 pai = ppn_to_pai(pn);
1310
1311 if (IS_MANAGED_PAGE(pai)) {
1312 LOCK_PVH(pai);
1313
1314 pmap_phys_attributes[pai] |= PHYS_NOENCRYPT;
1315
1316 UNLOCK_PVH(pai);
1317 }
1318}
1319
1320
1321void
1322pmap_clear_noencrypt(ppnum_t pn)
1323{
1324 int pai;
1325
1326 pai = ppn_to_pai(pn);
1327
1328 if (IS_MANAGED_PAGE(pai)) {
1329 LOCK_PVH(pai);
1330
1331 pmap_phys_attributes[pai] &= ~PHYS_NOENCRYPT;
1332
1333 UNLOCK_PVH(pai);
1334 }
1335}
1336
1337void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) {
1338 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
1339
1340 if (dofilter) {
1341 CPU_CR3_MARK_INACTIVE();
1342 } else {
1343 CPU_CR3_MARK_ACTIVE();
1344 __asm__ volatile("mfence");
1345 if (current_cpu_datap()->cpu_tlb_invalid)
1346 process_pmap_updates();
1347 }
1348}
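/*
 * Typical bracketing pattern (an assumption based on the semantics above,
 * not from the original source): a CPU about to go quiescent marks its CR3
 * inactive so peers can skip sending it TLB shootdown interrupts, then
 * re-activates and processes any pending invalidations on the way out.
 */
#if 0	/* example only */
        /* called with interrupts or preemption disabled, per the assert above */
        x86_filter_TLB_coherency_interrupts(TRUE);
        /* ... idle / low-power window ... */
        x86_filter_TLB_coherency_interrupts(FALSE);
#endif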
1349