[apple/xnu.git] / osfmk / i386 / pmap_x86_common.c (xnu-7195.101.1)
b0d623f7 1/*
39236c6e 2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach_assert.h>
30
31#include <vm/pmap.h>
32#include <vm/vm_map.h>
39037602 33#include <vm/vm_kern.h>
316670eb 34#include <kern/ledger.h>
b0d623f7 35#include <i386/pmap_internal.h>
b7266188 36
37void pmap_remove_range(
38 pmap_t pmap,
39 vm_map_offset_t va,
40 pt_entry_t *spte,
41 pt_entry_t *epte);
42
f427ee49 43static void pmap_remove_range_options(
44 pmap_t pmap,
45 vm_map_offset_t va,
46 pt_entry_t *spte,
47 pt_entry_t *epte,
48 int options);
49
50void pmap_reusable_range(
51 pmap_t pmap,
52 vm_map_offset_t va,
53 pt_entry_t *spte,
54 pt_entry_t *epte,
55 boolean_t reusable);
39236c6e 56
57uint32_t pmap_update_clear_pte_count;
58
59/*
60 * The Intel platform can nest at the PDE level, so nesting is done NBPDE
61 * (i.e. 2MB) at a time, on an NBPDE boundary.
62 */
63
64uint64_t
65pmap_shared_region_size_min(__unused pmap_t pmap)
66{
67 return NBPDE;
68}
69
70uint64_t
71pmap_commpage_size_min(__unused pmap_t pmap)
72{
73 return NBPDE;
74}
75
76uint64_t
77pmap_nesting_size_max(__unused pmap_t pmap)
78{
79 return 0llu - (uint64_t)NBPDE;
80}
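/*
 * Illustrative note (not part of the original file): with NBPDE = 2MiB,
 * pmap_shared_region_size_min() and pmap_commpage_size_min() both report a
 * 2MiB nesting granule, and pmap_nesting_size_max() evaluates to
 * 0 - NBPDE == 0xFFFFFFFFFFE00000 in 64-bit arithmetic, i.e. the largest
 * NBPDE-aligned value a uint64_t can hold.
 */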
81
82/*
83 * kern_return_t pmap_nest(grand, subord, va_start, size)
84 *
85 * grand = the pmap that we will nest subord into
86 * subord = the pmap that goes into the grand
87 * va_start = start of range in pmap to be inserted
88 * size = Size of nest area (up to 16TB)
89 *
90 * Inserts a pmap into another. This is used to implement shared segments.
91 *
92 * Note that we depend upon higher-level VM locks to ensure that things don't change while
93 * we are doing this. For example, the VM should not be doing any pmap enters while it is
94 * nesting, nor performing two nests at once.
95 */
96
97/*
98 * This routine can nest subtrees either at the PDPT level (1GiB) or at the
99 * PDE level (2MiB). We currently disallow disparate offsets for the "subord"
100 * container and the "grand" parent. A minor optimization to consider for the
101 * future: make the "subord" truly a container rather than a full-fledged
102 * pagetable hierarchy which can be unnecessarily sparse (DRK).
103 */
104
0a7de745 105kern_return_t
f427ee49 106pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, uint64_t size)
0a7de745 107{
f427ee49 108 vm_map_offset_t vaddr;
109 pd_entry_t *pde, *npde;
110 unsigned int i;
111 uint64_t num_pde;
b0d623f7 112
113 assert(!is_ept_pmap(grand));
114 assert(!is_ept_pmap(subord));
115
116 if ((size & (pmap_shared_region_size_min(grand) - 1)) ||
117 (va_start & (pmap_shared_region_size_min(grand) - 1)) ||
0a7de745 118 ((size >> 28) > 65536)) { /* Max size we can nest is 16TB */
b0d623f7 119 return KERN_INVALID_VALUE;
0a7de745 120 }
b0d623f7 121
0a7de745 122 if (size == 0) {
123 panic("pmap_nest: size is invalid - %016llX\n", size);
124 }
125
b0d623f7 126 PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
127 VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord),
128 VM_KERNEL_ADDRHIDE(va_start));
b0d623f7 129
f427ee49 130 vaddr = (vm_map_offset_t)va_start;
131 num_pde = size >> PDESHIFT;
132
0a7de745 133 PMAP_LOCK_EXCLUSIVE(subord);
134
135 subord->pm_shared = TRUE;
136
137 for (i = 0; i < num_pde;) {
138 if (((vaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG) {
139 npde = pmap64_pdpt(subord, vaddr);
140
141 while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
0a7de745 142 PMAP_UNLOCK_EXCLUSIVE(subord);
f427ee49 143 pmap_expand_pdpt(subord, vaddr, PMAP_EXPAND_OPTIONS_NONE);
0a7de745 144 PMAP_LOCK_EXCLUSIVE(subord);
f427ee49 145 npde = pmap64_pdpt(subord, vaddr);
146 }
147 *npde |= INTEL_PDPTE_NESTED;
f427ee49 148 vaddr += NBPDPT;
b0d623f7 149 i += (uint32_t)NPDEPG;
0a7de745 150 } else {
f427ee49 151 npde = pmap_pde(subord, vaddr);
152
153 while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) {
0a7de745 154 PMAP_UNLOCK_EXCLUSIVE(subord);
f427ee49 155 pmap_expand(subord, vaddr, PMAP_EXPAND_OPTIONS_NONE);
0a7de745 156 PMAP_LOCK_EXCLUSIVE(subord);
f427ee49 157 npde = pmap_pde(subord, vaddr);
b0d623f7 158 }
f427ee49 159 vaddr += NBPDE;
160 i++;
161 }
162 }
163
0a7de745 164 PMAP_UNLOCK_EXCLUSIVE(subord);
165
166 vaddr = (vm_map_offset_t)va_start;
167
0a7de745 168 PMAP_LOCK_EXCLUSIVE(grand);
b0d623f7 169
0a7de745 170 for (i = 0; i < num_pde;) {
171 pd_entry_t tpde;
172
0a7de745 173 if (((vaddr & PDPTMASK) == 0) && ((num_pde - i) >= NPDEPG)) {
b0d623f7 174 npde = pmap64_pdpt(subord, vaddr);
0a7de745 175 if (npde == 0) {
b0d623f7 176 panic("pmap_nest: no PDPT, subord %p nstart 0x%llx", subord, vaddr);
0a7de745 177 }
178 tpde = *npde;
179 pde = pmap64_pdpt(grand, vaddr);
180 if (0 == pde) {
0a7de745 181 PMAP_UNLOCK_EXCLUSIVE(grand);
316670eb 182 pmap_expand_pml4(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
0a7de745 183 PMAP_LOCK_EXCLUSIVE(grand);
184 pde = pmap64_pdpt(grand, vaddr);
185 }
0a7de745 186 if (pde == 0) {
b0d623f7 187 panic("pmap_nest: no PDPT, grand %p vaddr 0x%llx", grand, vaddr);
0a7de745 188 }
189 pmap_store_pte(pde, tpde);
190 vaddr += NBPDPT;
191 i += (uint32_t) NPDEPG;
0a7de745 192 } else {
39037602 193 npde = pmap_pde(subord, vaddr);
0a7de745 194 if (npde == 0) {
39037602 195 panic("pmap_nest: no npde, subord %p vaddr 0x%llx", subord, vaddr);
0a7de745 196 }
b0d623f7 197 tpde = *npde;
b0d623f7 198 pde = pmap_pde(grand, vaddr);
199 if (0 == pde) {
200 PMAP_UNLOCK_EXCLUSIVE(grand);
316670eb 201 pmap_expand_pdpt(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE);
0a7de745 202 PMAP_LOCK_EXCLUSIVE(grand);
203 pde = pmap_pde(grand, vaddr);
204 }
205
0a7de745 206 if (pde == 0) {
b0d623f7 207 panic("pmap_nest: no pde, grand %p vaddr 0x%llx", grand, vaddr);
0a7de745 208 }
209 vaddr += NBPDE;
210 pmap_store_pte(pde, tpde);
211 i++;
212 }
213 }
214
0a7de745 215 PMAP_UNLOCK_EXCLUSIVE(grand);
b0d623f7 216
5ba3f43e 217 PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, KERN_SUCCESS);
218
219 return KERN_SUCCESS;
220}
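/*
 * Illustrative sketch (not part of the original file): one way a caller
 * could drive pmap_nest() for a shared region, under the alignment and
 * size rules documented above. The function name, pmaps, and base address
 * are hypothetical values chosen for the example.
 */
#if 0 /* example only */
static kern_return_t
example_nest_shared_region(pmap_t grand, pmap_t subord)
{
	addr64_t base = 0x7FFF00000000ULL;   /* hypothetical, NBPDE-aligned */
	uint64_t size = 256 * NBPDE;         /* multiple of the 2MiB nesting unit */

	/* both base and size must be multiples of pmap_shared_region_size_min() */
	assert((base & (pmap_shared_region_size_min(grand) - 1)) == 0);
	assert((size & (pmap_shared_region_size_min(grand) - 1)) == 0);
	assert((size >> 28) <= 65536);       /* pmap_nest() rejects anything over 16TB */

	return pmap_nest(grand, subord, base, size);
}
#endif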
221
222/*
223 * kern_return_t pmap_unnest(grand, vaddr)
224 *
225 * grand = the pmap that we will un-nest subord from
226 * vaddr = start of range in pmap to be unnested
227 *
228 * Removes a pmap from another. This is used to implement shared segments.
229 */
230
231kern_return_t
232pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size)
233{
234 pd_entry_t *pde;
235 unsigned int i;
236 uint64_t num_pde;
237 addr64_t va_start, va_end;
238 uint64_t npdpt = PMAP_INVALID_PDPTNUM;
239
240 PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START,
0a7de745 241 VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr));
b0d623f7 242
243 if ((size & (pmap_shared_region_size_min(grand) - 1)) ||
244 (vaddr & (pmap_shared_region_size_min(grand) - 1))) {
245 panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n",
246 grand, vaddr, size);
247 }
248
249 assert(!is_ept_pmap(grand));
250
b0d623f7 251 /* align everything to PDE boundaries */
252 va_start = vaddr & ~(NBPDE - 1);
253 va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE - 1);
254 size = va_end - va_start;
255
0a7de745 256 PMAP_LOCK_EXCLUSIVE(grand);
257
258 num_pde = size >> PDESHIFT;
259 vaddr = va_start;
260
261 for (i = 0; i < num_pde;) {
262 if (pdptnum(grand, vaddr) != npdpt) {
263 npdpt = pdptnum(grand, vaddr);
264 pde = pmap64_pdpt(grand, vaddr);
265 if (pde && (*pde & INTEL_PDPTE_NESTED)) {
266 pmap_store_pte(pde, (pd_entry_t)0);
267 i += (uint32_t) NPDEPG;
268 vaddr += NBPDPT;
269 continue;
270 }
271 }
272 pde = pmap_pde(grand, (vm_map_offset_t)vaddr);
0a7de745 273 if (pde == 0) {
b0d623f7 274 panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr);
0a7de745 275 }
276 pmap_store_pte(pde, (pd_entry_t)0);
277 i++;
278 vaddr += NBPDE;
279 }
280
281 PMAP_UPDATE_TLBS(grand, va_start, va_end);
282
0a7de745 283 PMAP_UNLOCK_EXCLUSIVE(grand);
284
285 PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_END, KERN_SUCCESS);
286
287 return KERN_SUCCESS;
288}
289
290kern_return_t
291pmap_unnest_options(
292 pmap_t grand,
293 addr64_t vaddr,
294 __unused uint64_t size,
295 __unused unsigned int options)
296{
297 return pmap_unnest(grand, vaddr, size);
298}
299
300/* Invoked by the Mach VM to determine the platform specific unnest region */
301
302boolean_t
303pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e)
304{
305 pd_entry_t *pdpte;
306 boolean_t rval = FALSE;
307
0a7de745 308 PMAP_LOCK_EXCLUSIVE(p);
309
310 pdpte = pmap64_pdpt(p, *s);
311 if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
0a7de745 312 *s &= ~(NBPDPT - 1);
313 rval = TRUE;
314 }
315
316 pdpte = pmap64_pdpt(p, *e);
317 if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) {
0a7de745 318 *e = ((*e + NBPDPT) & ~(NBPDPT - 1));
319 rval = TRUE;
320 }
321
0a7de745 322 PMAP_UNLOCK_EXCLUSIVE(p);
323
324 return rval;
325}
326
327pmap_paddr_t
328pmap_find_pa(pmap_t pmap, addr64_t va)
329{
330 pt_entry_t *ptp;
331 pd_entry_t *pdep;
332 pd_entry_t pde;
333 pt_entry_t pte;
334 boolean_t is_ept, locked = FALSE;
335 pmap_paddr_t pa = 0;
336
337 is_ept = is_ept_pmap(pmap);
338
339 if ((pmap != kernel_pmap) && not_in_kdp) {
340 PMAP_LOCK_EXCLUSIVE(pmap);
341 locked = TRUE;
342 } else {
343 mp_disable_preemption();
344 }
345
346 if (os_ref_get_count(&pmap->ref_count) == 0) {
347 goto pfp_exit;
348 }
349
350 pdep = pmap_pde(pmap, va);
351
352 if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) {
353 if (pde & PTE_PS) {
354 pa = pte_to_pa(pde) + (va & I386_LPGMASK);
355 } else {
356 ptp = pmap_pte(pmap, va);
357 if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) {
358 pa = pte_to_pa(pte) + (va & PAGE_MASK);
359 }
360 }
361 }
362pfp_exit:
363 if (locked) {
364 PMAP_UNLOCK_EXCLUSIVE(pmap);
365 } else {
366 mp_enable_preemption();
367 }
368
369 return pa;
370}
371
372/*
373 * pmap_find_phys returns the (4K) physical page number containing a
374 * given virtual address in a given pmap.
375 * Note that pmap_pte may return a pde if this virtual address is
376 * mapped by a large page and this is taken into account in order
377 * to return the correct page number in this case.
378 */
379ppnum_t
380pmap_find_phys(pmap_t pmap, addr64_t va)
381{
382 ppnum_t ppn = 0;
383 pmap_paddr_t pa = 0;
384
385 pa = pmap_find_pa(pmap, va);
386 ppn = (ppnum_t) i386_btop(pa);
387
388 return ppn;
389}
390
391ppnum_t
392pmap_find_phys_nofault(pmap_t pmap, addr64_t va)
393{
394 if ((pmap == kernel_pmap) ||
395 ((current_thread()->map) && (pmap == vm_map_pmap(current_thread()->map)))) {
396 return pmap_find_phys(pmap, va);
397 }
398 return 0;
399}
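/*
 * Illustrative sketch (not part of the original file): how the lookup
 * helpers above relate. pmap_find_pa() returns a full physical address
 * (page frame plus offset), while pmap_find_phys() reports only the 4K
 * physical page number. The function name below is hypothetical.
 */
#if 0 /* example only */
static void
example_translate(pmap_t pmap, addr64_t va)
{
	pmap_paddr_t pa  = pmap_find_pa(pmap, va);    /* 0 if va is unmapped */
	ppnum_t      ppn = pmap_find_phys(pmap, va);  /* i386_btop(pa) */

	assert(ppn == (ppnum_t) i386_btop(pa));
	if (pa != 0) {
		/* the low 12 bits of the translation preserve the offset within the page */
		assert((pa & PAGE_MASK) == (va & PAGE_MASK));
	}
}
#endif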
400
401/*
402 * pmap_get_prot returns the equivalent Vm page protections
403 * set on a given address, 'va'. This function is used in the
404 * ml_static_verify_page_protections() routine which is used
405 * by the kext loading code to validate that the TEXT segment
406 * of a kext is mapped executable.
407 */
408kern_return_t
409pmap_get_prot(pmap_t pmap, addr64_t va, vm_prot_t *protp)
b0d623f7 410{
411 pt_entry_t *ptp;
412 pd_entry_t *pdep;
413 pd_entry_t pde;
414 pt_entry_t pte;
415 boolean_t is_ept, locked = FALSE;
416 kern_return_t retval = KERN_FAILURE;
417 vm_prot_t prot = 0;
418
419 is_ept = is_ept_pmap(pmap);
b0d623f7 420
421 if ((pmap != kernel_pmap) && not_in_kdp) {
422 PMAP_LOCK_EXCLUSIVE(pmap);
423 locked = TRUE;
424 } else {
425 mp_disable_preemption();
426 }
b0d623f7 427
cb323159 428 if (os_ref_get_count(&pmap->ref_count) == 0) {
b0d623f7 429 goto pfp_exit;
0a7de745 430 }
431
432 pdep = pmap_pde(pmap, va);
433
434 if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) {
435 if (pde & PTE_PS) {
436 prot = VM_PROT_READ;
437
438 if (pde & PTE_WRITE(is_ept)) {
439 prot |= VM_PROT_WRITE;
440 }
441 if (PTE_IS_EXECUTABLE(is_ept, pde)) {
442 prot |= VM_PROT_EXECUTE;
443 }
444 retval = KERN_SUCCESS;
0a7de745 445 } else {
b0d623f7 446 ptp = pmap_pte(pmap, va);
3e170ce0 447 if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) {
448 prot = VM_PROT_READ;
449
450 if (pte & PTE_WRITE(is_ept)) {
451 prot |= VM_PROT_WRITE;
452 }
453 if (PTE_IS_EXECUTABLE(is_ept, pte)) {
454 prot |= VM_PROT_EXECUTE;
455 }
456 retval = KERN_SUCCESS;
457 }
458 }
459 }
f427ee49 460
461pfp_exit:
462 if (locked) {
463 PMAP_UNLOCK_EXCLUSIVE(pmap);
464 } else {
465 mp_enable_preemption();
466 }
b0d623f7 467
468 if (protp) {
469 *protp = prot;
470 }
471
472 return retval;
473}
474
475/*
476 * Update cache attributes for all extant managed mappings.
477 * Assumes PV for this page is locked, and that the page
478 * is managed. We assume that this physical page may be mapped in
479 * both EPT and normal Intel PTEs, so we convert the attributes
480 * to the corresponding format for each pmap.
481 *
482 * We assert that the passed set of attributes is a subset of the
483 * PHYS_CACHEABILITY_MASK.
6d2010ae 484 */
6d2010ae 485void
486pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes)
487{
488 pv_rooted_entry_t pv_h, pv_e;
489 pv_hashed_entry_t pvh_e, nexth;
490 vm_map_offset_t vaddr;
491 pmap_t pmap;
492 pt_entry_t *ptep;
493 boolean_t is_ept;
494 unsigned ept_attributes;
495
6d2010ae 496 assert(IS_MANAGED_PAGE(pn));
497 assert(((~PHYS_CACHEABILITY_MASK) & attributes) == 0);
498
499 /* We don't support the PAT bit for EPT PTEs */
500 if (attributes & INTEL_PTE_NCACHE) {
3e170ce0 501 ept_attributes = INTEL_EPT_NCACHE;
0a7de745 502 } else {
3e170ce0 503 ept_attributes = INTEL_EPT_WB;
0a7de745 504 }
505
506 pv_h = pai_to_pvh(pn);
507 /* TODO: translate the PHYS_* bits to PTE bits, while they're
508 * currently identical, they may not remain so
509 * Potential optimization (here and in page_protect),
510 * parallel shootdowns, check for redundant
511 * attribute modifications.
512 */
0a7de745 513
514 /*
515 * Alter attributes on all mappings
516 */
517 if (pv_h->pmap != PMAP_NULL) {
518 pv_e = pv_h;
519 pvh_e = (pv_hashed_entry_t)pv_e;
520
521 do {
522 pmap = pv_e->pmap;
39037602 523 vaddr = PVE_VA(pv_e);
6d2010ae 524 ptep = pmap_pte(pmap, vaddr);
525
526 if (0 == ptep) {
6d2010ae 527 panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
0a7de745 528 }
6d2010ae 529
530 is_ept = is_ept_pmap(pmap);
531
6d2010ae 532 nexth = (pv_hashed_entry_t)queue_next(&pvh_e->qlink);
533 if (!is_ept) {
534 pmap_update_pte(ptep, PHYS_CACHEABILITY_MASK, attributes);
535 } else {
536 pmap_update_pte(ptep, INTEL_EPT_CACHE_MASK, ept_attributes);
537 }
538 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
539 pvh_e = nexth;
540 } while ((pv_e = (pv_rooted_entry_t)nexth) != pv_h);
541 }
542}
543
544void
545x86_filter_TLB_coherency_interrupts(boolean_t dofilter)
546{
547 assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
548
549 if (dofilter) {
550 CPU_CR3_MARK_INACTIVE();
551 } else {
552 CPU_CR3_MARK_ACTIVE();
39236c6e 553 mfence();
0a7de745 554 pmap_update_interrupt();
555 }
556}
557
558
559/*
560 * Insert the given physical page (p) at
561 * the specified virtual address (v) in the
562 * target physical map with the protection requested.
563 *
564 * If specified, the page will be wired down, meaning
565 * that the related pte cannot be reclaimed.
566 *
567 * NB: This is the only routine which MAY NOT lazy-evaluate
568 * or lose information. That is, this routine must actually
569 * insert this page into the given map NOW.
570 */
316670eb 571
5ba3f43e 572kern_return_t
b7266188 573pmap_enter(
574 pmap_t pmap,
575 vm_map_offset_t vaddr,
b7266188 576 ppnum_t pn,
577 vm_prot_t prot,
578 vm_prot_t fault_type,
579 unsigned int flags,
580 boolean_t wired)
316670eb 581{
5ba3f43e 582 return pmap_enter_options(pmap, vaddr, pn, prot, fault_type, flags, wired, PMAP_EXPAND_OPTIONS_NONE, NULL);
583}
584
585#define PTE_LOCK(EPT) INTEL_PTE_SWLOCK
586
587static inline void PTE_LOCK_LOCK(pt_entry_t *);
588static inline void PTE_LOCK_UNLOCK(pt_entry_t *);
589
590void
591PTE_LOCK_LOCK(pt_entry_t *lpte)
592{
593 pt_entry_t pte;
594plretry:
595 while ((pte = __c11_atomic_load((_Atomic pt_entry_t *)lpte, memory_order_relaxed)) & PTE_LOCK(0)) {
596 __builtin_ia32_pause();
597 }
598 if (__c11_atomic_compare_exchange_strong((_Atomic pt_entry_t *)lpte, &pte, pte | PTE_LOCK(0), memory_order_acquire_smp, TRUE)) {
599 return;
600 }
601
602 goto plretry;
603}
604
605void
606PTE_LOCK_UNLOCK(pt_entry_t *lpte)
607{
608 __c11_atomic_fetch_and((_Atomic pt_entry_t *)lpte, ~PTE_LOCK(0), memory_order_release_smp);
609}
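/*
 * Illustrative sketch (not part of the original file): the intended usage
 * pattern of the per-PTE software lock above, mirroring what
 * pmap_enter_options() does below -- acquire the lock bit, store the new
 * entry with the bit still set, then release it. The function name and
 * the 'template' value are hypothetical.
 */
#if 0 /* example only */
static void
example_update_pte_locked(pt_entry_t *ptep, pt_entry_t template)
{
	PTE_LOCK_LOCK(ptep);                           /* spins until INTEL_PTE_SWLOCK is clear, then sets it */
	pmap_store_pte(ptep, template | PTE_LOCK(0));  /* keep the lock bit set across the store */
	PTE_LOCK_UNLOCK(ptep);                         /* atomically clears INTEL_PTE_SWLOCK */
}
#endif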
39236c6e 610
611kern_return_t
612pmap_enter_options_addr(
613 pmap_t pmap,
614 vm_map_address_t v,
615 pmap_paddr_t pa,
616 vm_prot_t prot,
617 vm_prot_t fault_type,
618 unsigned int flags,
619 boolean_t wired,
620 unsigned int options,
621 __unused void *arg)
622{
623 return pmap_enter_options(pmap, v, intel_btop(pa), prot, fault_type, flags, wired, options, arg);
624}
625
626kern_return_t
627pmap_enter_options(
628 pmap_t pmap,
629 vm_map_offset_t vaddr,
316670eb 630 ppnum_t pn,
631 vm_prot_t prot,
632 __unused vm_prot_t fault_type,
633 unsigned int flags,
634 boolean_t wired,
635 unsigned int options,
636 void *arg)
b7266188 637{
638 pt_entry_t *pte = NULL;
639 pv_rooted_entry_t pv_h;
640 ppnum_t pai;
641 pv_hashed_entry_t pvh_e;
642 pv_hashed_entry_t pvh_new;
643 pt_entry_t template;
644 pmap_paddr_t old_pa;
645 pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn);
646 boolean_t need_tlbflush = FALSE;
647 boolean_t set_NX;
648 char oattr;
649 boolean_t old_pa_locked;
b7266188 650 /* 2MiB mappings are confined to x86_64 by VM */
651 boolean_t superpage = flags & VM_MEM_SUPERPAGE;
652 vm_object_t delpage_pm_obj = NULL;
653 uint64_t delpage_pde_index = 0;
654 pt_entry_t old_pte;
655 kern_return_t kr = KERN_FAILURE;
656 boolean_t is_ept;
657 boolean_t is_altacct;
658 boolean_t ptelocked = FALSE;
5ba3f43e 659
b7266188 660 pmap_intr_assert();
b7266188 661
662 if (__improbable(pmap == PMAP_NULL)) {
663 return KERN_INVALID_ARGUMENT;
664 }
665 if (__improbable(pn == vm_page_guard_addr)) {
316670eb 666 return KERN_INVALID_ARGUMENT;
0a7de745 667 }
316670eb 668
669 is_ept = is_ept_pmap(pmap);
670
671 /* N.B. We can be supplied a zero page frame in the NOENTER case, it's an
672 * unused value for that scenario.
673 */
674 assert(pn != vm_page_fictitious_addr);
675
676
677 PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
678 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(vaddr), pn,
679 prot);
b7266188 680
0a7de745 681 if ((prot & VM_PROT_EXECUTE)) {
b7266188 682 set_NX = FALSE;
0a7de745 683 } else {
b7266188 684 set_NX = TRUE;
0a7de745 685 }
b7266188 686
687#if DEVELOPMENT || DEBUG
688 if (__improbable(set_NX && (!nx_enabled || !pmap->nx_enabled))) {
689 set_NX = FALSE;
690 }
691
692 if (__improbable(set_NX && (pmap == kernel_pmap) &&
693 ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) ||
694 (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) {
695 set_NX = FALSE;
696 }
697#endif
698
699 pvh_new = PV_HASHED_ENTRY_NULL;
700Retry:
701 pvh_e = PV_HASHED_ENTRY_NULL;
702
0a7de745 703 PMAP_LOCK_SHARED(pmap);
704
705 /*
706 * Expand pmap to include this pte. Assume that
707 * pmap is always expanded to include enough hardware
708 * pages to map one VM page.
709 */
710 if (__improbable(superpage)) {
711 while ((pte = pmap_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
b7266188 712 /* need room for another pde entry */
0a7de745 713 PMAP_UNLOCK_SHARED(pmap);
5ba3f43e 714 kr = pmap_expand_pdpt(pmap, vaddr, options);
715 if (kr != KERN_SUCCESS) {
716 goto done1;
717 }
718 PMAP_LOCK_SHARED(pmap);
719 }
720 } else {
721 while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
722 /*
723 * Must unlock to expand the pmap
724 * going to grow pde level page(s)
725 */
0a7de745 726 PMAP_UNLOCK_SHARED(pmap);
5ba3f43e 727 kr = pmap_expand(pmap, vaddr, options);
728 if (kr != KERN_SUCCESS) {
729 goto done1;
730 }
731 PMAP_LOCK_SHARED(pmap);
732 }
733 }
734
735 if (__improbable(options & PMAP_EXPAND_OPTIONS_NOENTER)) {
736 PMAP_UNLOCK_SHARED(pmap);
5ba3f43e 737 kr = KERN_SUCCESS;
0a7de745 738 goto done1;
316670eb 739 }
b7266188 740
0a7de745 741 if (__improbable(superpage && *pte && !(*pte & PTE_PS))) {
742 /*
743 * There is still an empty page table mapped that
744 * was used for a previous base page mapping.
745 * Remember the PDE and the PDE index, so that we
746 * can free the page at the end of this function.
747 */
22ba694c 748 delpage_pde_index = pdeidx(pmap, vaddr);
b7266188 749 delpage_pm_obj = pmap->pm_obj;
0a7de745 750 pmap_store_pte(pte, 0);
751 }
752
753 PTE_LOCK_LOCK(pte);
754 ptelocked = TRUE;
755
756 old_pa = pte_to_pa(*pte);
757 pai = pa_index(old_pa);
758 old_pa_locked = FALSE;
759
39236c6e 760 if (old_pa == 0 &&
cb323159 761 PTE_IS_COMPRESSED(*pte, pte, pmap, vaddr)) {
762 /*
763 * "pmap" should be locked at this point, so this should
764 * not race with another pmap_enter() or pmap_remove_range().
765 */
766 assert(pmap != kernel_pmap);
767
768 /* one less "compressed" */
769 OSAddAtomic64(-1, &pmap->stats.compressed);
39037602 770 pmap_ledger_debit(pmap, task_ledgers.internal_compressed,
0a7de745 771 PAGE_SIZE);
772 if (*pte & PTE_COMPRESSED_ALT) {
773 pmap_ledger_debit(
774 pmap,
775 task_ledgers.alternate_accounting_compressed,
776 PAGE_SIZE);
777 } else {
778 /* was part of the footprint */
779 pmap_ledger_debit(pmap, task_ledgers.phys_footprint,
0a7de745 780 PAGE_SIZE);
39037602 781 }
782 /* marker will be cleared below */
783 }
784
785 /*
786 * if we have a previous managed page, lock the pv entry now. after
787 * we lock it, check to see if someone beat us to the lock and if so
788 * drop the lock
789 */
790 if ((0 != old_pa) && IS_MANAGED_PAGE(pai)) {
791 LOCK_PVH(pai);
792 old_pa_locked = TRUE;
793 old_pa = pte_to_pa(*pte);
794 if (0 == old_pa) {
0a7de745 795 UNLOCK_PVH(pai); /* another path beat us to it */
796 old_pa_locked = FALSE;
797 }
798 }
799
800 /*
801 * Special case if the incoming physical page is already mapped
802 * at this address.
803 */
804 if (old_pa == pa) {
6d2010ae 805 pt_entry_t old_attributes =
0a7de745 806 *pte & ~(PTE_REF(is_ept) | PTE_MOD(is_ept) | PTE_LOCK(is_ept));
807
808 /*
809 * May be changing its wired attribute or protection
810 */
b7266188 811
812 template = pa_to_pte(pa);
813
0a7de745 814 if (__probable(!is_ept)) {
815 template |= INTEL_PTE_VALID;
816 } else {
0a7de745 817 template |= INTEL_EPT_IPAT;
818 }
819
820 template |= pmap_get_cache_attributes(pa_index(pa), is_ept);
b7266188 821
822 /*
823 * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
824 */
825 if (!is_ept && (VM_MEM_NOT_CACHEABLE ==
826 (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)))) {
827 if (!(flags & VM_MEM_GUARDED)) {
828 template |= INTEL_PTE_PAT;
829 }
830 template |= INTEL_PTE_NCACHE;
831 }
0a7de745 832 if (pmap != kernel_pmap && !is_ept) {
b7266188 833 template |= INTEL_PTE_USER;
0a7de745 834 }
3e170ce0 835
0a7de745 836 if (prot & VM_PROT_READ) {
3e170ce0 837 template |= PTE_READ(is_ept);
0a7de745 838 }
3e170ce0 839
39236c6e 840 if (prot & VM_PROT_WRITE) {
841 template |= PTE_WRITE(is_ept);
842 if (is_ept && !pmap_ept_support_ad) {
843 template |= PTE_MOD(is_ept);
844 if (old_pa_locked) {
845 assert(IS_MANAGED_PAGE(pai));
846 pmap_phys_attributes[pai] |= PHYS_MODIFIED;
847 }
848 }
849 }
850 if (prot & VM_PROT_EXECUTE) {
851 assert(set_NX == 0);
852 template = pte_set_ex(template, is_ept);
39236c6e 853 }
b7266188 854
0a7de745 855 if (set_NX) {
3e170ce0 856 template = pte_remove_ex(template, is_ept);
0a7de745 857 }
858
859 if (wired) {
3e170ce0 860 template |= PTE_WIRED;
0a7de745 861 if (!iswired(old_attributes)) {
862 OSAddAtomic(+1, &pmap->stats.wired_count);
863 pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
864 }
b7266188 865 } else {
6d2010ae 866 if (iswired(old_attributes)) {
b7266188 867 assert(pmap->stats.wired_count >= 1);
868 OSAddAtomic(-1, &pmap->stats.wired_count);
869 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
870 }
871 }
3e170ce0 872
873 if (superpage) { /* this path can not be used */
874 template |= PTE_PS; /* to change the page size! */
875 }
876 if (old_attributes == template) {
15129b1c 877 goto dont_update_pte;
0a7de745 878 }
15129b1c 879
880 /* Determine delta, PV locked */
881 need_tlbflush =
3e170ce0 882 ((old_attributes ^ template) != PTE_WIRED);
5ba3f43e 883
0a7de745 884 /* Optimisation: avoid TLB flush when adding writability */
3e170ce0 885 if (need_tlbflush == TRUE && !(old_attributes & PTE_WRITE(is_ept))) {
0a7de745 886 if ((old_attributes ^ template) == PTE_WRITE(is_ept)) {
39236c6e 887 need_tlbflush = FALSE;
0a7de745 888 }
39236c6e 889 }
b7266188 890
3e170ce0 891 /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
0a7de745 892 if (__improbable(is_ept && !pmap_ept_support_ad)) {
893 template |= PTE_REF(is_ept);
894 if (old_pa_locked) {
895 assert(IS_MANAGED_PAGE(pai));
896 pmap_phys_attributes[pai] |= PHYS_REFERENCED;
897 }
898 }
899
b7266188 900 /* store modified PTE and preserve RC bits */
901 pt_entry_t npte, opte;
902
903 assert((*pte & PTE_LOCK(is_ept)) != 0);
904
905 do {
906 opte = *pte;
907 npte = template | (opte & (PTE_REF(is_ept) |
908 PTE_MOD(is_ept))) | PTE_LOCK(is_ept);
316670eb 909 } while (!pmap_cmpx_pte(pte, opte, npte));
0a7de745 910
15129b1c 911dont_update_pte:
912 if (old_pa_locked) {
913 UNLOCK_PVH(pai);
914 old_pa_locked = FALSE;
915 }
0a7de745 916 goto done2;
917 }
918
919 /*
920 * Outline of code from here:
921 * 1) If va was mapped, update TLBs, remove the mapping
922 * and remove old pvlist entry.
923 * 2) Add pvlist entry for new mapping
924 * 3) Enter new mapping.
925 *
926 * If the old physical page is not managed step 1) is skipped
927 * (except for updating the TLBs), and the mapping is
928 * overwritten at step 3). If the new physical page is not
929 * managed, step 2) is skipped.
930 */
0a7de745 931 /* TODO: add opportunistic refmod collect */
b7266188 932 if (old_pa != (pmap_paddr_t) 0) {
0a7de745 933 boolean_t was_altacct = FALSE;
934
935 /*
936 * Don't do anything to pages outside valid memory here.
937 * Instead convince the code that enters a new mapping
938 * to overwrite the old one.
939 */
940
941 /* invalidate the PTE */
3e170ce0 942 pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
943 /* propagate invalidate everywhere */
944 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
945 /* remember reference and change */
0a7de745 946 old_pte = *pte;
3e170ce0 947 oattr = (char) (old_pte & (PTE_MOD(is_ept) | PTE_REF(is_ept)));
b7266188 948 /* completely invalidate the PTE */
0a7de745 949 pmap_store_pte(pte, PTE_LOCK(is_ept));
b7266188 950
951 if (IS_MANAGED_PAGE(pai)) {
952 /*
953 * Remove the mapping from the pvlist for
954 * this physical page.
955 * We'll end up with either a rooted pv or a
956 * hashed pv
957 */
958 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte, &was_altacct);
959 }
960
b7266188 961 if (IS_MANAGED_PAGE(pai)) {
6d2010ae 962 pmap_assert(old_pa_locked == TRUE);
316670eb 963 pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
b7266188 964 assert(pmap->stats.resident_count >= 1);
316670eb 965 OSAddAtomic(-1, &pmap->stats.resident_count);
39236c6e 966 if (pmap != kernel_pmap) {
d190cdc3 967 /* update pmap stats */
39236c6e 968 if (IS_REUSABLE_PAGE(pai)) {
969 PMAP_STATS_ASSERTF(
970 (pmap->stats.reusable > 0,
971 "reusable %d",
972 pmap->stats.reusable));
973 OSAddAtomic(-1, &pmap->stats.reusable);
974 } else if (IS_INTERNAL_PAGE(pai)) {
975 PMAP_STATS_ASSERTF(
976 (pmap->stats.internal > 0,
977 "internal %d",
978 pmap->stats.internal));
979 OSAddAtomic(-1, &pmap->stats.internal);
980 } else {
981 PMAP_STATS_ASSERTF(
982 (pmap->stats.external > 0,
983 "external %d",
984 pmap->stats.external));
985 OSAddAtomic(-1, &pmap->stats.external);
986 }
987
988 /* update ledgers */
989 if (was_altacct) {
990 assert(IS_INTERNAL_PAGE(pai));
991 pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
992 pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
993 } else if (IS_REUSABLE_PAGE(pai)) {
994 assert(!was_altacct);
995 assert(IS_INTERNAL_PAGE(pai));
996 /* was already not in phys_footprint */
997 } else if (IS_INTERNAL_PAGE(pai)) {
998 assert(!was_altacct);
999 assert(!IS_REUSABLE_PAGE(pai));
1000 pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
1001 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
1002 } else {
1003 /* not an internal page */
1004 }
39236c6e 1005 }
b7266188 1006 if (iswired(*pte)) {
b7266188 1007 assert(pmap->stats.wired_count >= 1);
1008 OSAddAtomic(-1, &pmap->stats.wired_count);
1009 pmap_ledger_debit(pmap, task_ledgers.wired_mem,
1010 PAGE_SIZE);
b7266188 1011 }
1012
1013 if (!is_ept) {
1014 pmap_phys_attributes[pai] |= oattr;
1015 } else {
1016 pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr);
1017 }
b7266188 1018 } else {
1019 /*
1020 * old_pa is not managed.
1021 * Do removal part of accounting.
1022 */
1023
1024 if (pmap != kernel_pmap) {
1025#if 00
1026 assert(pmap->stats.device > 0);
1027 OSAddAtomic(-1, &pmap->stats.device);
1028#endif
1029 }
1030 if (iswired(*pte)) {
1031 assert(pmap->stats.wired_count >= 1);
1032 OSAddAtomic(-1, &pmap->stats.wired_count);
1033 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
1034 }
1035 }
1036 }
1037
1038 /*
1039 * if we had a previously managed paged locked, unlock it now
1040 */
1041 if (old_pa_locked) {
1042 UNLOCK_PVH(pai);
1043 old_pa_locked = FALSE;
1044 }
1045
0a7de745 1046 pai = pa_index(pa); /* now working with new incoming phys page */
b7266188 1047 if (IS_MANAGED_PAGE(pai)) {
b7266188 1048 /*
1049 * Step 2) Enter the mapping in the PV list for this
1050 * physical page.
1051 */
1052 pv_h = pai_to_pvh(pai);
1053
1054 LOCK_PVH(pai);
1055
1056 if (pv_h->pmap == PMAP_NULL) {
1057 /*
1058 * No mappings yet, use rooted pv
1059 */
39037602 1060 pv_h->va_and_flags = vaddr;
1061 pv_h->pmap = pmap;
1062 queue_init(&pv_h->qlink);
1063
1064 if (options & PMAP_OPTIONS_INTERNAL) {
1065 pmap_phys_attributes[pai] |= PHYS_INTERNAL;
1066 } else {
1067 pmap_phys_attributes[pai] &= ~PHYS_INTERNAL;
1068 }
1069 if (options & PMAP_OPTIONS_REUSABLE) {
1070 pmap_phys_attributes[pai] |= PHYS_REUSABLE;
1071 } else {
1072 pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
1073 }
1074 if ((options & PMAP_OPTIONS_ALT_ACCT) &&
1075 IS_INTERNAL_PAGE(pai)) {
39037602 1076 pv_h->va_and_flags |= PVE_IS_ALTACCT;
d190cdc3 1077 is_altacct = TRUE;
1078 } else {
1079 pv_h->va_and_flags &= ~PVE_IS_ALTACCT;
d190cdc3 1080 is_altacct = FALSE;
39037602 1081 }
1082 } else {
1083 /*
1084 * Add new pv_hashed_entry after header.
1085 */
1086 if ((PV_HASHED_ENTRY_NULL == pvh_e) && pvh_new) {
1087 pvh_e = pvh_new;
1088 pvh_new = PV_HASHED_ENTRY_NULL;
1089 } else if (PV_HASHED_ENTRY_NULL == pvh_e) {
6d2010ae 1090 PV_HASHED_ALLOC(&pvh_e);
1091 if (PV_HASHED_ENTRY_NULL == pvh_e) {
1092 /*
1093 * the pv list is empty. if we are on
1094 * the kernel pmap we'll use one of
1095 * the special private kernel pv_e's,
1096 * else, we need to unlock
1097 * everything, zalloc a pv_e, and
1098 * restart bringing in the pv_e with
1099 * us.
1100 */
1101 if (kernel_pmap == pmap) {
6d2010ae 1102 PV_HASHED_KERN_ALLOC(&pvh_e);
1103 } else {
1104 UNLOCK_PVH(pai);
1105 PTE_LOCK_UNLOCK(pte);
1106 PMAP_UNLOCK_SHARED(pmap);
6d2010ae 1107 pmap_pv_throttle(pmap);
1108 pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
1109 goto Retry;
1110 }
1111 }
1112 }
5ba3f43e 1113
0a7de745 1114 if (PV_HASHED_ENTRY_NULL == pvh_e) {
b7266188 1115 panic("Mapping alias chain exhaustion, possibly induced by numerous kernel virtual double mappings");
0a7de745 1116 }
b7266188 1117
39037602 1118 pvh_e->va_and_flags = vaddr;
1119 pvh_e->pmap = pmap;
1120 pvh_e->ppn = pn;
1121 if ((options & PMAP_OPTIONS_ALT_ACCT) &&
1122 IS_INTERNAL_PAGE(pai)) {
1123 pvh_e->va_and_flags |= PVE_IS_ALTACCT;
1124 is_altacct = TRUE;
1125 } else {
1126 pvh_e->va_and_flags &= ~PVE_IS_ALTACCT;
1127 is_altacct = FALSE;
1128 }
1129 pv_hash_add(pvh_e, pv_h);
1130
1131 /*
1132 * Remember that we used the pvlist entry.
1133 */
1134 pvh_e = PV_HASHED_ENTRY_NULL;
1135 }
1136
1137 /*
1138 * only count the mapping
1139 * for 'managed memory'
1140 */
316670eb 1141 pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
0a7de745 1142 OSAddAtomic(+1, &pmap->stats.resident_count);
1143 if (pmap->stats.resident_count > pmap->stats.resident_max) {
1144 pmap->stats.resident_max = pmap->stats.resident_count;
1145 }
39236c6e 1146 if (pmap != kernel_pmap) {
d190cdc3 1147 /* update pmap stats */
1148 if (IS_REUSABLE_PAGE(pai)) {
1149 OSAddAtomic(+1, &pmap->stats.reusable);
1150 PMAP_STATS_PEAK(pmap->stats.reusable);
1151 } else if (IS_INTERNAL_PAGE(pai)) {
1152 OSAddAtomic(+1, &pmap->stats.internal);
1153 PMAP_STATS_PEAK(pmap->stats.internal);
1154 } else {
1155 OSAddAtomic(+1, &pmap->stats.external);
1156 PMAP_STATS_PEAK(pmap->stats.external);
1157 }
1158
1159 /* update ledgers */
1160 if (is_altacct) {
1161 /* internal but also alternate accounting */
1162 assert(IS_INTERNAL_PAGE(pai));
1163 pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
1164 pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
1165 /* alternate accounting, so not in footprint */
1166 } else if (IS_REUSABLE_PAGE(pai)) {
1167 assert(!is_altacct);
1168 assert(IS_INTERNAL_PAGE(pai));
1169 /* internal but reusable: not in footprint */
1170 } else if (IS_INTERNAL_PAGE(pai)) {
1171 assert(!is_altacct);
1172 assert(!IS_REUSABLE_PAGE(pai));
1173 /* internal: add to footprint */
1174 pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
1175 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
1176 } else {
1177 /* not internal: not in footprint */
1178 }
39236c6e 1179 }
1180 } else if (last_managed_page == 0) {
1181 /* Account for early mappings created before "managed pages"
1182 * are determined. Consider consulting the available DRAM map.
1183 */
316670eb 1184 pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
0a7de745 1185 OSAddAtomic(+1, &pmap->stats.resident_count);
1186 if (pmap != kernel_pmap) {
1187#if 00
1188 OSAddAtomic(+1, &pmap->stats.device);
1189 PMAP_STATS_PEAK(pmap->stats.device);
1190#endif
1191 }
1192 }
1193 /*
1194 * Step 3) Enter the mapping.
1195 *
1196 * Build a template to speed up entering -
1197 * only the pfn changes.
1198 */
1199 template = pa_to_pte(pa);
1200
1201 if (!is_ept) {
1202 template |= INTEL_PTE_VALID;
1203 } else {
0a7de745 1204 template |= INTEL_EPT_IPAT;
1205 }
1206
1207
1208 /*
1209 * DRK: It may be worth asserting on cache attribute flags that diverge
1210 * from the existing physical page attributes.
1211 */
b7266188 1212
1213 template |= pmap_get_cache_attributes(pa_index(pa), is_ept);
1214
1215 /*
1216 * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs
1217 */
1218 if (!is_ept && (flags & VM_MEM_NOT_CACHEABLE)) {
1219 if (!(flags & VM_MEM_GUARDED)) {
1220 template |= INTEL_PTE_PAT;
1221 }
1222 template |= INTEL_PTE_NCACHE;
1223 }
0a7de745 1224 if (pmap != kernel_pmap && !is_ept) {
b7266188 1225 template |= INTEL_PTE_USER;
1226 }
1227 if (prot & VM_PROT_READ) {
3e170ce0 1228 template |= PTE_READ(is_ept);
0a7de745 1229 }
1230 if (prot & VM_PROT_WRITE) {
1231 template |= PTE_WRITE(is_ept);
1232 if (is_ept && !pmap_ept_support_ad) {
1233 template |= PTE_MOD(is_ept);
0a7de745 1234 if (IS_MANAGED_PAGE(pai)) {
3e170ce0 1235 pmap_phys_attributes[pai] |= PHYS_MODIFIED;
0a7de745 1236 }
1237 }
1238 }
1239 if (prot & VM_PROT_EXECUTE) {
1240 assert(set_NX == 0);
1241 template = pte_set_ex(template, is_ept);
1242 }
1243
0a7de745 1244 if (set_NX) {
3e170ce0 1245 template = pte_remove_ex(template, is_ept);
0a7de745 1246 }
1247 if (wired) {
1248 template |= INTEL_PTE_WIRED;
0a7de745 1249 OSAddAtomic(+1, &pmap->stats.wired_count);
316670eb 1250 pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
b7266188 1251 }
0a7de745 1252 if (__improbable(superpage)) {
b7266188 1253 template |= INTEL_PTE_PS;
0a7de745 1254 }
1255
1256 /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */
0a7de745 1257 if (__improbable(is_ept && !pmap_ept_support_ad)) {
3e170ce0 1258 template |= PTE_REF(is_ept);
0a7de745 1259 if (IS_MANAGED_PAGE(pai)) {
3e170ce0 1260 pmap_phys_attributes[pai] |= PHYS_REFERENCED;
0a7de745 1261 }
3e170ce0 1262 }
0a7de745 1263 template |= PTE_LOCK(is_ept);
1264 pmap_store_pte(pte, template);
1265
1266 /*
1267 * if this was a managed page we delayed unlocking the pv until here
1268 * to prevent pmap_page_protect et al from finding it until the pte
1269 * has been stored
1270 */
1271 if (IS_MANAGED_PAGE(pai)) {
1272 UNLOCK_PVH(pai);
1273 }
0a7de745 1274done2:
39236c6e 1275 if (need_tlbflush == TRUE) {
0a7de745 1276 if (options & PMAP_OPTIONS_NOFLUSH) {
39236c6e 1277 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
0a7de745 1278 } else {
39236c6e 1279 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
1280 }
1281 }
1282 if (ptelocked) {
1283 PTE_LOCK_UNLOCK(pte);
39236c6e 1284 }
1285 PMAP_UNLOCK_SHARED(pmap);
1286
1287 if (pvh_e != PV_HASHED_ENTRY_NULL) {
1288 PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1);
1289 }
1290 if (pvh_new != PV_HASHED_ENTRY_NULL) {
1291 PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1);
1292 }
1293
1294 if (delpage_pm_obj) {
1295 vm_page_t m;
1296
1297 vm_object_lock(delpage_pm_obj);
22ba694c 1298 m = vm_page_lookup(delpage_pm_obj, (delpage_pde_index * PAGE_SIZE));
1299 if (m == VM_PAGE_NULL) {
1300 panic("pmap_enter: pte page not in object");
1301 }
b7266188 1302 VM_PAGE_FREE(m);
3e170ce0 1303 vm_object_unlock(delpage_pm_obj);
0a7de745 1304 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb 1305 PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
1306 }
1307
5ba3f43e 1308 kr = KERN_SUCCESS;
0a7de745 1309done1:
1310 PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, kr);
1311 return kr;
1312}
1313
1314/*
1315 * Remove a range of hardware page-table entries.
1316 * The entries given are the first (inclusive)
1317 * and last (exclusive) entries for the VM pages.
1318 * The virtual address is the va for the first pte.
1319 *
1320 * The pmap must be locked.
1321 * If the pmap is not the kernel pmap, the range must lie
1322 * entirely within one pte-page. This is NOT checked.
1323 * Assumes that the pte-page exists.
1324 */
1325
1326void
1327pmap_remove_range(
1328 pmap_t pmap,
1329 vm_map_offset_t start_vaddr,
1330 pt_entry_t *spte,
1331 pt_entry_t *epte)
39236c6e 1332{
3e170ce0 1333 pmap_remove_range_options(pmap, start_vaddr, spte, epte,
0a7de745 1334 PMAP_OPTIONS_REMOVE);
1335}
1336
f427ee49 1337static void
39236c6e 1338pmap_remove_range_options(
1339 pmap_t pmap,
1340 vm_map_offset_t start_vaddr,
1341 pt_entry_t *spte,
1342 pt_entry_t *epte,
1343 int options)
b7266188 1344{
0a7de745 1345 pt_entry_t *cpte;
1346 pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
1347 pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
1348 pv_hashed_entry_t pvh_e;
1349 int pvh_cnt = 0;
1350 int num_removed, num_unwired, num_found, num_invalid;
1351 int stats_external, stats_internal, stats_reusable;
1352 uint64_t stats_compressed;
1353 int ledgers_internal, ledgers_alt_internal;
1354 uint64_t ledgers_compressed, ledgers_alt_compressed;
1355 ppnum_t pai;
1356 pmap_paddr_t pa;
1357 vm_map_offset_t vaddr;
1358 boolean_t is_ept = is_ept_pmap(pmap);
1359 boolean_t was_altacct;
1360
1361 num_removed = 0;
1362 num_unwired = 0;
1363 num_found = 0;
1364 num_invalid = 0;
1365 stats_external = 0;
1366 stats_internal = 0;
1367 stats_reusable = 0;
1368 stats_compressed = 0;
1369 ledgers_internal = 0;
1370 ledgers_compressed = 0;
1371 ledgers_alt_internal = 0;
1372 ledgers_alt_compressed = 0;
f427ee49 1373
1374 /* invalidate the PTEs first to "freeze" them */
1375 for (cpte = spte, vaddr = start_vaddr;
1376 cpte < epte;
1377 cpte++, vaddr += PAGE_SIZE_64) {
1378 pt_entry_t p = *cpte;
1379
1380 pa = pte_to_pa(p);
39236c6e 1381 if (pa == 0) {
d190cdc3 1382 if ((options & PMAP_OPTIONS_REMOVE) &&
cb323159 1383 (PTE_IS_COMPRESSED(p, cpte, pmap, vaddr))) {
d190cdc3 1384 assert(pmap != kernel_pmap);
39037602 1385 /* one less "compressed"... */
1386 stats_compressed++;
1387 ledgers_compressed++;
1388 if (p & PTE_COMPRESSED_ALT) {
1389 /* ... but it used to be "ALTACCT" */
d190cdc3 1390 ledgers_alt_compressed++;
1391 }
1392 /* clear marker(s) */
39236c6e 1393 /* XXX probably does not need to be atomic! */
39037602 1394 pmap_update_pte(cpte, INTEL_PTE_COMPRESSED_MASK, 0);
39236c6e 1395 }
b7266188 1396 continue;
39236c6e 1397 }
1398 num_found++;
1399
0a7de745 1400 if (iswired(p)) {
b7266188 1401 num_unwired++;
1402 }
1403
1404 pai = pa_index(pa);
1405
1406 if (!IS_MANAGED_PAGE(pai)) {
1407 /*
1408 * Outside range of managed physical memory.
1409 * Just remove the mappings.
1410 */
1411 pmap_store_pte(cpte, 0);
1412 continue;
1413 }
1414
0a7de745 1415 if ((p & PTE_VALID_MASK(is_ept)) == 0) {
b7266188 1416 num_invalid++;
0a7de745 1417 }
b7266188 1418
316670eb 1419 /* invalidate the PTE */
3e170ce0 1420 pmap_update_pte(cpte, PTE_VALID_MASK(is_ept), 0);
1421 }
1422
1423 if (num_found == 0) {
1424 /* nothing was changed: we're done */
0a7de745 1425 goto update_counts;
1426 }
1427
1428 /* propagate the invalidates to other CPUs */
1429
1430 PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
1431
1432 for (cpte = spte, vaddr = start_vaddr;
1433 cpte < epte;
1434 cpte++, vaddr += PAGE_SIZE_64) {
b7266188 1435 pa = pte_to_pa(*cpte);
39037602 1436 if (pa == 0) {
0a7de745 1437check_pte_for_compressed_marker:
1438 /*
1439 * This PTE could have been replaced with a
1440 * "compressed" marker after our first "freeze"
1441 * loop above, so check again.
1442 */
d190cdc3 1443 if ((options & PMAP_OPTIONS_REMOVE) &&
cb323159 1444 (PTE_IS_COMPRESSED(*cpte, cpte, pmap, vaddr))) {
d190cdc3 1445 assert(pmap != kernel_pmap);
39037602 1446 /* one less "compressed"... */
1447 stats_compressed++;
1448 ledgers_compressed++;
1449 if (*cpte & PTE_COMPRESSED_ALT) {
1450 /* ... but it used to be "ALTACCT" */
d190cdc3 1451 ledgers_alt_compressed++;
1452 }
1453 pmap_store_pte(cpte, 0);
1454 }
b7266188 1455 continue;
39037602 1456 }
1457
1458 pai = pa_index(pa);
1459
1460 LOCK_PVH(pai);
1461
1462 pa = pte_to_pa(*cpte);
1463 if (pa == 0) {
1464 UNLOCK_PVH(pai);
39037602 1465 goto check_pte_for_compressed_marker;
b7266188 1466 }
1467
1468 /*
1469 * Remove the mapping from the pvlist for this physical page.
1470 */
1471 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte, &was_altacct);
1472
b7266188 1473 num_removed++;
d190cdc3 1474 /* update pmap stats */
39236c6e 1475 if (IS_REUSABLE_PAGE(pai)) {
d190cdc3 1476 stats_reusable++;
39236c6e 1477 } else if (IS_INTERNAL_PAGE(pai)) {
1478 stats_internal++;
1479 } else {
1480 stats_external++;
1481 }
1482 /* update ledgers */
1483 if (was_altacct) {
1484 /* internal and alternate accounting */
1485 assert(IS_INTERNAL_PAGE(pai));
1486 ledgers_internal++;
1487 ledgers_alt_internal++;
1488 } else if (IS_REUSABLE_PAGE(pai)) {
1489 /* internal but reusable */
1490 assert(!was_altacct);
1491 assert(IS_INTERNAL_PAGE(pai));
1492 } else if (IS_INTERNAL_PAGE(pai)) {
1493 /* internal */
1494 assert(!was_altacct);
1495 assert(!IS_REUSABLE_PAGE(pai));
1496 ledgers_internal++;
39236c6e 1497 } else {
d190cdc3 1498 /* not internal */
39236c6e 1499 }
1500
1501 /*
1502 * Get the modify and reference bits, then
1503 * nuke the entry in the page table
1504 */
b7266188 1505 /* remember reference and change */
1506 if (!is_ept) {
1507 pmap_phys_attributes[pai] |=
0a7de745 1508 *cpte & (PHYS_MODIFIED | PHYS_REFERENCED);
1509 } else {
1510 pmap_phys_attributes[pai] |=
0a7de745 1511 ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
7e41aa88 1512 }
b7266188 1513
1514 /* completely invalidate the PTE */
1515 pmap_store_pte(cpte, 0);
1516
1517 UNLOCK_PVH(pai);
1518
1519 if (pvh_e != PV_HASHED_ENTRY_NULL) {
1520 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
1521 pvh_eh = pvh_e;
1522
1523 if (pvh_et == PV_HASHED_ENTRY_NULL) {
1524 pvh_et = pvh_e;
1525 }
1526 pvh_cnt++;
1527 }
1528 /* We can encounter at most 'num_found' PTEs for this level
1529 * Fewer may be encountered if some were replaced by
1530 * compressed markers. No new valid PTEs can be created
1531 * since the pmap lock is held exclusively.
1532 */
1533 if (num_removed == num_found) {
1534 break;
1535 }
1536 } /* for loop */
1537
1538 if (pvh_eh != PV_HASHED_ENTRY_NULL) {
1539 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
1540 }
1541update_counts:
1542 /*
1543 * Update the counts
1544 */
1545#if TESTING
1546 if (pmap->stats.resident_count < num_removed) {
1547 panic("pmap_remove_range: resident_count");
1548 }
b7266188 1549#endif
1550 if (num_removed) {
1551 pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed));
1552 PMAP_STATS_ASSERTF((pmap->stats.resident_count >= num_removed,
1553 "pmap=%p num_removed=%d stats.resident_count=%d",
1554 pmap, num_removed, pmap->stats.resident_count));
1555 OSAddAtomic(-num_removed, &pmap->stats.resident_count);
1556 }
b7266188 1557
39236c6e 1558 if (pmap != kernel_pmap) {
d190cdc3 1559 PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external,
1560 "pmap=%p stats_external=%d stats.external=%d",
1561 pmap, stats_external, pmap->stats.external));
d190cdc3 1562 PMAP_STATS_ASSERTF((pmap->stats.internal >= stats_internal,
1563 "pmap=%p stats_internal=%d stats.internal=%d",
1564 pmap, stats_internal, pmap->stats.internal));
d190cdc3 1565 PMAP_STATS_ASSERTF((pmap->stats.reusable >= stats_reusable,
1566 "pmap=%p stats_reusable=%d stats.reusable=%d",
1567 pmap, stats_reusable, pmap->stats.reusable));
d190cdc3 1568 PMAP_STATS_ASSERTF((pmap->stats.compressed >= stats_compressed,
1569 "pmap=%p stats_compressed=%lld, stats.compressed=%lld",
1570 pmap, stats_compressed, pmap->stats.compressed));
1571
1572 /* update pmap stats */
1573 if (stats_external) {
1574 OSAddAtomic(-stats_external, &pmap->stats.external);
39037602 1575 }
1576 if (stats_internal) {
1577 OSAddAtomic(-stats_internal, &pmap->stats.internal);
1578 }
0a7de745 1579 if (stats_reusable) {
d190cdc3 1580 OSAddAtomic(-stats_reusable, &pmap->stats.reusable);
1581 }
1582 if (stats_compressed) {
d190cdc3 1583 OSAddAtomic64(-stats_compressed, &pmap->stats.compressed);
0a7de745 1584 }
d190cdc3 1585 /* update ledgers */
f427ee49 1586
d190cdc3 1587 if (ledgers_internal) {
39037602 1588 pmap_ledger_debit(pmap,
1589 task_ledgers.internal,
1590 machine_ptob(ledgers_internal));
39037602 1591 }
d190cdc3 1592 if (ledgers_compressed) {
39037602 1593 pmap_ledger_debit(pmap,
1594 task_ledgers.internal_compressed,
1595 machine_ptob(ledgers_compressed));
39037602 1596 }
d190cdc3 1597 if (ledgers_alt_internal) {
39037602 1598 pmap_ledger_debit(pmap,
1599 task_ledgers.alternate_accounting,
1600 machine_ptob(ledgers_alt_internal));
39037602 1601 }
d190cdc3 1602 if (ledgers_alt_compressed) {
39037602 1603 pmap_ledger_debit(pmap,
1604 task_ledgers.alternate_accounting_compressed,
1605 machine_ptob(ledgers_alt_compressed));
39037602 1606 }
1607
1608 uint64_t net_debit = (ledgers_internal - ledgers_alt_internal) + (ledgers_compressed - ledgers_alt_compressed);
1609 if (net_debit) {
1610 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, machine_ptob(net_debit));
1611 }
1612 }
1613
b7266188 1614#if TESTING
1615 if (pmap->stats.wired_count < num_unwired) {
1616 panic("pmap_remove_range: wired_count");
1617 }
b7266188 1618#endif
39037602 1619 PMAP_STATS_ASSERTF((pmap->stats.wired_count >= num_unwired,
1620 "pmap=%p num_unwired=%d stats.wired_count=%d",
1621 pmap, num_unwired, pmap->stats.wired_count));
b7266188 1622
1623 if (num_unwired != 0) {
1624 OSAddAtomic(-num_unwired, &pmap->stats.wired_count);
1625 pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired));
1626 }
1627 return;
1628}
1629
1630
1631/*
1632 * Remove the given range of addresses
1633 * from the specified map.
1634 *
1635 * It is assumed that the start and end are properly
1636 * rounded to the hardware page size.
1637 */
1638void
1639pmap_remove(
1640 pmap_t map,
1641 addr64_t s64,
1642 addr64_t e64)
39236c6e 1643{
3e170ce0 1644 pmap_remove_options(map, s64, e64, PMAP_OPTIONS_REMOVE);
39236c6e 1645}
f427ee49 1646#define PLCHECK_THRESHOLD (2)
1647
1648void
1649pmap_remove_options(
1650 pmap_t map,
1651 addr64_t s64,
1652 addr64_t e64,
1653 int options)
1654{
1655 pt_entry_t *pde;
1656 pt_entry_t *spte, *epte;
1657 addr64_t l64;
1658 uint64_t deadline = 0;
1659 boolean_t is_ept;
1660
1661 pmap_intr_assert();
1662
0a7de745 1663 if (map == PMAP_NULL || s64 == e64) {
b7266188 1664 return;
0a7de745 1665 }
b7266188 1666
1667 is_ept = is_ept_pmap(map);
1668
b7266188 1669 PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
1670 VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(s64),
1671 VM_KERNEL_ADDRHIDE(e64));
b7266188 1672
1673 PMAP_LOCK_EXCLUSIVE(map);
1674 uint32_t traverse_count = 0;
b7266188 1675
1676 while (s64 < e64) {
1677 pml4_entry_t *pml4e = pmap64_pml4(map, s64);
1678 if ((pml4e == NULL) ||
1679 ((*pml4e & PTE_VALID_MASK(is_ept)) == 0)) {
1680 s64 = (s64 + NBPML4) & ~(PML4MASK);
1681 continue;
1682 }
1683 pdpt_entry_t *pdpte = pmap64_pdpt(map, s64);
1684 if ((pdpte == NULL) ||
1685 ((*pdpte & PTE_VALID_MASK(is_ept)) == 0)) {
1686 s64 = (s64 + NBPDPT) & ~(PDPTMASK);
1687 continue;
b7266188 1688 }
b7266188 1689
0a7de745 1690 l64 = (s64 + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE - 1);
b7266188 1691
0a7de745 1692 if (l64 > e64) {
b7266188 1693 l64 = e64;
1694 }
1695
1696 pde = pmap_pde(map, s64);
1697
1698 if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
1699 if (*pde & PTE_PS) {
1700 /*
1701 * If we're removing a superpage, pmap_remove_range()
1702 * must work on level 2 instead of level 1; and we're
1703 * only passing a single level 2 entry instead of a
1704 * level 1 range.
1705 */
1706 spte = pde;
0a7de745 1707 epte = spte + 1; /* excluded */
b7266188 1708 } else {
0a7de745 1709 spte = pmap_pte(map, (s64 & ~(PDE_MAPPED_SIZE - 1)));
1710 spte = &spte[ptenum(s64)];
1711 epte = &spte[intel_btop(l64 - s64)];
1712 }
39236c6e 1713 pmap_remove_range_options(map, s64, spte, epte,
0a7de745 1714 options);
1715 }
1716 s64 = l64;
1717
1718 if ((s64 < e64) && (traverse_count++ > PLCHECK_THRESHOLD)) {
1719 if (deadline == 0) {
f427ee49 1720 deadline = rdtsc64_nofence() + max_preemption_latency_tsc;
0a7de745 1721 } else {
f427ee49 1722 if (rdtsc64_nofence() > deadline) {
1723 PMAP_UNLOCK_EXCLUSIVE(map);
1724 __builtin_ia32_pause();
1725 PMAP_LOCK_EXCLUSIVE(map);
f427ee49 1726 deadline = rdtsc64_nofence() + max_preemption_latency_tsc;
1727 }
1728 }
1729 }
1730 }
1731
0a7de745 1732 PMAP_UNLOCK_EXCLUSIVE(map);
b7266188 1733
5ba3f43e 1734 PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END);
1735}
1736
1737void
1738pmap_page_protect(
1739 ppnum_t pn,
1740 vm_prot_t prot)
1741{
1742 pmap_page_protect_options(pn, prot, 0, NULL);
1743}
1744
b7266188 1745/*
39236c6e 1746 * Routine: pmap_page_protect_options
1747 *
1748 * Function:
1749 * Lower the permission for all mappings to a given
1750 * page.
1751 */
1752void
39236c6e 1753pmap_page_protect_options(
1754 ppnum_t pn,
1755 vm_prot_t prot,
1756 unsigned int options,
1757 void *arg)
b7266188 1758{
1759 pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL;
1760 pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL;
1761 pv_hashed_entry_t nexth;
1762 int pvh_cnt = 0;
1763 pv_rooted_entry_t pv_h;
1764 pv_rooted_entry_t pv_e;
1765 pv_hashed_entry_t pvh_e;
1766 pt_entry_t *pte;
1767 int pai;
1768 pmap_t pmap;
1769 boolean_t remove;
1770 pt_entry_t new_pte_value;
1771 boolean_t is_ept;
1772
1773 pmap_intr_assert();
1774 assert(pn != vm_page_fictitious_addr);
0a7de745 1775 if (pn == vm_page_guard_addr) {
b7266188 1776 return;
0a7de745 1777 }
1778
1779 pai = ppn_to_pai(pn);
1780
1781 if (!IS_MANAGED_PAGE(pai)) {
1782 /*
1783 * Not a managed page.
1784 */
1785 return;
1786 }
1787
1788 PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START, pn, prot);
1789
1790 /*
1791 * Determine the new protection.
1792 */
1793 switch (prot) {
1794 case VM_PROT_READ:
1795 case VM_PROT_READ | VM_PROT_EXECUTE:
1796 remove = FALSE;
1797 break;
1798 case VM_PROT_ALL:
0a7de745 1799 return; /* nothing to do */
1800 default:
1801 remove = TRUE;
1802 break;
1803 }
1804
1805 pv_h = pai_to_pvh(pai);
1806
1807 LOCK_PVH(pai);
1808
1809
1810 /*
1811 * Walk down PV list, if any, changing or removing all mappings.
1812 */
0a7de745 1813 if (pv_h->pmap == PMAP_NULL) {
b7266188 1814 goto done;
0a7de745 1815 }
b7266188
A
1816
1817 pv_e = pv_h;
0a7de745 1818 pvh_e = (pv_hashed_entry_t) pv_e; /* cheat */
b7266188
A
1819
1820 do {
1821 vm_map_offset_t vaddr;
1822
3e170ce0
A
1823 if ((options & PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
1824 (pmap_phys_attributes[pai] & PHYS_MODIFIED)) {
1825 /* page was modified, so it will be compressed */
1826 options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1827 options |= PMAP_OPTIONS_COMPRESSOR;
1828 }
1829
b7266188 1830 pmap = pv_e->pmap;
3e170ce0 1831 is_ept = is_ept_pmap(pmap);
39037602 1832 vaddr = PVE_VA(pv_e);
b7266188
A
1833 pte = pmap_pte(pmap, vaddr);
1834
6d2010ae
A
1835 pmap_assert2((pa_index(pte_to_pa(*pte)) == pn),
1836 "pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);
1837
b7266188
A
1838 if (0 == pte) {
1839 panic("pmap_page_protect() "
0a7de745
A
1840 "pmap=%p pn=0x%x vaddr=0x%llx\n",
1841 pmap, pn, vaddr);
b7266188
A
1842 }
1843 nexth = (pv_hashed_entry_t) queue_next(&pvh_e->qlink);
1844
1845 /*
1846 * Remove the mapping if new protection is NONE
b7266188 1847 */
6d2010ae 1848 if (remove) {
6d2010ae
A
1849 /* Remove per-pmap wired count */
1850 if (iswired(*pte)) {
1851 OSAddAtomic(-1, &pmap->stats.wired_count);
316670eb 1852 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
6d2010ae
A
1853 }
1854
39236c6e
A
1855 if (pmap != kernel_pmap &&
1856 (options & PMAP_OPTIONS_COMPRESSOR) &&
1857 IS_INTERNAL_PAGE(pai)) {
cb323159 1858 assert(!PTE_IS_COMPRESSED(*pte, pte, pmap, vaddr));
39037602 1859 /* mark this PTE as having been "compressed" */
3e170ce0 1860 new_pte_value = PTE_COMPRESSED;
d190cdc3 1861 if (IS_ALTACCT_PAGE(pai, pv_e)) {
39037602
A
1862 new_pte_value |= PTE_COMPRESSED_ALT;
1863 }
39236c6e
A
1864 } else {
1865 new_pte_value = 0;
1866 }
1867
1868 if (options & PMAP_OPTIONS_NOREFMOD) {
1869 pmap_store_pte(pte, new_pte_value);
b7266188 1870
0a7de745 1871 if (options & PMAP_OPTIONS_NOFLUSH) {
39236c6e 1872 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
0a7de745 1873 } else {
39236c6e 1874 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
0a7de745 1875 }
39236c6e
A
1876 } else {
1877 /*
1878 * Remove the mapping, collecting dirty bits.
1879 */
3e170ce0 1880 pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0);
39236c6e 1881
0a7de745 1882 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
39037602
A
1883 if (!is_ept) {
1884 pmap_phys_attributes[pai] |=
0a7de745 1885 *pte & (PHYS_MODIFIED | PHYS_REFERENCED);
39037602
A
1886 } else {
1887 pmap_phys_attributes[pai] |=
0a7de745 1888 ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
39037602 1889 }
3e170ce0 1890 if ((options &
0a7de745 1891 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) &&
39037602
A
1892 IS_INTERNAL_PAGE(pai) &&
1893 (pmap_phys_attributes[pai] &
0a7de745 1894 PHYS_MODIFIED)) {
3e170ce0
A
1895 /*
1896 * Page is actually "modified" and
1897 * will be compressed. Start
1898 * accounting for it as "compressed".
1899 */
39037602 1900 assert(!(options & PMAP_OPTIONS_COMPRESSOR));
3e170ce0
A
1901 options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
1902 options |= PMAP_OPTIONS_COMPRESSOR;
39037602 1903 assert(new_pte_value == 0);
d190cdc3
A
1904 if (pmap != kernel_pmap) {
1905 new_pte_value = PTE_COMPRESSED;
1906 if (IS_ALTACCT_PAGE(pai, pv_e)) {
1907 new_pte_value |= PTE_COMPRESSED_ALT;
1908 }
39037602 1909 }
3e170ce0 1910 }
39236c6e
A
1911 pmap_store_pte(pte, new_pte_value);
1912 }
3e170ce0 1913
b7266188 1914#if TESTING
0a7de745 1915 if (pmap->stats.resident_count < 1) {
b7266188 1916 panic("pmap_page_protect: resident_count");
0a7de745 1917 }
b7266188 1918#endif
316670eb 1919 pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
b7266188 1920 assert(pmap->stats.resident_count >= 1);
0a7de745 1921 OSAddAtomic(-1, &pmap->stats.resident_count);
39037602
A
1922
1923 /*
1924 * We only ever compress internal pages.
1925 */
39236c6e 1926 if (options & PMAP_OPTIONS_COMPRESSOR) {
39037602 1927 assert(IS_INTERNAL_PAGE(pai));
39236c6e 1928 }
39236c6e 1929 if (pmap != kernel_pmap) {
d190cdc3 1930 /* update pmap stats */
39236c6e
A
1931 if (IS_REUSABLE_PAGE(pai)) {
1932 assert(pmap->stats.reusable > 0);
1933 OSAddAtomic(-1, &pmap->stats.reusable);
1934 } else if (IS_INTERNAL_PAGE(pai)) {
1935 assert(pmap->stats.internal > 0);
1936 OSAddAtomic(-1, &pmap->stats.internal);
1937 } else {
1938 assert(pmap->stats.external > 0);
1939 OSAddAtomic(-1, &pmap->stats.external);
1940 }
39037602
A
1941 if ((options & PMAP_OPTIONS_COMPRESSOR) &&
1942 IS_INTERNAL_PAGE(pai)) {
1943 /* adjust "compressed" stats */
1944 OSAddAtomic64(+1, &pmap->stats.compressed);
1945 PMAP_STATS_PEAK(pmap->stats.compressed);
1946 pmap->stats.compressed_lifetime++;
1947 }
d190cdc3
A
1948
1949 /* update ledgers */
1950 if (IS_ALTACCT_PAGE(pai, pv_e)) {
1951 assert(IS_INTERNAL_PAGE(pai));
1952 pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
1953 pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
1954 if (options & PMAP_OPTIONS_COMPRESSOR) {
1955 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
1956 pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE);
1957 }
1958 } else if (IS_REUSABLE_PAGE(pai)) {
1959 assert(!IS_ALTACCT_PAGE(pai, pv_e));
1960 assert(IS_INTERNAL_PAGE(pai));
39037602
A
1961 if (options & PMAP_OPTIONS_COMPRESSOR) {
1962 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
1963 /* was not in footprint, but is now */
1964 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
1965 }
1966 } else if (IS_INTERNAL_PAGE(pai)) {
d190cdc3
A
1967 assert(!IS_ALTACCT_PAGE(pai, pv_e));
1968 assert(!IS_REUSABLE_PAGE(pai));
39037602
A
1969 pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
1970 /*
1971 * Update all stats related to physical
1972 * footprint, which only deals with
1973 * internal pages.
1974 */
1975 if (options & PMAP_OPTIONS_COMPRESSOR) {
1976 /*
1977 * This removal is only being
1978 * done so we can send this page
1979 * to the compressor; therefore
1980 * it mustn't affect total task
1981 * footprint.
1982 */
39037602
A
1983 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
1984 } else {
1985 /*
1986 * This internal page isn't
1987 * going to the compressor,
0a7de745 1988 * so adjust stats to keep
39037602
A
1989 * phys_footprint up to date.
1990 */
d190cdc3 1991 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
39037602
A
1992 }
1993 }
39236c6e
A
1994 }
1995
b7266188 1996 /*
0a7de745
A
1997 * Deal with the pv_rooted_entry.
1998 */
b7266188
A
1999
2000 if (pv_e == pv_h) {
2001 /*
2002 * Fix up head later.
2003 */
2004 pv_h->pmap = PMAP_NULL;
2005 } else {
2006 /*
2007 * Delete this entry.
2008 */
2009 pv_hash_remove(pvh_e);
2010 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
2011 pvh_eh = pvh_e;
2012
0a7de745 2013 if (pvh_et == PV_HASHED_ENTRY_NULL) {
b7266188 2014 pvh_et = pvh_e;
0a7de745 2015 }
b7266188
A
2016 pvh_cnt++;
2017 }
2018 } else {
2019 /*
0a7de745
A
2020 * Write-protect, after opportunistic refmod collect
2021 */
3e170ce0
A
2022 if (!is_ept) {
2023 pmap_phys_attributes[pai] |=
0a7de745 2024 *pte & (PHYS_MODIFIED | PHYS_REFERENCED);
3e170ce0
A
2025 } else {
2026 pmap_phys_attributes[pai] |=
0a7de745 2027 ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
3e170ce0
A
2028 }
2029 pmap_update_pte(pte, PTE_WRITE(is_ept), 0);
39236c6e 2030
0a7de745 2031 if (options & PMAP_OPTIONS_NOFLUSH) {
39236c6e 2032 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
0a7de745
A
2033 } else {
2034 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
2035 }
b7266188
A
2036 }
2037 pvh_e = nexth;
2038 } while ((pv_e = (pv_rooted_entry_t) nexth) != pv_h);
2039
2040
2041 /*
2042 * If pv_head mapping was removed, fix it up.
2043 */
2044 if (pv_h->pmap == PMAP_NULL) {
2045 pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
2046
2047 if (pvh_e != (pv_hashed_entry_t) pv_h) {
2048 pv_hash_remove(pvh_e);
2049 pv_h->pmap = pvh_e->pmap;
d190cdc3 2050 pv_h->va_and_flags = pvh_e->va_and_flags;
b7266188
A
2051 pvh_e->qlink.next = (queue_entry_t) pvh_eh;
2052 pvh_eh = pvh_e;
2053
0a7de745 2054 if (pvh_et == PV_HASHED_ENTRY_NULL) {
b7266188 2055 pvh_et = pvh_e;
0a7de745 2056 }
b7266188
A
2057 pvh_cnt++;
2058 }
2059 }
2060 if (pvh_eh != PV_HASHED_ENTRY_NULL) {
2061 PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pvh_cnt);
2062 }
2063done:
2064 UNLOCK_PVH(pai);
2065
5ba3f43e 2066 PMAP_TRACE(PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END);
b7266188
A
2067}
2068
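/*
 * Illustrative sketch, not part of the original file: how callers use
 * pmap_page_protect() given the protection cases handled above.
 * VM_PROT_READ (or VM_PROT_READ | VM_PROT_EXECUTE) write-protects every
 * mapping of the page, VM_PROT_ALL is a no-op, and any other value
 * (typically VM_PROT_NONE) removes all mappings. Helper names are
 * hypothetical.
 */
static void
example_write_protect_page(ppnum_t pn)
{
	/* Downgrade every mapping of physical page 'pn' to read-only. */
	pmap_page_protect(pn, VM_PROT_READ);
}

static void
example_remove_all_mappings(ppnum_t pn)
{
	/* VM_PROT_NONE falls into the "remove" case above. */
	pmap_page_protect(pn, VM_PROT_NONE);
}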
39236c6e 2069
6d2010ae
A
2070/*
2071 * Clear specified attribute bits.
2072 */
b7266188 2073void
6d2010ae 2074phys_attribute_clear(
0a7de745
A
2075 ppnum_t pn,
2076 int bits,
2077 unsigned int options,
2078 void *arg)
b7266188 2079{
0a7de745
A
2080 pv_rooted_entry_t pv_h;
2081 pv_hashed_entry_t pv_e;
2082 pt_entry_t *pte = NULL;
2083 int pai;
2084 pmap_t pmap;
2085 char attributes = 0;
2086 boolean_t is_internal, is_reusable, is_altacct, is_ept;
2087 int ept_bits_to_clear;
2088 boolean_t ept_keep_global_mod = FALSE;
fe8ab488
A
2089
2090 if ((bits & PHYS_MODIFIED) &&
2091 (options & PMAP_OPTIONS_NOFLUSH) &&
2092 arg == NULL) {
2093 panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): "
0a7de745
A
2094 "should not clear 'modified' without flushing TLBs\n",
2095 pn, bits, options, arg);
fe8ab488
A
2096 }
2097
3e170ce0
A
2098 /* We only support converting MOD and REF bits for EPT PTEs in this function */
2099 assert((bits & ~(PHYS_REFERENCED | PHYS_MODIFIED)) == 0);
2100
2101 ept_bits_to_clear = (unsigned)physmap_refmod_to_ept(bits & (PHYS_MODIFIED | PHYS_REFERENCED));
2102
6d2010ae
A
2103 pmap_intr_assert();
2104 assert(pn != vm_page_fictitious_addr);
0a7de745 2105 if (pn == vm_page_guard_addr) {
6d2010ae 2106 return;
0a7de745 2107 }
b7266188 2108
6d2010ae 2109 pai = ppn_to_pai(pn);
b7266188 2110
6d2010ae
A
2111 if (!IS_MANAGED_PAGE(pai)) {
2112 /*
2113 * Not a managed page.
2114 */
2115 return;
b7266188 2116 }
b7266188 2117
5ba3f43e 2118 PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START, pn, bits);
b7266188 2119
6d2010ae 2120 pv_h = pai_to_pvh(pai);
b7266188 2121
6d2010ae 2122 LOCK_PVH(pai);
b7266188 2123
3e170ce0 2124
6d2010ae
A
2125 /*
2126 * Walk down PV list, clearing all modify or reference bits.
2127 * We do not have to lock the pv_list itself because we hold
316670eb 2128 * the PV lock for this physical page (LOCK_PVH above).
6d2010ae
A
2129 */
2130 if (pv_h->pmap != PMAP_NULL) {
2131 /*
2132 * There are some mappings.
2133 */
b7266188 2134
fe8ab488
A
2135 is_internal = IS_INTERNAL_PAGE(pai);
2136 is_reusable = IS_REUSABLE_PAGE(pai);
2137
6d2010ae 2138 pv_e = (pv_hashed_entry_t)pv_h;
b7266188 2139
6d2010ae 2140 do {
0a7de745 2141 vm_map_offset_t va;
fe8ab488 2142 char pte_bits;
b7266188 2143
6d2010ae 2144 pmap = pv_e->pmap;
3e170ce0 2145 is_ept = is_ept_pmap(pmap);
d190cdc3 2146 is_altacct = IS_ALTACCT_PAGE(pai, pv_e);
39037602 2147 va = PVE_VA(pv_e);
fe8ab488
A
2148 pte_bits = 0;
2149
2150 if (bits) {
2151 pte = pmap_pte(pmap, va);
2152 /* grab ref/mod bits from this PTE */
3e170ce0 2153 pte_bits = (*pte & (PTE_REF(is_ept) | PTE_MOD(is_ept)));
fe8ab488 2154 /* propagate to page's global attributes */
3e170ce0
A
2155 if (!is_ept) {
2156 attributes |= pte_bits;
2157 } else {
2158 attributes |= ept_refmod_to_physmap(pte_bits);
2159 if (!pmap_ept_support_ad && (pte_bits & INTEL_EPT_MOD)) {
2160 ept_keep_global_mod = TRUE;
2161 }
2162 }
fe8ab488 2163 /* which bits to clear for this PTE? */
3e170ce0
A
2164 if (!is_ept) {
2165 pte_bits &= bits;
2166 } else {
2167 pte_bits &= ept_bits_to_clear;
2168 }
fe8ab488 2169 }
0a7de745
A
2170 if (options & PMAP_OPTIONS_CLEAR_WRITE) {
2171 pte_bits |= PTE_WRITE(is_ept);
2172 }
b7266188 2173
0a7de745
A
2174 /*
2175 * Clear modify and/or reference bits.
2176 */
fe8ab488 2177 if (pte_bits) {
d9a64523 2178 pmap_update_pte(pte, pte_bits, 0);
fe8ab488
A
2179
2180 /* Ensure all processors using this translation
2181 * invalidate this TLB entry. The invalidation
2182 * *must* follow the PTE update, to ensure that
2183 * the TLB shadow of the 'D' bit (in particular)
2184 * is synchronized with the updated PTE.
2185 */
0a7de745 2186 if (!(options & PMAP_OPTIONS_NOFLUSH)) {
fe8ab488
A
2187 /* flush TLBS now */
2188 PMAP_UPDATE_TLBS(pmap,
0a7de745
A
2189 va,
2190 va + PAGE_SIZE);
fe8ab488
A
2191 } else if (arg) {
2192 /* delayed TLB flush: add "pmap" info */
2193 PMAP_UPDATE_TLBS_DELAYED(
2194 pmap,
2195 va,
2196 va + PAGE_SIZE,
2197 (pmap_flush_context *)arg);
2198 } else {
2199 /* no TLB flushing at all */
2200 }
2201 }
2202
2203 /* update pmap "reusable" stats */
2204 if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) &&
2205 is_reusable &&
2206 pmap != kernel_pmap) {
2207 /* one less "reusable" */
2208 assert(pmap->stats.reusable > 0);
2209 OSAddAtomic(-1, &pmap->stats.reusable);
2210 if (is_internal) {
2211 /* one more "internal" */
2212 OSAddAtomic(+1, &pmap->stats.internal);
2213 PMAP_STATS_PEAK(pmap->stats.internal);
39037602 2214 assert(pmap->stats.internal > 0);
d190cdc3
A
2215 if (is_altacct) {
2216 /* no impact on ledgers */
39037602 2217 } else {
d190cdc3 2218 pmap_ledger_credit(pmap,
0a7de745
A
2219 task_ledgers.internal,
2220 PAGE_SIZE);
39037602
A
2221 pmap_ledger_credit(
2222 pmap,
2223 task_ledgers.phys_footprint,
2224 PAGE_SIZE);
2225 }
fe8ab488
A
2226 } else {
2227 /* one more "external" */
2228 OSAddAtomic(+1, &pmap->stats.external);
2229 PMAP_STATS_PEAK(pmap->stats.external);
39037602 2230 assert(pmap->stats.external > 0);
fe8ab488
A
2231 }
2232 } else if ((options & PMAP_OPTIONS_SET_REUSABLE) &&
0a7de745
A
2233 !is_reusable &&
2234 pmap != kernel_pmap) {
fe8ab488
A
2235 /* one more "reusable" */
2236 OSAddAtomic(+1, &pmap->stats.reusable);
2237 PMAP_STATS_PEAK(pmap->stats.reusable);
39037602 2238 assert(pmap->stats.reusable > 0);
fe8ab488
A
2239 if (is_internal) {
2240 /* one less "internal" */
2241 assert(pmap->stats.internal > 0);
2242 OSAddAtomic(-1, &pmap->stats.internal);
d190cdc3 2243 if (is_altacct) {
39037602
A
2244 /* no impact on footprint */
2245 } else {
d190cdc3 2246 pmap_ledger_debit(pmap,
0a7de745
A
2247 task_ledgers.internal,
2248 PAGE_SIZE);
39037602
A
2249 pmap_ledger_debit(
2250 pmap,
2251 task_ledgers.phys_footprint,
2252 PAGE_SIZE);
2253 }
fe8ab488
A
2254 } else {
2255 /* one less "external" */
2256 assert(pmap->stats.external > 0);
2257 OSAddAtomic(-1, &pmap->stats.external);
2258 }
2259 }
b7266188 2260
6d2010ae 2261 pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
6d2010ae 2262 } while (pv_e != (pv_hashed_entry_t)pv_h);
b7266188 2263 }
6d2010ae
A
2264 /* Opportunistic refmod collection, annulled
2265 * if both REF and MOD are being cleared.
2266 */
b7266188 2267
6d2010ae 2268 pmap_phys_attributes[pai] |= attributes;
3e170ce0
A
2269
2270 if (ept_keep_global_mod) {
2271 /*
2272 * If the hardware doesn't support AD bits for EPT PTEs and someone is
2273 * requesting that we clear the modified bit for a phys page, we need
2274 * to ensure that there are no EPT mappings for the page with the
2275 * modified bit set. If there are, we cannot clear the global modified bit.
2276 */
2277 bits &= ~PHYS_MODIFIED;
2278 }
2279 pmap_phys_attributes[pai] &= ~(bits);
b7266188 2280
fe8ab488
A
2281 /* update this page's "reusable" status */
2282 if (options & PMAP_OPTIONS_CLEAR_REUSABLE) {
2283 pmap_phys_attributes[pai] &= ~PHYS_REUSABLE;
2284 } else if (options & PMAP_OPTIONS_SET_REUSABLE) {
2285 pmap_phys_attributes[pai] |= PHYS_REUSABLE;
2286 }
2287
6d2010ae 2288 UNLOCK_PVH(pai);
b7266188 2289
5ba3f43e 2290 PMAP_TRACE(PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END);
b7266188
A
2291}
2292
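/*
 * Illustrative sketch, not part of the original file: clearing the global
 * "modified" attribute of a physical page. As the panic at the top of
 * phys_attribute_clear() enforces, PHYS_MODIFIED may only be cleared with
 * PMAP_OPTIONS_NOFLUSH when a pmap_flush_context is supplied, so the
 * simple form below flushes TLBs synchronously. The helper name is
 * hypothetical.
 */
static void
example_clear_modified(ppnum_t pn)
{
	phys_attribute_clear(pn, PHYS_MODIFIED, 0, NULL);
}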
6d2010ae
A
2293/*
2294 * Check specified attribute bits.
2295 */
2296int
2297phys_attribute_test(
0a7de745
A
2298 ppnum_t pn,
2299 int bits)
0b4c1975 2300{
0a7de745
A
2301 pv_rooted_entry_t pv_h;
2302 pv_hashed_entry_t pv_e;
2303 pt_entry_t *pte;
2304 int pai;
2305 pmap_t pmap;
2306 int attributes = 0;
2307 boolean_t is_ept;
6d2010ae
A
2308
2309 pmap_intr_assert();
2310 assert(pn != vm_page_fictitious_addr);
3e170ce0 2311 assert((bits & ~(PHYS_MODIFIED | PHYS_REFERENCED)) == 0);
0a7de745 2312 if (pn == vm_page_guard_addr) {
6d2010ae 2313 return 0;
0a7de745 2314 }
0b4c1975
A
2315
2316 pai = ppn_to_pai(pn);
2317
6d2010ae
A
2318 if (!IS_MANAGED_PAGE(pai)) {
2319 /*
2320 * Not a managed page.
2321 */
2322 return 0;
2323 }
0b4c1975 2324
6d2010ae
A
2325 /*
2326 * Fast check: if the requested bits have already been collected,
2327 * there is no need to take any locks.
2328 * If they are not all set, recheck after taking
2329 * the lock, in case they were pulled in while
2330 * we were waiting for it.
2331 */
0a7de745 2332 if ((pmap_phys_attributes[pai] & bits) == bits) {
6d2010ae 2333 return bits;
0a7de745 2334 }
0b4c1975 2335
6d2010ae 2336 pv_h = pai_to_pvh(pai);
0b4c1975 2337
6d2010ae 2338 LOCK_PVH(pai);
0b4c1975 2339
6d2010ae 2340 attributes = pmap_phys_attributes[pai] & bits;
0b4c1975 2341
0b4c1975 2342
6d2010ae
A
2343 /*
2344 * Walk down PV list, checking the mappings until we
2345 * reach the end or we've found the desired attributes.
2346 */
2347 if (attributes != bits &&
2348 pv_h->pmap != PMAP_NULL) {
2349 /*
2350 * There are some mappings.
2351 */
2352 pv_e = (pv_hashed_entry_t)pv_h;
2353 do {
2354 vm_map_offset_t va;
0b4c1975 2355
6d2010ae 2356 pmap = pv_e->pmap;
3e170ce0 2357 is_ept = is_ept_pmap(pmap);
39037602 2358 va = PVE_VA(pv_e);
6d2010ae 2359 /*
0a7de745 2360 * pick up modify and/or reference bits from mapping
6d2010ae 2361 */
0b4c1975 2362
6d2010ae 2363 pte = pmap_pte(pmap, va);
3e170ce0
A
2364 if (!is_ept) {
2365 attributes |= (int)(*pte & bits);
2366 } else {
2367 attributes |= (int)(ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED));
3e170ce0 2368 }
6d2010ae
A
2369
2370 pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink);
6d2010ae 2371 } while ((attributes != bits) &&
0a7de745 2372 (pv_e != (pv_hashed_entry_t)pv_h));
0b4c1975 2373 }
6d2010ae 2374 pmap_phys_attributes[pai] |= attributes;
0b4c1975 2375
6d2010ae 2376 UNLOCK_PVH(pai);
0a7de745 2377 return attributes;
6d2010ae 2378}
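/*
 * Illustrative sketch, not part of the original file: phys_attribute_test()
 * returns the subset of the requested bits that are set, so a "has this
 * page been dirtied?" check reduces to a single call. The helper name is
 * hypothetical.
 */
static boolean_t
example_page_is_modified(ppnum_t pn)
{
	return (phys_attribute_test(pn, PHYS_MODIFIED) & PHYS_MODIFIED) ? TRUE : FALSE;
}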
0b4c1975 2379
6d2010ae
A
2380/*
2381 * Routine: pmap_change_wiring
2382 * Function: Change the wiring attribute for a map/virtual-address
2383 * pair.
2384 * In/out conditions:
2385 * The mapping must already exist in the pmap.
2386 */
0b4c1975 2387void
6d2010ae 2388pmap_change_wiring(
0a7de745
A
2389 pmap_t map,
2390 vm_map_offset_t vaddr,
2391 boolean_t wired)
0b4c1975 2392{
0a7de745 2393 pt_entry_t *pte;
0b4c1975 2394
0a7de745 2395 PMAP_LOCK_SHARED(map);
0b4c1975 2396
0a7de745 2397 if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL) {
4bd07ac2 2398 panic("pmap_change_wiring(%p,0x%llx,%d): pte missing",
0a7de745
A
2399 map, vaddr, wired);
2400 }
0b4c1975 2401
6d2010ae
A
2402 if (wired && !iswired(*pte)) {
2403 /*
2404 * wiring down mapping
2405 */
316670eb 2406 pmap_ledger_credit(map, task_ledgers.wired_mem, PAGE_SIZE);
0a7de745 2407 OSAddAtomic(+1, &map->stats.wired_count);
3e170ce0 2408 pmap_update_pte(pte, 0, PTE_WIRED);
0a7de745 2409 } else if (!wired && iswired(*pte)) {
6d2010ae
A
2410 /*
2411 * unwiring mapping
2412 */
2413 assert(map->stats.wired_count >= 1);
0a7de745 2414 OSAddAtomic(-1, &map->stats.wired_count);
316670eb 2415 pmap_ledger_debit(map, task_ledgers.wired_mem, PAGE_SIZE);
3e170ce0 2416 pmap_update_pte(pte, PTE_WIRED, 0);
060df5ea 2417 }
060df5ea 2418
0a7de745 2419 PMAP_UNLOCK_SHARED(map);
6d2010ae 2420}
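/*
 * Illustrative sketch, not part of the original file: wiring and then
 * unwiring an existing mapping. As documented above, the mapping must
 * already exist in the pmap or pmap_change_wiring() panics on the missing
 * PTE. The helper name is hypothetical.
 */
static void
example_wire_then_unwire(pmap_t map, vm_map_offset_t vaddr)
{
	pmap_change_wiring(map, vaddr, TRUE);   /* credits wired_mem, sets PTE_WIRED */
	pmap_change_wiring(map, vaddr, FALSE);  /* debits wired_mem, clears PTE_WIRED */
}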
7ddcb079
A
2421
2422/*
2423 * "Backdoor" direct map routine for early mappings.
0a7de745 2424 * Useful for mapping memory outside the managed physical range.
7ddcb079
A
2425 * Sets the A and D bits unconditionally, and NC if requested.
2426 */
2427
2428vm_offset_t
2429pmap_map_bd(
0a7de745
A
2430 vm_offset_t virt,
2431 vm_map_offset_t start_addr,
2432 vm_map_offset_t end_addr,
2433 vm_prot_t prot,
2434 unsigned int flags)
7ddcb079 2435{
0a7de745
A
2436 pt_entry_t template;
2437 pt_entry_t *ptep;
5c9f4661 2438
0a7de745
A
2439 vm_offset_t base = virt;
2440 boolean_t doflush = FALSE;
5c9f4661 2441
7ddcb079 2442 template = pa_to_pte(start_addr)
0a7de745
A
2443 | INTEL_PTE_REF
2444 | INTEL_PTE_MOD
2445 | INTEL_PTE_WIRED
2446 | INTEL_PTE_VALID;
7ddcb079
A
2447
2448 if ((flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) == VM_MEM_NOT_CACHEABLE) {
2449 template |= INTEL_PTE_NCACHE;
0a7de745
A
2450 if (!(flags & (VM_MEM_GUARDED))) {
2451 template |= INTEL_PTE_PAT;
2452 }
7ddcb079 2453 }
316670eb 2454
0a7de745 2455 if ((prot & VM_PROT_EXECUTE) == 0) {
316670eb 2456 template |= INTEL_PTE_NX;
0a7de745 2457 }
316670eb 2458
0a7de745 2459 if (prot & VM_PROT_WRITE) {
7ddcb079 2460 template |= INTEL_PTE_WRITE;
0a7de745
A
2461 }
2462 vm_map_offset_t caddr = start_addr;
2463 while (caddr < end_addr) {
5c9f4661
A
2464 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
2465 if (ptep == PT_ENTRY_NULL) {
2466 panic("pmap_map_bd: Invalid kernel address");
2467 }
2468 if (pte_to_pa(*ptep)) {
2469 doflush = TRUE;
7ddcb079 2470 }
5c9f4661 2471 pmap_store_pte(ptep, template);
7ddcb079
A
2472 pte_increment_pa(template);
2473 virt += PAGE_SIZE;
0a7de745 2474 caddr += PAGE_SIZE;
7ddcb079 2475 }
5c9f4661 2476 if (doflush) {
0a7de745 2477 pmap_tlbi_range(0, ~0ULL, true, 0);
5c9f4661
A
2478 PMAP_UPDATE_TLBS(kernel_pmap, base, base + end_addr - start_addr);
2479 }
0a7de745 2480 return virt;
7ddcb079 2481}
39236c6e 2482
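/*
 * Illustrative sketch, not part of the original file: an early-boot style
 * use of pmap_map_bd() to map a physical range uncached at a known kernel
 * virtual address. The return value is the virtual address just past the
 * last page mapped. The helper name and addresses are hypothetical.
 */
static vm_offset_t
example_map_device_range(vm_offset_t kva, vm_map_offset_t phys_start, vm_map_offset_t phys_end)
{
	return pmap_map_bd(kva, phys_start, phys_end,
	    VM_PROT_READ | VM_PROT_WRITE, VM_MEM_NOT_CACHEABLE);
}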
5c9f4661
A
2483/* Create a virtual alias beginning at 'ava' of the specified kernel virtual
2484 * range. The aliased pagetable range is expanded if
2485 * PMAP_EXPAND_OPTIONS_ALIASMAP is specified. Performs no synchronization,
2486 * assumes caller has stabilized the source and destination ranges. Currently
2487 * used to populate sections of the trampoline "doublemap" at CPU startup.
2488 */
2489
2490void
2491pmap_alias(
0a7de745
A
2492 vm_offset_t ava,
2493 vm_map_offset_t start_addr,
2494 vm_map_offset_t end_addr,
2495 vm_prot_t prot,
2496 unsigned int eoptions)
5c9f4661 2497{
0a7de745
A
2498 pt_entry_t prot_template, template;
2499 pt_entry_t *aptep, *sptep;
5c9f4661
A
2500
2501 prot_template = INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_WIRED | INTEL_PTE_VALID;
0a7de745 2502 if ((prot & VM_PROT_EXECUTE) == 0) {
5c9f4661 2503 prot_template |= INTEL_PTE_NX;
0a7de745 2504 }
5c9f4661 2505
0a7de745 2506 if (prot & VM_PROT_WRITE) {
5c9f4661 2507 prot_template |= INTEL_PTE_WRITE;
0a7de745 2508 }
5c9f4661
A
2509 assert(((start_addr | end_addr) & PAGE_MASK) == 0);
2510 while (start_addr < end_addr) {
2511 aptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ava);
2512 if (aptep == PT_ENTRY_NULL) {
2513 if (eoptions & PMAP_EXPAND_OPTIONS_ALIASMAP) {
2514 pmap_expand(kernel_pmap, ava, PMAP_EXPAND_OPTIONS_ALIASMAP);
2515 aptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ava);
2516 } else {
2517 panic("pmap_alias: Invalid alias address");
2518 }
2519 }
2520 /* The aliased range should not have any active mappings */
2521 assert(pte_to_pa(*aptep) == 0);
2522
2523 sptep = pmap_pte(kernel_pmap, start_addr);
2524 assert(sptep != PT_ENTRY_NULL && (pte_to_pa(*sptep) != 0));
2525 template = pa_to_pte(pte_to_pa(*sptep)) | prot_template;
2526 pmap_store_pte(aptep, template);
2527
2528 ava += PAGE_SIZE;
2529 start_addr += PAGE_SIZE;
2530 }
2531}
2532
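/*
 * Illustrative sketch, not part of the original file: aliasing an existing,
 * stabilized kernel range at a second virtual address, expanding the alias
 * page tables on demand, in the spirit of the trampoline "doublemap" use
 * described above. The helper name and alias address are hypothetical.
 */
static void
example_alias_kernel_range(vm_offset_t alias_va, vm_map_offset_t kva_start, vm_map_offset_t kva_end)
{
	pmap_alias(alias_va, kva_start, kva_end,
	    VM_PROT_READ | VM_PROT_EXECUTE, PMAP_EXPAND_OPTIONS_ALIASMAP);
}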
4bd07ac2 2533mach_vm_size_t
39236c6e 2534pmap_query_resident(
0a7de745
A
2535 pmap_t pmap,
2536 addr64_t s64,
2537 addr64_t e64,
2538 mach_vm_size_t *compressed_bytes_p)
39236c6e
A
2539{
2540 pt_entry_t *pde;
2541 pt_entry_t *spte, *epte;
2542 addr64_t l64;
0a7de745
A
2543 uint64_t deadline = 0;
2544 mach_vm_size_t resident_bytes;
2545 mach_vm_size_t compressed_bytes;
2546 boolean_t is_ept;
39236c6e
A
2547
2548 pmap_intr_assert();
2549
3e170ce0 2550 if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64) {
4bd07ac2
A
2551 if (compressed_bytes_p) {
2552 *compressed_bytes_p = 0;
3e170ce0 2553 }
39236c6e 2554 return 0;
3e170ce0
A
2555 }
2556
2557 is_ept = is_ept_pmap(pmap);
39236c6e
A
2558
2559 PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START,
0a7de745
A
2560 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(s64),
2561 VM_KERNEL_ADDRHIDE(e64));
39236c6e 2562
4bd07ac2
A
2563 resident_bytes = 0;
2564 compressed_bytes = 0;
39236c6e 2565
0a7de745
A
2566 PMAP_LOCK_EXCLUSIVE(pmap);
2567 uint32_t traverse_count = 0;
39236c6e
A
2568
2569 while (s64 < e64) {
0a7de745
A
2570 l64 = (s64 + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE - 1);
2571 if (l64 > e64) {
39236c6e 2572 l64 = e64;
0a7de745 2573 }
39236c6e
A
2574 pde = pmap_pde(pmap, s64);
2575
3e170ce0
A
2576 if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
2577 if (*pde & PTE_PS) {
39236c6e
A
2578 /* superpage: not supported */
2579 } else {
2580 spte = pmap_pte(pmap,
0a7de745 2581 (s64 & ~(PDE_MAPPED_SIZE - 1)));
39236c6e
A
2582 spte = &spte[ptenum(s64)];
2583 epte = &spte[intel_btop(l64 - s64)];
2584
2585 for (; spte < epte; spte++) {
2586 if (pte_to_pa(*spte) != 0) {
4bd07ac2 2587 resident_bytes += PAGE_SIZE;
3e170ce0 2588 } else if (*spte & PTE_COMPRESSED) {
4bd07ac2 2589 compressed_bytes += PAGE_SIZE;
39236c6e
A
2590 }
2591 }
39236c6e
A
2592 }
2593 }
2594 s64 = l64;
2595
0a7de745
A
2596 if ((s64 < e64) && (traverse_count++ > PLCHECK_THRESHOLD)) {
2597 if (deadline == 0) {
2598 deadline = rdtsc64() + max_preemption_latency_tsc;
2599 } else {
2600 if (rdtsc64() > deadline) {
2601 PMAP_UNLOCK_EXCLUSIVE(pmap);
2602 __builtin_ia32_pause();
2603 PMAP_LOCK_EXCLUSIVE(pmap);
2604 deadline = rdtsc64() + max_preemption_latency_tsc;
2605 }
2606 }
39236c6e
A
2607 }
2608 }
2609
0a7de745 2610 PMAP_UNLOCK_EXCLUSIVE(pmap);
39236c6e
A
2611
2612 PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END,
0a7de745 2613 resident_bytes);
39236c6e 2614
4bd07ac2
A
2615 if (compressed_bytes_p) {
2616 *compressed_bytes_p = compressed_bytes;
3e170ce0 2617 }
4bd07ac2 2618 return resident_bytes;
39236c6e 2619}
fe8ab488 2620
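/*
 * Illustrative sketch, not part of the original file: summing resident and
 * compressed byte counts for a VA range of a user pmap. pmap_query_resident()
 * returns the resident bytes and reports compressed bytes through the
 * optional out-parameter; it returns 0 for the kernel pmap. The helper name
 * is hypothetical.
 */
static void
example_report_range(pmap_t pmap, addr64_t start, addr64_t end)
{
	mach_vm_size_t compressed = 0;
	mach_vm_size_t resident = pmap_query_resident(pmap, start, end, &compressed);

	printf("range 0x%llx-0x%llx: %llu bytes resident, %llu bytes compressed\n",
	    (uint64_t)start, (uint64_t)end, (uint64_t)resident, (uint64_t)compressed);
}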
39037602
A
2621kern_return_t
2622pmap_query_page_info(
0a7de745
A
2623 pmap_t pmap,
2624 vm_map_offset_t va,
2625 int *disp_p)
39037602 2626{
0a7de745
A
2627 int disp;
2628 boolean_t is_ept;
2629 pmap_paddr_t pa;
2630 ppnum_t pai;
2631 pd_entry_t *pde;
2632 pt_entry_t *pte;
39037602
A
2633
2634 pmap_intr_assert();
2635 if (pmap == PMAP_NULL || pmap == kernel_pmap) {
2636 *disp_p = 0;
2637 return KERN_INVALID_ARGUMENT;
2638 }
2639
2640 disp = 0;
2641 is_ept = is_ept_pmap(pmap);
2642
0a7de745 2643 PMAP_LOCK_EXCLUSIVE(pmap);
39037602
A
2644
2645 pde = pmap_pde(pmap, va);
2646 if (!pde ||
2647 !(*pde & PTE_VALID_MASK(is_ept)) ||
2648 (*pde & PTE_PS)) {
2649 goto done;
2650 }
2651
2652 pte = pmap_pte(pmap, va);
2653 if (pte == PT_ENTRY_NULL) {
2654 goto done;
2655 }
2656
2657 pa = pte_to_pa(*pte);
2658 if (pa == 0) {
cb323159 2659 if (PTE_IS_COMPRESSED(*pte, pte, pmap, va)) {
39037602
A
2660 disp |= PMAP_QUERY_PAGE_COMPRESSED;
2661 if (*pte & PTE_COMPRESSED_ALT) {
2662 disp |= PMAP_QUERY_PAGE_COMPRESSED_ALTACCT;
2663 }
2664 }
2665 } else {
2666 disp |= PMAP_QUERY_PAGE_PRESENT;
2667 pai = pa_index(pa);
2668 if (!IS_MANAGED_PAGE(pai)) {
d190cdc3
A
2669 } else if (pmap_pv_is_altacct(pmap, va, pai)) {
2670 assert(IS_INTERNAL_PAGE(pai));
2671 disp |= PMAP_QUERY_PAGE_INTERNAL;
2672 disp |= PMAP_QUERY_PAGE_ALTACCT;
39037602
A
2673 } else if (IS_REUSABLE_PAGE(pai)) {
2674 disp |= PMAP_QUERY_PAGE_REUSABLE;
2675 } else if (IS_INTERNAL_PAGE(pai)) {
2676 disp |= PMAP_QUERY_PAGE_INTERNAL;
39037602
A
2677 }
2678 }
2679
2680done:
0a7de745 2681 PMAP_UNLOCK_EXCLUSIVE(pmap);
39037602
A
2682 *disp_p = disp;
2683 return KERN_SUCCESS;
2684}
2685
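/*
 * Illustrative sketch, not part of the original file: decoding the
 * disposition bits returned by pmap_query_page_info(). A page can be
 * reported as present, compressed, internal, reusable and/or
 * alternate-accounted. The helper name is hypothetical.
 */
static bool
example_page_is_compressed(pmap_t pmap, vm_map_offset_t va)
{
	int disp = 0;

	if (pmap_query_page_info(pmap, va, &disp) != KERN_SUCCESS) {
		return false;
	}
	return (disp & PMAP_QUERY_PAGE_COMPRESSED) != 0;
}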
f427ee49
A
2686void
2687pmap_set_vm_map_cs_enforced(
2688 pmap_t pmap,
2689 bool new_value)
2690{
2691 PMAP_LOCK_EXCLUSIVE(pmap);
2692 pmap->pm_vm_map_cs_enforced = new_value;
2693 PMAP_UNLOCK_EXCLUSIVE(pmap);
2694}
2695extern int cs_process_enforcement_enable;
2696bool
2697pmap_get_vm_map_cs_enforced(
2698 pmap_t pmap)
2699{
2700 if (cs_process_enforcement_enable) {
2701 return true;
2702 }
2703 return pmap->pm_vm_map_cs_enforced;
2704}
2705
d9a64523
A
2706void
2707pmap_set_jit_entitled(__unused pmap_t pmap)
39037602 2708{
5ba3f43e
A
2709 /* The x86 pmap layer does not care if a map has a JIT entry. */
2710 return;
39037602
A
2711}
2712
d9a64523 2713bool
f427ee49
A
2714pmap_get_jit_entitled(__unused pmap_t pmap)
2715{
2716 /* The x86 pmap layer does not care if a map is using JIT. */
2717 return false;
2718}
2719
2720bool
2721pmap_has_prot_policy(__unused pmap_t pmap, __unused bool translated_allow_execute, __unused vm_prot_t prot)
fe8ab488 2722{
5ba3f43e
A
2723 /*
2724 * The x86 pmap layer does not apply any policy to any protection
2725 * types.
2726 */
f427ee49 2727 return false;
5ba3f43e
A
2728}
2729
d9a64523
A
2730uint64_t
2731pmap_release_pages_fast(void)
2732{
2733 return 0;
2734}
2735
2736void
f427ee49 2737pmap_trim(__unused pmap_t grand, __unused pmap_t subord, __unused addr64_t vstart, __unused uint64_t size)
5ba3f43e
A
2738{
2739 return;
fe8ab488 2740}
5ba3f43e 2741
cb323159 2742__dead2
0a7de745
A
2743void
2744pmap_ledger_alloc_init(size_t size)
d9a64523
A
2745{
2746 panic("%s: unsupported, "
0a7de745
A
2747 "size=%lu",
2748 __func__, size);
d9a64523
A
2749}
2750
cb323159 2751__dead2
0a7de745
A
2752ledger_t
2753pmap_ledger_alloc(void)
d9a64523
A
2754{
2755 panic("%s: unsupported",
0a7de745 2756 __func__);
d9a64523
A
2757}
2758
cb323159 2759__dead2
0a7de745
A
2760void
2761pmap_ledger_free(ledger_t ledger)
d9a64523
A
2762{
2763 panic("%s: unsupported, "
0a7de745
A
2764 "ledger=%p",
2765 __func__, ledger);
d9a64523
A
2766}
2767
f427ee49
A
2768kern_return_t
2769pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end __unused,
2770 unsigned int level_mask __unused, size_t *bytes_copied __unused)
d9a64523 2771{
f427ee49 2772 return KERN_NOT_SUPPORTED;
d9a64523
A
2773}
2774
0a7de745
A
2775void *
2776pmap_map_compressor_page(ppnum_t pn)
2777{
2778 assertf(IS_MANAGED_PAGE(ppn_to_pai(pn)), "%s called on non-managed page 0x%08x", __func__, pn);
2779 return PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
2780}
2781
2782void
2783pmap_unmap_compressor_page(ppnum_t pn __unused, void *kva __unused)
2784{
2785}
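/*
 * Illustrative sketch, not part of the original file: the compressor maps a
 * physical page, works on it through the returned kernel VA, then unmaps it.
 * On x86 the "map" is just a physmap lookup and the "unmap" is a no-op, but
 * callers pair the two for portability. The bzero() stands in for real work,
 * and the helper name is hypothetical.
 */
static void
example_scrub_compressor_page(ppnum_t pn)
{
	void *kva = pmap_map_compressor_page(pn);

	bzero(kva, PAGE_SIZE);
	pmap_unmap_compressor_page(pn, kva);
}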
f427ee49
A
2786
2787bool
2788pmap_clear_refmod_range_options(
2789 pmap_t pmap __unused,
2790 vm_map_address_t start __unused,
2791 vm_map_address_t end __unused,
2792 unsigned int mask __unused,
2793 unsigned int options __unused)
2794{
2795 /*
2796 * x86 doesn't have ranged tlbi instructions, and we already have
2797 * the pmap_flush_context. This operation isn't implemented.
2798 */
2799 return false;
2800}