/* osfmk/i386/pmap_common.c (from the xnu-7195.101.1 source distribution) */

/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/pmap.h>
#include <kern/ledger.h>
#include <i386/pmap_internal.h>

/*
 * Each entry in the pv_head_table is locked by a bit in the
 * pv_lock_table. The lock bits are accessed by the physical
 * address of the page they lock.
 */

char *pv_lock_table;            /* pointer to array of bits */
char *pv_hash_lock_table;

pv_rooted_entry_t pv_head_table;        /* array of entries, one per page */
uint32_t pv_hashed_free_count = 0;
uint32_t pv_hashed_kern_free_count = 0;

pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG];
uint32_t pmap_pagetable_corruption_incidents;
uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1);
uint64_t pmap_pagetable_corruption_interval_abstime;
thread_call_t pmap_pagetable_corruption_log_call;
static thread_call_data_t pmap_pagetable_corruption_log_call_data;
boolean_t pmap_pagetable_corruption_timeout = FALSE;

volatile uint32_t mappingrecurse = 0;

uint32_t pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark, pv_hashed_alloc_chunk, pv_hashed_kern_alloc_chunk;

thread_t mapping_replenish_thread;
event_t mapping_replenish_event, pmap_user_pv_throttle_event;

uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters;

int pmap_asserts_enabled = (DEBUG);
int pmap_asserts_traced = 0;

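/*
 * Illustrative sketch (not part of the original file): pv_head_table and
 * its lock bits are indexed by the physical page index ("pai"). The real
 * LOCK_PVH()/UNLOCK_PVH() macros live in i386/pmap_internal.h; a minimal
 * model of the scheme, assuming a bit_lock()-style primitive, is:
 *
 *      pai = ppn_to_pai(pn);                    // page number -> index
 *      bit_lock(pai, (void *)pv_lock_table);    // take per-page PV lock
 *      ... mutate pv_head_table[pai] / pmap_phys_attributes[pai] ...
 *      bit_unlock(pai, (void *)pv_lock_table);
 */
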
unsigned int
pmap_cache_attributes(ppnum_t pn)
{
        int cacheattr = pmap_get_cache_attributes(pn, FALSE);

        if (cacheattr & INTEL_PTE_NCACHE) {
                if (cacheattr & INTEL_PTE_PAT) {
                        /* WC */
                        return VM_WIMG_WCOMB;
                }
                return VM_WIMG_IO;
        } else {
                return VM_WIMG_COPYBACK;
        }
}

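/*
 * Decode summary (derived from the function above, not in the original
 * source): the two PTE attribute bits map onto VM_WIMG_* values as follows.
 *
 *      INTEL_PTE_NCACHE  INTEL_PTE_PAT  result
 *      ----------------  -------------  -------------------------------
 *      0                 x              VM_WIMG_COPYBACK (cached, WB)
 *      1                 0              VM_WIMG_IO       (uncached)
 *      1                 1              VM_WIMG_WCOMB    (write-combined)
 */
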
void
pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr)
{
        unsigned int current, template = 0;
        int pai;

        if (cacheattr & VM_MEM_NOT_CACHEABLE) {
                if (!(cacheattr & VM_MEM_GUARDED)) {
                        template |= PHYS_PAT;
                }
                template |= PHYS_NCACHE;
        }

        pmap_intr_assert();

        assert((pn != vm_page_fictitious_addr) && (pn != vm_page_guard_addr));

        pai = ppn_to_pai(pn);

        if (!IS_MANAGED_PAGE(pai)) {
                return;
        }

        /*
         * Override the cache attributes for this phys page.
         * Does not walk through existing mappings to adjust them;
         * assumes the page is disconnected.
         */

        LOCK_PVH(pai);

        pmap_update_cache_attributes_locked(pn, template);

        current = pmap_phys_attributes[pai] & PHYS_CACHEABILITY_MASK;
        pmap_phys_attributes[pai] &= ~PHYS_CACHEABILITY_MASK;
        pmap_phys_attributes[pai] |= template;

        UNLOCK_PVH(pai);

        if ((template & PHYS_NCACHE) && !(current & PHYS_NCACHE)) {
                pmap_sync_page_attributes_phys(pn);
        }
}

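/*
 * Worked example (derived from the code above, not in the original source):
 * requesting VM_MEM_NOT_CACHEABLE without VM_MEM_GUARDED yields
 * template = PHYS_PAT | PHYS_NCACHE, i.e. write-combined; adding
 * VM_MEM_GUARDED yields template = PHYS_NCACHE alone, i.e. fully uncached.
 * This is the inverse of the decode in pmap_cache_attributes() above.
 */
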
unsigned
pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept)
{
        if (last_managed_page == 0) {
                return 0;
        }

        if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) {
                return PTE_NCACHE(is_ept);
        }

        /*
         * The cache attributes are read locklessly for efficiency.
         */
        unsigned int attr = pmap_phys_attributes[ppn_to_pai(pn)];
        unsigned int template = 0;

        /*
         * The PTA bit is currently unsupported for EPT PTEs.
         */
        if ((attr & PHYS_PAT) && !is_ept) {
                template |= INTEL_PTE_PAT;
        }

        /*
         * If the page isn't marked as NCACHE, the default for EPT entries
         * is WB.
         */
        if (attr & PHYS_NCACHE) {
                template |= PTE_NCACHE(is_ept);
        } else if (is_ept) {
                template |= INTEL_EPT_WB;
        }

        return template;
}

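/*
 * Note (derived from the code above, not in the original source): for a
 * managed page with no cacheability attributes set, a legacy (non-EPT)
 * caller gets template == 0 and inherits the hardware default (write-back),
 * while an EPT caller gets an explicit INTEL_EPT_WB memory type, since EPT
 * entries encode the memory type directly rather than via the PAT.
 */
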
boolean_t
pmap_has_managed_page(ppnum_t first, ppnum_t last)
{
        ppnum_t pn, kdata_start, kdata_end;
        boolean_t result;
        boot_args *args;

        args = (boot_args *) PE_state.bootArgs;

        // Allow pages that the booter added to the end of the kernel.
        // We may miss reporting some pages in this range that were freed
        // with ml_static_free()
        kdata_start = atop_32(args->kaddr);
        kdata_end = atop_32(args->kaddr + args->ksize);

        assert(last_managed_page);
        assert(first <= last);

        for (result = FALSE, pn = first;
            !result
            && (pn <= last)
            && (pn <= last_managed_page);
            pn++) {
                if ((pn >= kdata_start) && (pn < kdata_end)) {
                        continue;
                }
                result = (0 != (pmap_phys_attributes[pn] & PHYS_MANAGED));
        }

        return result;
}

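/*
 * Hypothetical usage sketch (not in the original source): a caller
 * validating that a physical range is safe to treat as raw device space
 * might reject it if any page in the range is VM-managed:
 *
 *      if (pmap_has_managed_page(atop_32(phys_base),
 *                                atop_32(phys_base + size - 1))) {
 *              return KERN_INVALID_ARGUMENT;   // overlaps managed RAM
 *      }
 */
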
boolean_t
pmap_is_noencrypt(ppnum_t pn)
{
        int pai;

        pai = ppn_to_pai(pn);

        if (!IS_MANAGED_PAGE(pai)) {
                return FALSE;
        }

        if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
                return TRUE;
        }

        return FALSE;
}

void
pmap_set_noencrypt(ppnum_t pn)
{
        int pai;

        pai = ppn_to_pai(pn);

        if (IS_MANAGED_PAGE(pai)) {
                LOCK_PVH(pai);

                pmap_phys_attributes[pai] |= PHYS_NOENCRYPT;

                UNLOCK_PVH(pai);
        }
}

void
pmap_clear_noencrypt(ppnum_t pn)
{
        int pai;

        pai = ppn_to_pai(pn);

        if (IS_MANAGED_PAGE(pai)) {
                /*
                 * Synchronization at the VM layer prevents PHYS_NOENCRYPT
                 * from changing state, so we don't need the lock to
                 * inspect it.
                 */
                if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
                        LOCK_PVH(pai);

                        pmap_phys_attributes[pai] &= ~PHYS_NOENCRYPT;

                        UNLOCK_PVH(pai);
                }
        }
}

void
compute_pmap_gc_throttle(void *arg __unused)
{
}

void
pmap_lock_phys_page(ppnum_t pn)
{
        int pai;

        pai = ppn_to_pai(pn);

        if (IS_MANAGED_PAGE(pai)) {
                LOCK_PVH(pai);
        } else {
                simple_lock(&phys_backup_lock, LCK_GRP_NULL);
        }
}

void
pmap_unlock_phys_page(ppnum_t pn)
{
        int pai;

        pai = ppn_to_pai(pn);

        if (IS_MANAGED_PAGE(pai)) {
                UNLOCK_PVH(pai);
        } else {
                simple_unlock(&phys_backup_lock);
        }
}

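/*
 * Usage sketch (not in the original source): callers are expected to
 * bracket direct physical-page state manipulation with this pair. Note
 * that all unmanaged pages share the single phys_backup_lock, so the
 * critical section should stay short:
 *
 *      pmap_lock_phys_page(pn);
 *      ... inspect or update per-page state for pn ...
 *      pmap_unlock_phys_page(pn);
 */
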
__private_extern__ void
pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1, 2))
{
        if (pmap_pagetable_corruption_incidents > 0) {
                int i, j, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG);
                (*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout);
                for (i = 0; i < e; i++) {
                        (*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n",
                            pmap_pagetable_corruption_records[i].incident,
                            pmap_pagetable_corruption_records[i].reason,
                            pmap_pagetable_corruption_records[i].action,
                            pmap_pagetable_corruption_records[i].abstime);

                        if (pmap_pagetable_corruption_records[i].adj_ptes_count > 0) {
                                for (j = 0; j < pmap_pagetable_corruption_records[i].adj_ptes_count; j++) {
                                        (*log_func)("\tAdjacent PTE[%d] = 0x%llx\n", j,
                                            pmap_pagetable_corruption_records[i].adj_ptes[j]);
                                }
                        }
                }
        }
}

static inline void
pmap_pagetable_corruption_log_setup(void)
{
        if (pmap_pagetable_corruption_log_call == NULL) {
                nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime);
                thread_call_setup(&pmap_pagetable_corruption_log_call_data,
                    (thread_call_func_t) pmap_pagetable_corruption_msg_log,
                    (thread_call_param_t) &printf);
                pmap_pagetable_corruption_log_call = &pmap_pagetable_corruption_log_call_data;
        }
}

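/*
 * Sketch of the reporting path (an assumption, not in the original source):
 * code that records a corruption incident is expected to kick the thread
 * call set up above, so the accumulated records get printed via printf:
 *
 *      if (pmap_pagetable_corruption_log_call != NULL) {
 *              thread_call_enter(pmap_pagetable_corruption_log_call);
 *      }
 *
 * The recording itself lives elsewhere (i386/pmap_internal.h in this
 * version of xnu).
 */
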
void
mapping_free_prime(void)
{
        unsigned i;
        pv_hashed_entry_t pvh_e;
        pv_hashed_entry_t pvh_eh;
        pv_hashed_entry_t pvh_et;
        int pv_cnt;

        /* Scale based on DRAM size */
        pv_hashed_low_water_mark = MAX(PV_HASHED_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 2000);
        pv_hashed_low_water_mark = MIN(pv_hashed_low_water_mark, 16000);
        /* Alterable via sysctl */
        pv_hashed_kern_low_water_mark = MAX(PV_HASHED_KERN_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 1000);
        pv_hashed_kern_low_water_mark = MIN(pv_hashed_kern_low_water_mark, 16000);
        pv_hashed_kern_alloc_chunk = PV_HASHED_KERN_ALLOC_CHUNK_INITIAL;
        pv_hashed_alloc_chunk = PV_HASHED_ALLOC_CHUNK_INITIAL;

        pv_cnt = 0;
        pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

        for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK_INITIAL); i++) {
                pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

                pvh_e->qlink.next = (queue_entry_t)pvh_eh;
                pvh_eh = pvh_e;

                if (pvh_et == PV_HASHED_ENTRY_NULL) {
                        pvh_et = pvh_e;
                }
                pv_cnt++;
        }
        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);

        pv_cnt = 0;
        pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
        for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK_INITIAL; i++) {
                pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

                pvh_e->qlink.next = (queue_entry_t)pvh_eh;
                pvh_eh = pvh_e;

                if (pvh_et == PV_HASHED_ENTRY_NULL) {
                        pvh_et = pvh_e;
                }
                pv_cnt++;
        }
        PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
}

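/*
 * Worked example (derived from the code above, not in the original source):
 * sane_size >> 30 is DRAM size in GiB, so on a 4 GiB machine the user
 * low-water mark scales to 4 * 2000 = 8000 entries (unless the compile-time
 * default is larger), while on a 16 GiB machine 16 * 2000 = 32000 is
 * clamped to the 16000 ceiling. The kernel reserve scales at half that
 * rate (1000 per GiB) with the same 16000 cap.
 */
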
void mapping_replenish(void);

void
mapping_adjust(void)
{
        kern_return_t mres;

        pmap_pagetable_corruption_log_setup();

        mres = kernel_thread_start_priority((thread_continue_t)mapping_replenish, NULL, MAXPRI_KERNEL, &mapping_replenish_thread);
        if (mres != KERN_SUCCESS) {
                panic("pmap: mapping_replenish_thread creation failed");
        }
        thread_deallocate(mapping_replenish_thread);
}

unsigned pmap_mapping_thread_wakeups;
unsigned pmap_kernel_reserve_replenish_stat;
unsigned pmap_user_reserve_replenish_stat;
unsigned pmap_kern_reserve_alloc_stat;

__attribute__((noreturn))
void
mapping_replenish(void)
{
        pv_hashed_entry_t pvh_e;
        pv_hashed_entry_t pvh_eh;
        pv_hashed_entry_t pvh_et;
        int pv_cnt;
        unsigned i;

        /* We qualify for VM privileges... */
        current_thread()->options |= TH_OPT_VMPRIV;

        for (;;) {
                while (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
                        pv_cnt = 0;
                        pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

                        for (i = 0; i < pv_hashed_kern_alloc_chunk; i++) {
                                pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
                                pvh_e->qlink.next = (queue_entry_t)pvh_eh;
                                pvh_eh = pvh_e;

                                if (pvh_et == PV_HASHED_ENTRY_NULL) {
                                        pvh_et = pvh_e;
                                }
                                pv_cnt++;
                        }
                        pmap_kernel_reserve_replenish_stat += pv_cnt;
                        PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
                }

                pv_cnt = 0;
                pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

                if (pv_hashed_free_count < pv_hashed_low_water_mark) {
                        for (i = 0; i < pv_hashed_alloc_chunk; i++) {
                                pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

                                pvh_e->qlink.next = (queue_entry_t)pvh_eh;
                                pvh_eh = pvh_e;

                                if (pvh_et == PV_HASHED_ENTRY_NULL) {
                                        pvh_et = pvh_e;
                                }
                                pv_cnt++;
                        }
                        pmap_user_reserve_replenish_stat += pv_cnt;
                        PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
                }
                /*
                 * Wake threads throttled while the kernel reserve was being
                 * replenished.
                 */
                if (pmap_pv_throttled_waiters) {
                        pmap_pv_throttled_waiters = 0;
                        thread_wakeup(&pmap_user_pv_throttle_event);
                }
                /*
                 * Check if the kernel pool has been depleted since the
                 * first pass, to reduce refill latency.
                 */
                if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
                        continue;
                }
                /* Block sans continuation to avoid yielding kernel stack */
                assert_wait(&mapping_replenish_event, THREAD_UNINT);
                mappingrecurse = 0;
                thread_block(THREAD_CONTINUE_NULL);
                pmap_mapping_thread_wakeups++;
        }
}

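/*
 * Design note with a wakeup sketch (an assumption, not in the original
 * source): mappingrecurse acts as a one-shot gate so concurrent allocators
 * issue at most one wakeup per replenish cycle; the thread clears it just
 * before blocking. An allocation path noticing a depleted reserve would
 * plausibly do:
 *
 *      if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark &&
 *          OSCompareAndSwap(0, 1, &mappingrecurse)) {
 *              thread_wakeup(&mapping_replenish_event);
 *      }
 *
 * (The actual wakeup sites in this xnu version are in i386/pmap_internal.h.)
 */
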
/*
 * Set specified attribute bits.
 */

void
phys_attribute_set(
        ppnum_t pn,
        int     bits)
{
        int pai;

        pmap_intr_assert();
        assert(pn != vm_page_fictitious_addr);
        if (pn == vm_page_guard_addr) {
                return;
        }

        pai = ppn_to_pai(pn);

        if (!IS_MANAGED_PAGE(pai)) {
                /* Not a managed page. */
                return;
        }

        LOCK_PVH(pai);
        pmap_phys_attributes[pai] |= bits;
        UNLOCK_PVH(pai);
}

/*
 * Set the modify bit on the specified physical page.
 */

void
pmap_set_modify(ppnum_t pn)
{
        phys_attribute_set(pn, PHYS_MODIFIED);
}

/*
 * Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(ppnum_t pn)
{
        phys_attribute_clear(pn, PHYS_MODIFIED, 0, NULL);
}

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page is modified
 * by any physical maps.
 */

boolean_t
pmap_is_modified(ppnum_t pn)
{
        if (phys_attribute_test(pn, PHYS_MODIFIED)) {
                return TRUE;
        }
        return FALSE;
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */

void
pmap_clear_reference(ppnum_t pn)
{
        phys_attribute_clear(pn, PHYS_REFERENCED, 0, NULL);
}

void
pmap_set_reference(ppnum_t pn)
{
        phys_attribute_set(pn, PHYS_REFERENCED);
}

/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page is referenced
 * by any physical maps.
 */

boolean_t
pmap_is_referenced(ppnum_t pn)
{
        if (phys_attribute_test(pn, PHYS_REFERENCED)) {
                return TRUE;
        }
        return FALSE;
}

/*
 * pmap_get_refmod(phys)
 * returns the referenced and modified bits of the specified
 * physical page.
 */
unsigned int
pmap_get_refmod(ppnum_t pn)
{
        int          refmod;
        unsigned int retval = 0;

        refmod = phys_attribute_test(pn, PHYS_MODIFIED | PHYS_REFERENCED);

        if (refmod & PHYS_MODIFIED) {
                retval |= VM_MEM_MODIFIED;
        }
        if (refmod & PHYS_REFERENCED) {
                retval |= VM_MEM_REFERENCED;
        }

        return retval;
}

void
pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *arg)
{
        unsigned int x86Mask;

        x86Mask = (((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
            | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));

        phys_attribute_clear(pn, x86Mask, options, arg);
}

/*
 * pmap_clear_refmod(phys, mask)
 * clears the referenced and modified bits as specified by the mask
 * of the specified physical page.
 */
void
pmap_clear_refmod(ppnum_t pn, unsigned int mask)
{
        unsigned int x86Mask;

        x86Mask = (((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
            | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));

        phys_attribute_clear(pn, x86Mask, 0, NULL);
}

unsigned int
pmap_disconnect(ppnum_t pa)
{
        return pmap_disconnect_options(pa, 0, NULL);
}

/*
 * Routine:
 *	pmap_disconnect_options
 *
 * Function:
 *	Disconnect all mappings for this page and return reference and change
 *	status in generic format.
 *
 */
unsigned int
pmap_disconnect_options(ppnum_t pa, unsigned int options, void *arg)
{
        unsigned refmod, vmrefmod = 0;

        pmap_page_protect_options(pa, 0, options, arg);         /* disconnect the page */

        pmap_assert(pa != vm_page_fictitious_addr);
        if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD)) {
                return 0;
        }
        refmod = pmap_phys_attributes[pa] & (PHYS_MODIFIED | PHYS_REFERENCED);

        if (refmod & PHYS_MODIFIED) {
                vmrefmod |= VM_MEM_MODIFIED;
        }
        if (refmod & PHYS_REFERENCED) {
                vmrefmod |= VM_MEM_REFERENCED;
        }

        return vmrefmod;
}
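
/*
 * Usage sketch (not in the original source): a VM-layer caller typically
 * disconnects a page and harvests its ref/mod state in one call, e.g.:
 *
 *      unsigned int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 *      if (refmod & VM_MEM_MODIFIED) {
 *              ... page is dirty and must be cleaned before reuse ...
 *      }
 *
 * Passing PMAP_OPTIONS_NOREFMOD skips the harvesting and returns 0.
 */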