/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/pmap.h>
#include <kern/ledger.h>
#include <i386/pmap_internal.h>

/*
 * Each entry in the pv_head_table is locked by a bit in the
 * pv_lock_table. The lock bits are accessed by the physical
 * address of the page they lock.
 */

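/*
 * A minimal sketch of the per-page locking pattern used by the
 * attribute accessors below: convert the page number to a physical
 * attribute index, then bracket any update of pmap_phys_attributes[]
 * with the per-page lock.
 *
 *	int pai = ppn_to_pai(pn);
 *	if (IS_MANAGED_PAGE(pai)) {
 *		LOCK_PVH(pai);
 *		pmap_phys_attributes[pai] |= PHYS_REFERENCED;
 *		UNLOCK_PVH(pai);
 *	}
 */
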
char	*pv_lock_table;		/* pointer to array of bits */
char	*pv_hash_lock_table;

pv_rooted_entry_t	pv_head_table;	/* array of entries, one per page */
uint32_t		pv_hashed_free_count = 0;
uint32_t		pv_hashed_kern_free_count = 0;

pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG];
uint32_t pmap_pagetable_corruption_incidents;
uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1);
uint64_t pmap_pagetable_corruption_interval_abstime;
thread_call_t pmap_pagetable_corruption_log_call;
static thread_call_data_t pmap_pagetable_corruption_log_call_data;
boolean_t pmap_pagetable_corruption_timeout = FALSE;

volatile uint32_t mappingrecurse = 0;

uint32_t pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark, pv_hashed_alloc_chunk, pv_hashed_kern_alloc_chunk;

thread_t mapping_replenish_thread;
event_t mapping_replenish_event, pmap_user_pv_throttle_event;

uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters;

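/*
 * Return the generic WIMG cache attribute for a physical page: pages
 * whose PTE template includes INTEL_PTE_NCACHE report as VM_WIMG_IO,
 * all others as write-back (VM_WIMG_COPYBACK).
 */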
unsigned int pmap_cache_attributes(ppnum_t pn) {
	if (pmap_get_cache_attributes(pn) & INTEL_PTE_NCACHE)
		return (VM_WIMG_IO);
	else
		return (VM_WIMG_COPYBACK);
}

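/*
 * Override the cacheability of a managed physical page. The generic
 * VM_MEM_* hints are translated into the x86 physical attribute
 * template: VM_MEM_NOT_CACHEABLE sets PHYS_NCACHE, and PHYS_PTA (the
 * PAT select bit, used here for write-combined mappings) is added
 * when the mapping is not also VM_MEM_GUARDED.
 */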
void pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr) {
	unsigned int current, template = 0;
	int pai;

	if (cacheattr & VM_MEM_NOT_CACHEABLE) {
		if (!(cacheattr & VM_MEM_GUARDED))
			template |= PHYS_PTA;
		template |= PHYS_NCACHE;
	}

	pmap_intr_assert();

	assert((pn != vm_page_fictitious_addr) && (pn != vm_page_guard_addr));

	pai = ppn_to_pai(pn);

	if (!IS_MANAGED_PAGE(pai)) {
		return;
	}

	/* Override cache attributes for this phys page.
	 * Does not walk through existing mappings to adjust them;
	 * assumes the page is disconnected.
	 */

	LOCK_PVH(pai);

	pmap_update_cache_attributes_locked(pn, template);

	current = pmap_phys_attributes[pai] & PHYS_CACHEABILITY_MASK;
	pmap_phys_attributes[pai] &= ~PHYS_CACHEABILITY_MASK;
	pmap_phys_attributes[pai] |= template;

	UNLOCK_PVH(pai);

	if ((template & PHYS_NCACHE) && !(current & PHYS_NCACHE)) {
		pmap_sync_page_attributes_phys(pn);
	}
}

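/*
 * Return the INTEL_PTE_* cacheability bits to use when mapping a
 * physical page. Unmanaged pages are conservatively treated as
 * uncacheable; the attribute read itself is lockless.
 */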
unsigned pmap_get_cache_attributes(ppnum_t pn) {
	if (last_managed_page == 0)
		return 0;

	if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) {
		return INTEL_PTE_NCACHE;
	}

	/*
	 * The cache attributes are read locklessly for efficiency.
	 */
	unsigned int attr = pmap_phys_attributes[ppn_to_pai(pn)];
	unsigned int template = 0;

	if (attr & PHYS_PTA)
		template |= INTEL_PTE_PTA;
	if (attr & PHYS_NCACHE)
		template |= INTEL_PTE_NCACHE;
	return template;
}

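/*
 * PHYS_NOENCRYPT tracking: the three routines below query, set and
 * clear a per-page attribute marking pages that need not be
 * encrypted, e.g. when an encrypted hibernation image is written.
 */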
boolean_t
pmap_is_noencrypt(ppnum_t pn)
{
	int pai;

	pai = ppn_to_pai(pn);

	if (!IS_MANAGED_PAGE(pai))
		return (FALSE);

	if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT)
		return (TRUE);

	return (FALSE);
}

void
pmap_set_noencrypt(ppnum_t pn)
{
	int pai;

	pai = ppn_to_pai(pn);

	if (IS_MANAGED_PAGE(pai)) {
		LOCK_PVH(pai);

		pmap_phys_attributes[pai] |= PHYS_NOENCRYPT;

		UNLOCK_PVH(pai);
	}
}

void
pmap_clear_noencrypt(ppnum_t pn)
{
	int pai;

	pai = ppn_to_pai(pn);

	if (IS_MANAGED_PAGE(pai)) {
		/*
		 * Synchronization at the VM layer prevents PHYS_NOENCRYPT
		 * from changing state, so we don't need the lock to inspect.
		 */
		if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
			LOCK_PVH(pai);

			pmap_phys_attributes[pai] &= ~PHYS_NOENCRYPT;

			UNLOCK_PVH(pai);
		}
	}
}

void
compute_pmap_gc_throttle(void *arg __unused)
{
}

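/*
 * Emit the recorded pagetable corruption incidents through the
 * supplied printf-style logger, reporting at most
 * PMAP_PAGETABLE_CORRUPTION_MAX_LOG records.
 */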
__private_extern__ void
pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1,2)) {
	if (pmap_pagetable_corruption_incidents > 0) {
		int i, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG);
		(*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout);
		for (i = 0; i < e; i++) {
			(*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n", pmap_pagetable_corruption_records[i].incident, pmap_pagetable_corruption_records[i].reason, pmap_pagetable_corruption_records[i].action, pmap_pagetable_corruption_records[i].abstime);
		}
	}
}

static inline void
pmap_pagetable_corruption_log_setup(void) {
	if (pmap_pagetable_corruption_log_call == NULL) {
		nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime);
		thread_call_setup(&pmap_pagetable_corruption_log_call_data,
		    (thread_call_func_t) pmap_pagetable_corruption_msg_log,
		    (thread_call_param_t) &printf);
		pmap_pagetable_corruption_log_call = &pmap_pagetable_corruption_log_call_data;
	}
}

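/*
 * Boot-time priming of the PV entry free lists: size the low-water
 * marks from DRAM size (capped at 16000 entries), then pre-allocate
 * an initial batch of hashed PV entries for both the user and the
 * kernel reserves.
 */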
void
mapping_free_prime(void)
{
	unsigned		i;
	pv_hashed_entry_t	pvh_e;
	pv_hashed_entry_t	pvh_eh;
	pv_hashed_entry_t	pvh_et;
	int			pv_cnt;

	/* Scale based on DRAM size */
	pv_hashed_low_water_mark = MAX(PV_HASHED_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 2000);
	pv_hashed_low_water_mark = MIN(pv_hashed_low_water_mark, 16000);
	/* Alterable via sysctl */
	pv_hashed_kern_low_water_mark = MAX(PV_HASHED_KERN_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 1000);
	pv_hashed_kern_low_water_mark = MIN(pv_hashed_kern_low_water_mark, 16000);
	pv_hashed_kern_alloc_chunk = PV_HASHED_KERN_ALLOC_CHUNK_INITIAL;
	pv_hashed_alloc_chunk = PV_HASHED_ALLOC_CHUNK_INITIAL;

	pv_cnt = 0;
	pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

	for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK_INITIAL); i++) {
		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

		pvh_e->qlink.next = (queue_entry_t)pvh_eh;
		pvh_eh = pvh_e;

		if (pvh_et == PV_HASHED_ENTRY_NULL)
			pvh_et = pvh_e;
		pv_cnt++;
	}
	PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);

	pv_cnt = 0;
	pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
	for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK_INITIAL; i++) {
		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

		pvh_e->qlink.next = (queue_entry_t)pvh_eh;
		pvh_eh = pvh_e;

		if (pvh_et == PV_HASHED_ENTRY_NULL)
			pvh_et = pvh_e;
		pv_cnt++;
	}
	PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
}

void mapping_replenish(void);

void mapping_adjust(void) {
	kern_return_t mres;

	pmap_pagetable_corruption_log_setup();

	mres = kernel_thread_start_priority((thread_continue_t)mapping_replenish, NULL, MAXPRI_KERNEL, &mapping_replenish_thread);
	if (mres != KERN_SUCCESS) {
		panic("pmap: mapping_replenish_thread creation failed");
	}
	thread_deallocate(mapping_replenish_thread);
}

unsigned pmap_mapping_thread_wakeups;
unsigned pmap_kernel_reserve_replenish_stat;
unsigned pmap_user_reserve_replenish_stat;
unsigned pmap_kern_reserve_alloc_stat;

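/*
 * Body of the dedicated PV entry replenish thread. Each pass refills
 * the kernel reserve first, then the user reserve, and wakes any
 * allocators throttled on pmap_user_pv_throttle_event. The thread
 * then clears mappingrecurse (re-arming the wakeup trigger) and
 * blocks on mapping_replenish_event until the reserves run low again.
 */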
void mapping_replenish(void)
{
	pv_hashed_entry_t	pvh_e;
	pv_hashed_entry_t	pvh_eh;
	pv_hashed_entry_t	pvh_et;
	int			pv_cnt;
	unsigned		i;

	/* We qualify for VM privileges... */
	current_thread()->options |= TH_OPT_VMPRIV;

	for (;;) {
		while (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
			pv_cnt = 0;
			pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

			for (i = 0; i < pv_hashed_kern_alloc_chunk; i++) {
				pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
				pvh_e->qlink.next = (queue_entry_t)pvh_eh;
				pvh_eh = pvh_e;

				if (pvh_et == PV_HASHED_ENTRY_NULL)
					pvh_et = pvh_e;
				pv_cnt++;
			}
			pmap_kernel_reserve_replenish_stat += pv_cnt;
			PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
		}

		pv_cnt = 0;
		pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

		if (pv_hashed_free_count < pv_hashed_low_water_mark) {
			for (i = 0; i < pv_hashed_alloc_chunk; i++) {
				pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

				pvh_e->qlink.next = (queue_entry_t)pvh_eh;
				pvh_eh = pvh_e;

				if (pvh_et == PV_HASHED_ENTRY_NULL)
					pvh_et = pvh_e;
				pv_cnt++;
			}
			pmap_user_reserve_replenish_stat += pv_cnt;
			PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
		}
		/* Wake threads throttled while the kernel reserve was being replenished. */
		if (pmap_pv_throttled_waiters) {
			pmap_pv_throttled_waiters = 0;
			thread_wakeup(&pmap_user_pv_throttle_event);
		}
		/* Check if the kernel pool has been depleted since the
		 * first pass, to reduce refill latency.
		 */
		if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark)
			continue;
		/* Block sans continuation to avoid yielding the kernel stack. */
		assert_wait(&mapping_replenish_event, THREAD_UNINT);
		mappingrecurse = 0;
		thread_block(THREAD_CONTINUE_NULL);
		pmap_mapping_thread_wakeups++;
	}
}

/*
 * Set specified attribute bits.
 */

void
phys_attribute_set(
	ppnum_t		pn,
	int		bits)
{
	int		pai;

	pmap_intr_assert();
	assert(pn != vm_page_fictitious_addr);
	if (pn == vm_page_guard_addr)
		return;

	pai = ppn_to_pai(pn);

	if (!IS_MANAGED_PAGE(pai)) {
		/* Not a managed page. */
		return;
	}

	LOCK_PVH(pai);
	pmap_phys_attributes[pai] |= bits;
	UNLOCK_PVH(pai);
}

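/*
 * The ref/mod accessors below are thin wrappers that translate the
 * generic VM reference/modify operations into phys_attribute_set(),
 * phys_attribute_clear() and phys_attribute_test() calls on the
 * corresponding PHYS_* bits.
 */
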
/*
 * Set the modify bit on the specified physical page.
 */

void
pmap_set_modify(ppnum_t pn)
{
	phys_attribute_set(pn, PHYS_MODIFIED);
}

/*
 * Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(ppnum_t pn)
{
	phys_attribute_clear(pn, PHYS_MODIFIED, 0, NULL);
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */

boolean_t
pmap_is_modified(ppnum_t pn)
{
	if (phys_attribute_test(pn, PHYS_MODIFIED))
		return TRUE;
	return FALSE;
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */

void
pmap_clear_reference(ppnum_t pn)
{
	phys_attribute_clear(pn, PHYS_REFERENCED, 0, NULL);
}

void
pmap_set_reference(ppnum_t pn)
{
	phys_attribute_set(pn, PHYS_REFERENCED);
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */

boolean_t
pmap_is_referenced(ppnum_t pn)
{
	if (phys_attribute_test(pn, PHYS_REFERENCED))
		return TRUE;
	return FALSE;
}

/*
 * pmap_get_refmod(phys)
 *	returns the referenced and modified bits of the specified
 *	physical page.
 */
unsigned int
pmap_get_refmod(ppnum_t pn)
{
	int		refmod;
	unsigned int	retval = 0;

	refmod = phys_attribute_test(pn, PHYS_MODIFIED | PHYS_REFERENCED);

	if (refmod & PHYS_MODIFIED)
		retval |= VM_MEM_MODIFIED;
	if (refmod & PHYS_REFERENCED)
		retval |= VM_MEM_REFERENCED;

	return (retval);
}

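/*
 * Clear the referenced and/or modified bits selected by a generic
 * VM_MEM_* mask, translating the mask to its x86 PHYS_* equivalents
 * and passing the caller's options and argument through to
 * phys_attribute_clear().
 */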
void
pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *arg)
{
	unsigned int x86Mask;

	x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
	          | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));

	phys_attribute_clear(pn, x86Mask, options, arg);
}

/*
 * pmap_clear_refmod(phys, mask)
 *	clears the referenced and modified bits as specified by the mask
 *	of the specified physical page.
 */
void
pmap_clear_refmod(ppnum_t pn, unsigned int mask)
{
	unsigned int x86Mask;

	x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0)
	          | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));

	phys_attribute_clear(pn, x86Mask, 0, NULL);
}

unsigned int
pmap_disconnect(ppnum_t pa)
{
	return (pmap_disconnect_options(pa, 0, NULL));
}

/*
 * Routine:
 *	pmap_disconnect_options
 *
 * Function:
 *	Disconnect all mappings for this page and return its reference
 *	and change status in generic format.
 */
unsigned int
pmap_disconnect_options(ppnum_t pa, unsigned int options, void *arg)
{
	unsigned refmod, vmrefmod = 0;

	pmap_page_protect_options(pa, 0, options, arg);	/* disconnect the page */

	pmap_assert(pa != vm_page_fictitious_addr);
	if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD))
		return 0;
	refmod = pmap_phys_attributes[pa] & (PHYS_MODIFIED | PHYS_REFERENCED);

	if (refmod & PHYS_MODIFIED)
		vmrefmod |= VM_MEM_MODIFIED;
	if (refmod & PHYS_REFERENCED)
		vmrefmod |= VM_MEM_REFERENCED;

	return vmrefmod;
}