/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#ifndef _I386_PMAP_INTERNAL_
#define _I386_PMAP_INTERNAL_
#ifdef MACH_KERNEL_PRIVATE

#include <vm/pmap.h>
#include <sys/kdebug.h>
#include <kern/ledger.h>
#include <kern/simple_lock.h>
#include <i386/bit_routines.h>

/*
 * pmap locking
 */

#define PMAP_LOCK(pmap) {		\
	simple_lock(&(pmap)->lock);	\
}

#define PMAP_UNLOCK(pmap) {		\
	simple_unlock(&(pmap)->lock);	\
}

#define PMAP_UPDATE_TLBS(pmap, s, e)	\
	pmap_flush_tlbs(pmap, s, e, 0, NULL)


#define PMAP_DELAY_TLB_FLUSH	0x01

#define PMAP_UPDATE_TLBS_DELAYED(pmap, s, e, c)	\
	pmap_flush_tlbs(pmap, s, e, PMAP_DELAY_TLB_FLUSH, c)

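/*
 * Typical usage, as a minimal sketch rather than a copy of any one caller:
 * take the pmap lock, edit the PTE(s), queue the TLB shootdown, then drop
 * the lock. (pmap_update_pte() is defined later in this file.)
 *
 *	PMAP_LOCK(pmap);
 *	pmap_update_pte(ptep, INTEL_PTE_WRITE, 0);	// e.g. write-protect
 *	PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
 *	PMAP_UNLOCK(pmap);
 */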

#define iswired(pte)	((pte) & INTEL_PTE_WIRED)

#ifdef	PMAP_TRACES
extern	boolean_t	pmap_trace;
#define PMAP_TRACE(x,a,b,c,d,e)				\
	if (pmap_trace) {				\
		KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e);	\
	}
#else
#define PMAP_TRACE(x,a,b,c,d,e)	KERNEL_DEBUG(x,a,b,c,d,e)
#endif /* PMAP_TRACES */

#define PMAP_TRACE_CONSTANT(x,a,b,c,d,e)	\
	KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e);

kern_return_t	pmap_expand_pml4(
			pmap_t		map,
			vm_map_offset_t	v,
			unsigned int	options);

kern_return_t	pmap_expand_pdpt(
			pmap_t		map,
			vm_map_offset_t	v,
			unsigned int	options);

void		phys_attribute_set(
			ppnum_t		phys,
			int		bits);

void		pmap_set_reference(
			ppnum_t		pn);

boolean_t	phys_page_exists(
			ppnum_t		pn);

void
pmap_flush_tlbs(pmap_t, vm_map_offset_t, vm_map_offset_t, int, pmap_flush_context *);

void
pmap_update_cache_attributes_locked(ppnum_t, unsigned);

extern const boolean_t	cpu_64bit;

/*
 * Private data structures.
 */

/*
 * For each vm_page_t, there is a list of all currently
 * valid virtual mappings of that page. An entry is
 * a pv_rooted_entry_t; the list is the pv_table.
 *
 * N.B. with the new combo rooted/hashed scheme it is
 * only possible to remove individual non-rooted entries
 * if they are found via the hashed chains, as there is no
 * way to unlink the singly linked hashed entries when navigated to
 * via the queue list off the rooted entries. Think of it as
 * hash/walk/pull, keeping track of the prev pointer while walking
 * the singly linked hash list. All of this is to save memory and
 * keep both types of pv_entries as small as possible.
 */

/*

PV HASHING Changes - JK 1/2007

Pve's establish physical to virtual mappings. These are used for aliasing of a
physical page to (potentially many) virtual addresses within pmaps. In the
previous implementation the structure of the pv_entries (each 16 bytes in size) was

typedef struct pv_entry {
	struct pv_entry	*next;
	pmap_t		pmap;
	vm_map_offset_t	va;
} *pv_entry_t;

An initial array of these is created at boot time, one per physical page of
memory, indexed by the physical page number. Additionally, a pool of entries
is created from a pv_zone to be used as needed by pmap_enter() when it is
creating new mappings. Originally, we kept this pool around because the code
in pmap_enter() was unable to block if it needed an entry and none were
available - we'd panic. Some time ago I restructured the pmap_enter() code
so that for user pmaps it can block while zalloc'ing a pv structure and restart,
removing a panic from the code (in the case of the kernel pmap we cannot block
and still panic, so, we keep a separate hot pool for use only on kernel pmaps).
The pool has not been removed since there is a large performance gain keeping
freed pv's around for reuse and not suffering the overhead of zalloc for every
new pv we need.

As pmap_enter() created new mappings it linked the new pve's for them off the
fixed pv array for that ppn (off the next pointer). These pve's are accessed
for several operations, one of them being address space teardown. In that case,
we basically do this

	for (every page/pte in the space) {
		calc pve_ptr from the ppn in the pte
		for (every pv in the list for the ppn) {
			if (this pv is for this pmap/vaddr) {
				do housekeeping
				unlink/free the pv
			}
		}
	}

The problem arose when we were running, say, 8000 (or even 2000) apache or
other processes and one or all terminate. The list hanging off each pv array
entry could have thousands of entries. We were continuously linearly searching
each of these lists as we stepped through the address space we were tearing
down. Because of the locks we hold, likely taking a cache miss for each node,
and the interrupt disabling needed for MP safety, the system became completely
unresponsive for many seconds while we did this.

Realizing that pve's are accessed in two distinct ways (linearly running the
list by ppn for operations like pmap_page_protect, and finding and
modifying/removing a single pve as part of pmap_enter processing) has led to
modifying the pve structures and databases.

There are now two types of pve structures. A "rooted" structure which is
basically the original structure accessed in an array by ppn, and a "hashed"
structure accessed on a hash list via a hash of [pmap, vaddr]. These have been
designed with the two goals of minimizing wired memory and making the lookup of
a ppn faster. Since a vast majority of pages in the system are not aliased,
and hence represented by a single pv entry, I've kept the rooted entry size as
small as possible because there is one of these dedicated for every physical
page of memory. The hashed pve's are larger due to the addition of the hash
link and the ppn entry needed for matching while running the hash list to find
the entry we are looking for. This way, only systems that have lots of
aliasing (like 2000+ httpd procs) will pay the extra memory price. Both
structures have the same first three fields allowing some simplification in
the code.

They have these shapes

typedef struct pv_rooted_entry {
	queue_head_t	qlink;
	vm_map_offset_t	va;
	pmap_t		pmap;
} *pv_rooted_entry_t;


typedef struct pv_hashed_entry {
	queue_head_t		qlink;
	vm_map_offset_t		va;
	pmap_t			pmap;
	ppnum_t			ppn;
	struct pv_hashed_entry	*nexth;
} *pv_hashed_entry_t;

The main flow difference is that the code is now aware of the rooted entry and
the hashed entries. Code that runs the pv list still starts with the rooted
entry and then continues down the qlink onto the hashed entries. Code that is
looking up a specific pv entry first checks the rooted entry and then hashes
and runs the hash list for the match. The hash list lengths are much smaller
than the original pv lists that contained all aliases for the specific ppn.

*/

typedef struct pv_rooted_entry {
	/* first three entries must match pv_hashed_entry_t */
	queue_head_t		qlink;
	vm_map_offset_t		va_and_flags;	/* virtual address for mapping */
	pmap_t			pmap;		/* pmap where mapping lies */
} *pv_rooted_entry_t;

#define PV_ROOTED_ENTRY_NULL	((pv_rooted_entry_t) 0)

typedef struct pv_hashed_entry {
	/* first three entries must match pv_rooted_entry_t */
	queue_head_t		qlink;
	vm_map_offset_t		va_and_flags;
	pmap_t			pmap;
	ppnum_t			ppn;
	struct pv_hashed_entry	*nexth;
} *pv_hashed_entry_t;

#define PV_HASHED_ENTRY_NULL	((pv_hashed_entry_t)0)

#define PVE_VA(pve)		((pve)->va_and_flags & ~PAGE_MASK)
#define PVE_FLAGS(pve)		((pve)->va_and_flags & PAGE_MASK)
#define PVE_IS_ALTACCT		0x001
#define PVE_IS_ALTACCT_PAGE(pve) \
	(((pve)->va_and_flags & PVE_IS_ALTACCT) ? TRUE : FALSE)
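
/*
 * va_and_flags overloads the page-offset bits of the (page-aligned) mapping
 * VA to carry per-pve flags. An illustrative sketch, not lifted from any
 * particular caller:
 *
 *	pve->va_and_flags = vaddr | PVE_IS_ALTACCT;	// pack flag into low bits
 *	...
 *	vm_map_offset_t va = PVE_VA(pve);	// flags masked back off
 *	if (PVE_IS_ALTACCT_PAGE(pve))		// test the flag bit
 *		...
 */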

//#define PV_DEBUG 1	/* uncomment to enable some PV debugging code */
#ifdef PV_DEBUG
#define CHK_NPVHASH()	if (0 == npvhashmask) panic("npvhash uninitialized");
#else
#define CHK_NPVHASH()
#endif

#define NPVHASHBUCKETS (4096)
#define NPVHASHMASK ((NPVHASHBUCKETS) - 1)	/* MUST BE 2^N - 1 */
#define PV_HASHED_LOW_WATER_MARK_DEFAULT	5000
#define PV_HASHED_KERN_LOW_WATER_MARK_DEFAULT	2000
#define PV_HASHED_ALLOC_CHUNK_INITIAL		2000
#define PV_HASHED_KERN_ALLOC_CHUNK_INITIAL	200

extern volatile uint32_t	mappingrecurse;
extern uint32_t	pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark;

/*
 * PV hash locking
 */

#define LOCK_PV_HASH(hash)	lock_hash_hash(hash)
#define UNLOCK_PV_HASH(hash)	unlock_hash_hash(hash)
extern uint32_t npvhashmask;
extern pv_hashed_entry_t	*pv_hash_table;  /* hash lists */
extern pv_hashed_entry_t	pv_hashed_free_list;
extern pv_hashed_entry_t	pv_hashed_kern_free_list;
decl_simple_lock_data(extern, pv_hashed_free_list_lock)
decl_simple_lock_data(extern, pv_hashed_kern_free_list_lock)
decl_simple_lock_data(extern, pv_hash_table_lock)
decl_simple_lock_data(extern, phys_backup_lock)

extern zone_t		pv_hashed_list_zone;	/* zone of pv_hashed_entry
						 * structures */

extern uint32_t		pv_hashed_free_count;
extern uint32_t		pv_hashed_kern_free_count;
/*
 * Each entry in the pv_head_table is locked by a bit in the
 * pv_lock_table. The lock bits are accessed by the address of
 * the frame they lock.
 */
#define pv_lock_table_size(n)		(((n)+BYTE_SIZE-1)/BYTE_SIZE)
#define pv_hash_lock_table_size(n)	(((n)+BYTE_SIZE-1)/BYTE_SIZE)
extern char		*pv_lock_table;		/* pointer to array of bits */
extern char		*pv_hash_lock_table;
extern pv_rooted_entry_t pv_head_table;		/* array of entries, one per page */

extern event_t mapping_replenish_event;

static inline void PV_HASHED_ALLOC(pv_hashed_entry_t *pvh_ep) {
	pmap_assert(*pvh_ep == PV_HASHED_ENTRY_NULL);
	simple_lock(&pv_hashed_free_list_lock);
	/* If the kernel reserved pool is low, let non-kernel mappings allocate
	 * synchronously, possibly subject to a throttle.
	 */
	if ((pv_hashed_kern_free_count > pv_hashed_kern_low_water_mark) &&
	    ((*pvh_ep = pv_hashed_free_list) != 0)) {
		pv_hashed_free_list = (pv_hashed_entry_t)(*pvh_ep)->qlink.next;
		pv_hashed_free_count--;
	}

	simple_unlock(&pv_hashed_free_list_lock);

	if (pv_hashed_free_count <= pv_hashed_low_water_mark) {
		if (!mappingrecurse && hw_compare_and_store(0, 1, &mappingrecurse))
			thread_wakeup(&mapping_replenish_event);
	}
}

static inline void PV_HASHED_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_cnt) {
	simple_lock(&pv_hashed_free_list_lock);
	pvh_et->qlink.next = (queue_entry_t)pv_hashed_free_list;
	pv_hashed_free_list = pvh_eh;
	pv_hashed_free_count += pv_cnt;
	simple_unlock(&pv_hashed_free_list_lock);
}
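
/*
 * Callers that free several pve's at once typically build a private chain
 * first (head pvh_eh, tail pvh_et) and hand the whole chain back under a
 * single lock acquisition. A minimal sketch of that pattern:
 *
 *	pvh_e->qlink.next = (queue_entry_t) pvh_eh;
 *	pvh_eh = pvh_e;				// push onto private chain
 *	if (pvh_et == PV_HASHED_ENTRY_NULL)
 *		pvh_et = pvh_e;			// first entry becomes the tail
 *	pv_cnt++;
 *	...
 *	PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
 */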

extern unsigned pmap_kern_reserve_alloc_stat;

static inline void PV_HASHED_KERN_ALLOC(pv_hashed_entry_t *pvh_e) {
	pmap_assert(*pvh_e == PV_HASHED_ENTRY_NULL);
	simple_lock(&pv_hashed_kern_free_list_lock);

	if ((*pvh_e = pv_hashed_kern_free_list) != 0) {
		pv_hashed_kern_free_list = (pv_hashed_entry_t)(*pvh_e)->qlink.next;
		pv_hashed_kern_free_count--;
		pmap_kern_reserve_alloc_stat++;
	}

	simple_unlock(&pv_hashed_kern_free_list_lock);

	if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
		if (!mappingrecurse && hw_compare_and_store(0, 1, &mappingrecurse))
			thread_wakeup(&mapping_replenish_event);
	}
}

static inline void PV_HASHED_KERN_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_cnt) {
	simple_lock(&pv_hashed_kern_free_list_lock);
	pvh_et->qlink.next = (queue_entry_t)pv_hashed_kern_free_list;
	pv_hashed_kern_free_list = pvh_eh;
	pv_hashed_kern_free_count += pv_cnt;
	simple_unlock(&pv_hashed_kern_free_list_lock);
}

extern uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters;
extern event_t pmap_user_pv_throttle_event;

static inline void pmap_pv_throttle(__unused pmap_t p) {
	pmap_assert(p != kernel_pmap);
	/* Apply throttle on non-kernel mappings */
	if (pv_hashed_kern_free_count < (pv_hashed_kern_low_water_mark / 2)) {
		pmap_pv_throttle_stat++;
		/* This doesn't need to be strictly accurate, merely a hint
		 * to eliminate the timeout when the reserve is replenished.
		 */
		pmap_pv_throttled_waiters++;
		assert_wait_timeout(&pmap_user_pv_throttle_event, THREAD_UNINT, 1, 1000 * NSEC_PER_USEC);
		thread_block(THREAD_CONTINUE_NULL);
	}
}

/*
 * Index into pv_head table, its lock bits, and the modify/reference and managed bits
 */

#define pa_index(pa)		(i386_btop(pa))
#define ppn_to_pai(ppn)		((int)ppn)

#define pai_to_pvh(pai)		(&pv_head_table[pai])
#define lock_pvh_pai(pai)	bit_lock(pai, (void *)pv_lock_table)
#define unlock_pvh_pai(pai)	bit_unlock(pai, (void *)pv_lock_table)
#define pvhash(idx)		(&pv_hash_table[idx])
#define lock_hash_hash(hash)	bit_lock(hash, (void *)pv_hash_lock_table)
#define unlock_hash_hash(hash)	bit_unlock(hash, (void *)pv_hash_lock_table)

#define IS_MANAGED_PAGE(x)				\
	((unsigned int)(x) <= last_managed_page &&	\
	 (pmap_phys_attributes[x] & PHYS_MANAGED))
#define IS_INTERNAL_PAGE(x)				\
	(IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_INTERNAL))
#define IS_REUSABLE_PAGE(x)				\
	(IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_REUSABLE))
#define IS_ALTACCT_PAGE(x,pve)				\
	(IS_MANAGED_PAGE((x)) &&			\
	 (PVE_IS_ALTACCT_PAGE((pve))))

/*
 * Physical page attributes. Copy bits from PTE definition.
 */
#define	PHYS_MODIFIED	INTEL_PTE_MOD	/* page modified */
#define	PHYS_REFERENCED	INTEL_PTE_REF	/* page referenced */
#define	PHYS_MANAGED	INTEL_PTE_VALID	/* page is managed */
#define	PHYS_NOENCRYPT	INTEL_PTE_USER	/* no need to encrypt this page in the hibernation image */
#define	PHYS_NCACHE	INTEL_PTE_NCACHE
#define	PHYS_PTA	INTEL_PTE_PTA
#define	PHYS_CACHEABILITY_MASK	(INTEL_PTE_PTA | INTEL_PTE_NCACHE)
#define	PHYS_INTERNAL	INTEL_PTE_WTHRU	/* page from internal object */
#define	PHYS_REUSABLE	INTEL_PTE_WRITE	/* page is "reusable" */
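
/*
 * Note that the PTE bits borrowed above all lie in the low byte of a PTE,
 * which is what lets each page's attribute set fit in one entry of the
 * char-sized pmap_phys_attributes[] array declared later in this file.
 */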

extern boolean_t	pmap_disable_kheap_nx;
extern boolean_t	pmap_disable_kstack_nx;

#define PMAP_EXPAND_OPTIONS_NONE	(0x0)
#define PMAP_EXPAND_OPTIONS_NOWAIT	(PMAP_OPTIONS_NOWAIT)
#define PMAP_EXPAND_OPTIONS_NOENTER	(PMAP_OPTIONS_NOENTER)

/*
 * Amount of virtual memory mapped by one
 * page-directory entry.
 */
#define	PDE_MAPPED_SIZE		(pdetova(1))


/*
 * Locking and TLB invalidation
 */

/*
 * Locking Protocols: (changed 2/2007 JK)
 *
 * There are two structures in the pmap module that need locking:
 * the pmaps themselves, and the per-page pv_lists (which are locked
 * by locking the pv_lock_table entry that corresponds to the pv_head
 * for the list in question.) Most routines want to lock a pmap and
 * then do operations in it that require pv_list locking -- however
 * pmap_remove_all and pmap_copy_on_write operate on a physical page
 * basis and want to do the locking in the reverse order, i.e. lock
 * a pv_list and then go through all the pmaps referenced by that list.
 *
 * The system-wide pmap lock has been removed. Now, paths take a lock
 * on the pmap before changing its 'shape', while the reverse-order lockers
 * (coming in by phys ppn) take a lock on the corresponding pv, retest
 * to be sure nothing changed during the window before they locked,
 * and can then run up/down the pv lists holding the list lock. This also
 * lets the pmap layer run (nearly completely) interrupt enabled, unlike
 * previously.
 */

/*
 * PV locking
 */

#define LOCK_PVH(index) {		\
	mp_disable_preemption();	\
	lock_pvh_pai(index);		\
}

#define UNLOCK_PVH(index) {		\
	unlock_pvh_pai(index);		\
	mp_enable_preemption();		\
}

extern uint64_t pde_mapped_size;

extern char		*pmap_phys_attributes;
extern ppnum_t		last_managed_page;

extern ppnum_t	lowest_lo;
extern ppnum_t	lowest_hi;
extern ppnum_t	highest_hi;

/*
 * when spinning through pmap_remove
 * ensure that we don't spend too much
 * time with preemption disabled.
 * I'm setting the current threshold
 * to 20us
 */
#define MAX_PREEMPTION_LATENCY_NS 20000
extern uint64_t max_preemption_latency_tsc;

/* #define DEBUGINTERRUPTS 1  uncomment to ensure pmap callers have interrupts enabled */
#ifdef DEBUGINTERRUPTS
#define pmap_intr_assert() {							\
	if (processor_avail_count > 1 && !ml_get_interrupts_enabled())		\
		panic("pmap interrupt assert %s, %d", __FILE__, __LINE__);	\
}
#else
#define pmap_intr_assert()
#endif

extern int		nx_enabled;
extern unsigned int	inuse_ptepages_count;

static inline uint32_t
pvhashidx(pmap_t pmap, vm_map_offset_t va)
{
	uint32_t hashidx = ((uint32_t)(uintptr_t)pmap ^
			    ((uint32_t)(va >> PAGE_SHIFT) & 0xFFFFFFFF)) &
			   npvhashmask;
	return hashidx;
}


/*
 * Unlinks the pv_hashed_entry_t pvh from the singly linked hash chain.
 * Properly deals with the anchor.
 * Must be called with the hash locked; does not unlock it.
 */
static inline void
pmap_pvh_unlink(pv_hashed_entry_t pvh)
{
	pv_hashed_entry_t	curh;
	pv_hashed_entry_t	*pprevh;
	int			pvhash_idx;

	CHK_NPVHASH();
	pvhash_idx = pvhashidx(pvh->pmap, PVE_VA(pvh));

	pprevh = pvhash(pvhash_idx);

#if PV_DEBUG
	if (NULL == *pprevh)
		panic("pvh_unlink null anchor"); /* JK DEBUG */
#endif
	curh = *pprevh;

	while (PV_HASHED_ENTRY_NULL != curh) {
		if (pvh == curh)
			break;
		pprevh = &curh->nexth;
		curh = curh->nexth;
	}
	if (PV_HASHED_ENTRY_NULL == curh)
		panic("pmap_pvh_unlink no pvh");
	*pprevh = pvh->nexth;
	return;
}

static inline void
pv_hash_add(pv_hashed_entry_t	pvh_e,
	    pv_rooted_entry_t	pv_h)
{
	pv_hashed_entry_t	*hashp;
	int			pvhash_idx;

	CHK_NPVHASH();
	pvhash_idx = pvhashidx(pvh_e->pmap, PVE_VA(pvh_e));
	LOCK_PV_HASH(pvhash_idx);
	insque(&pvh_e->qlink, &pv_h->qlink);
	hashp = pvhash(pvhash_idx);
#if PV_DEBUG
	if (NULL == hashp)
		panic("pv_hash_add(%p) null hash bucket", pvh_e);
#endif
	pvh_e->nexth = *hashp;
	*hashp = pvh_e;
	UNLOCK_PV_HASH(pvhash_idx);
}

static inline void
pv_hash_remove(pv_hashed_entry_t pvh_e)
{
	int			pvhash_idx;

	CHK_NPVHASH();
	pvhash_idx = pvhashidx(pvh_e->pmap, PVE_VA(pvh_e));
	LOCK_PV_HASH(pvhash_idx);
	remque(&pvh_e->qlink);
	pmap_pvh_unlink(pvh_e);
	UNLOCK_PV_HASH(pvhash_idx);
}

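/*
 * TRUE when at most one bit is set in 'distance' (a Hamming weight of 0 or
 * 1): clearing the lowest set bit with (x & (x - 1)) yields zero exactly in
 * that case. The corruption classifier below uses this to detect single-bit
 * flips in PV and PTE fields.
 */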
static inline boolean_t popcnt1(uint64_t distance) {
	return ((distance & (distance - 1)) == 0);
}

/*
 * Routines to handle suppression of/recovery from some forms of pagetable corruption
 * incidents observed in the field. These can be either software induced (wild
 * stores to the mapwindows where applicable, use-after-free errors
 * (typically of pages addressed physically), mis-directed DMAs, etc.) or due
 * to DRAM/memory hierarchy/interconnect errors. Given the theoretical rarity of these errors,
 * the recording mechanism is deliberately not MP-safe. The overarching goal is to
 * still assert on potential software races, but attempt recovery from incidents
 * identifiable as occurring due to issues beyond the control of the pmap module.
 * The latter includes single-bit errors and malformed pagetable entries.
 * We currently limit ourselves to recovery/suppression of one incident per
 * PMAP_PAGETABLE_CORRUPTION_INTERVAL seconds, and details of the incident
 * are logged.
 * Assertions are not suppressed if kernel debugging is enabled. (DRK 09)
 */

typedef enum {
	PTE_VALID		= 0x0,
	PTE_INVALID		= 0x1,
	PTE_RSVD		= 0x2,
	PTE_SUPERVISOR		= 0x4,
	PTE_BITFLIP		= 0x8,
	PV_BITFLIP		= 0x10,
	PTE_INVALID_CACHEABILITY = 0x20
} pmap_pagetable_corruption_t;

typedef enum {
	ROOT_PRESENT = 0,
	ROOT_ABSENT = 1
} pmap_pv_assertion_t;

typedef enum {
	PMAP_ACTION_IGNORE	= 0x0,
	PMAP_ACTION_ASSERT	= 0x1,
	PMAP_ACTION_RETRY	= 0x2,
	PMAP_ACTION_RETRY_RELOCK = 0x4
} pmap_pagetable_corruption_action_t;

#define	PMAP_PAGETABLE_CORRUPTION_INTERVAL (6ULL * 3600ULL)
extern uint64_t pmap_pagetable_corruption_interval_abstime;

extern uint32_t pmap_pagetable_corruption_incidents;
#define PMAP_PAGETABLE_CORRUPTION_MAX_LOG (8)
typedef struct {
	pmap_pv_assertion_t		incident;
	pmap_pagetable_corruption_t	reason;
	pmap_pagetable_corruption_action_t	action;
	pmap_t				pmap;
	vm_map_offset_t			vaddr;
	pt_entry_t			pte;
	ppnum_t				ppn;
	pmap_t				pvpmap;
	vm_map_offset_t			pvva;
	uint64_t			abstime;
} pmap_pagetable_corruption_record_t;

extern pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[];
extern uint64_t pmap_pagetable_corruption_last_abstime;
extern thread_call_t	pmap_pagetable_corruption_log_call;
extern boolean_t pmap_pagetable_corruption_timeout;

static inline void
pmap_pagetable_corruption_log(pmap_pv_assertion_t incident, pmap_pagetable_corruption_t suppress_reason,
    pmap_pagetable_corruption_action_t action, pmap_t pmap, vm_map_offset_t vaddr, pt_entry_t *ptep,
    ppnum_t ppn, pmap_t pvpmap, vm_map_offset_t pvva)
{
	uint32_t pmap_pagetable_corruption_log_index;
	pmap_pagetable_corruption_log_index = pmap_pagetable_corruption_incidents++ % PMAP_PAGETABLE_CORRUPTION_MAX_LOG;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].incident = incident;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].reason = suppress_reason;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].action = action;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].pmap = pmap;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].vaddr = vaddr;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].pte = *ptep;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].ppn = ppn;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].pvpmap = pvpmap;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].pvva = pvva;
	pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].abstime = mach_absolute_time();
	/* Asynchronously log */
	thread_call_enter(pmap_pagetable_corruption_log_call);
}

static inline pmap_pagetable_corruption_action_t
pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t *ppnp, pt_entry_t *ptep, pmap_pv_assertion_t incident)
{
	pmap_pagetable_corruption_action_t	action = PMAP_ACTION_ASSERT;
	pmap_pagetable_corruption_t	suppress_reason = PTE_VALID;
	ppnum_t			suppress_ppn = 0;
	pt_entry_t		cpte = *ptep;
	ppnum_t			cpn = pa_index(pte_to_pa(cpte));
	ppnum_t			ppn = *ppnp;
	pv_rooted_entry_t	pv_h = pai_to_pvh(ppn_to_pai(ppn));
	pv_rooted_entry_t	pv_e = pv_h;
	uint32_t		bitdex;
	pmap_t			pvpmap = pv_h->pmap;
	vm_map_offset_t		pvva = PVE_VA(pv_h);
	vm_map_offset_t		pve_flags;
	boolean_t		ppcd = FALSE;
	boolean_t		is_ept;

	/* Ideally, we'd consult the Mach VM here to definitively determine
	 * the nature of the mapping for this address space and address.
	 * As that would be a layering violation in this context, we
	 * use various heuristics to recover from single bit errors,
	 * malformed pagetable entries etc. These are not intended
	 * to be comprehensive.
	 */

	/* As a precautionary measure, mark A+D */
	pmap_phys_attributes[ppn_to_pai(ppn)] |= (PHYS_MODIFIED | PHYS_REFERENCED);
	is_ept = is_ept_pmap(pmap);

	/*
	 * Correct potential single bit errors in either (but not both) element
	 * of the PV
	 */
	do {
		if ((popcnt1((uintptr_t)pv_e->pmap ^ (uintptr_t)pmap) && PVE_VA(pv_e) == vaddr) ||
		    (pv_e->pmap == pmap && popcnt1(PVE_VA(pv_e) ^ vaddr))) {
			pve_flags = PVE_FLAGS(pv_e);
			pv_e->pmap = pmap;
			pv_h->va_and_flags = vaddr | pve_flags;
			suppress_reason = PV_BITFLIP;
			action = PMAP_ACTION_RETRY;
			goto pmap_cpc_exit;
		}
	} while (((pv_e = (pv_rooted_entry_t) queue_next(&pv_e->qlink))) && (pv_e != pv_h));

	/* Discover root entries with a Hamming
	 * distance of 1 from the supplied
	 * physical page frame.
	 */
	for (bitdex = 0; bitdex < (sizeof(ppnum_t) << 3); bitdex++) {
		ppnum_t npn = cpn ^ (ppnum_t) (1ULL << bitdex);
		if (IS_MANAGED_PAGE(npn)) {
			pv_rooted_entry_t npv_h = pai_to_pvh(ppn_to_pai(npn));
			if (PVE_VA(npv_h) == vaddr && npv_h->pmap == pmap) {
				suppress_reason = PTE_BITFLIP;
				suppress_ppn = npn;
				action = PMAP_ACTION_RETRY_RELOCK;
				UNLOCK_PVH(ppn_to_pai(ppn));
				*ppnp = npn;
				goto pmap_cpc_exit;
			}
		}
	}

	if (pmap == kernel_pmap) {
		action = PMAP_ACTION_ASSERT;
		goto pmap_cpc_exit;
	}

	/*
	 * Check for malformed/inconsistent entries.
	 * The first check here isn't useful for EPT PTEs because INTEL_EPT_NCACHE == 0
	 */
	if (!is_ept && ((cpte & (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU | INTEL_PTE_PTA)) == (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU))) {
		action = PMAP_ACTION_IGNORE;
		suppress_reason = PTE_INVALID_CACHEABILITY;
	} else if (cpte & INTEL_PTE_RSVD) {
		action = PMAP_ACTION_IGNORE;
		suppress_reason = PTE_RSVD;
	} else if ((pmap != kernel_pmap) && (!is_ept) && ((cpte & INTEL_PTE_USER) == 0)) {
		action = PMAP_ACTION_IGNORE;
		suppress_reason = PTE_SUPERVISOR;
	}
pmap_cpc_exit:
	PE_parse_boot_argn("-pmap_pagetable_corruption_deassert", &ppcd, sizeof(ppcd));

	if (debug_boot_arg && !ppcd) {
		action = PMAP_ACTION_ASSERT;
	}

	if ((mach_absolute_time() - pmap_pagetable_corruption_last_abstime) < pmap_pagetable_corruption_interval_abstime) {
		action = PMAP_ACTION_ASSERT;
		pmap_pagetable_corruption_timeout = TRUE;
	} else {
		pmap_pagetable_corruption_last_abstime = mach_absolute_time();
	}
	pmap_pagetable_corruption_log(incident, suppress_reason, action, pmap, vaddr, &cpte, *ppnp, pvpmap, pvva);
	return action;
}

/*
 * Remove pv list entry.
 * Called with pv_head_table entry locked.
 * Returns pv entry to be freed (or NULL).
 */
static inline __attribute__((always_inline)) pv_hashed_entry_t
pmap_pv_remove(pmap_t		pmap,
	       vm_map_offset_t	vaddr,
	       ppnum_t		*ppnp,
	       pt_entry_t	*pte,
	       boolean_t	*was_altacct)
{
	pv_hashed_entry_t	pvh_e;
	pv_rooted_entry_t	pv_h;
	pv_hashed_entry_t	*pprevh;
	int			pvhash_idx;
	uint32_t		pv_cnt;
	ppnum_t			ppn;

	*was_altacct = FALSE;
pmap_pv_remove_retry:
	ppn = *ppnp;
	pvh_e = PV_HASHED_ENTRY_NULL;
	pv_h = pai_to_pvh(ppn_to_pai(ppn));

	if (__improbable(pv_h->pmap == PMAP_NULL)) {
		pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_ABSENT);
		if (pac == PMAP_ACTION_IGNORE)
			goto pmap_pv_remove_exit;
		else if (pac == PMAP_ACTION_ASSERT)
			panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p, %p): null pv_list!", pmap, vaddr, ppn, *pte, ppnp, pte);
		else if (pac == PMAP_ACTION_RETRY_RELOCK) {
			LOCK_PVH(ppn_to_pai(*ppnp));
			pmap_phys_attributes[ppn_to_pai(*ppnp)] |= (PHYS_MODIFIED | PHYS_REFERENCED);
			goto pmap_pv_remove_retry;
		} else if (pac == PMAP_ACTION_RETRY)
			goto pmap_pv_remove_retry;
	}

	if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
		*was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pv_h);
		/*
		 * Header is the pv_rooted_entry.
		 * We can't free that. If there is a queued
		 * entry after this one we remove that
		 * from the ppn queue, we remove it from the hash chain
		 * and copy it to the rooted entry. Then free it instead.
		 */
		pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
		if (pv_h != (pv_rooted_entry_t) pvh_e) {
			/*
			 * Entry queued to root, remove this from hash
			 * and install as new root.
			 */
			CHK_NPVHASH();
			pvhash_idx = pvhashidx(pvh_e->pmap, PVE_VA(pvh_e));
			LOCK_PV_HASH(pvhash_idx);
			remque(&pvh_e->qlink);
			pprevh = pvhash(pvhash_idx);
			if (PV_HASHED_ENTRY_NULL == *pprevh) {
				panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x): "
				      "empty hash, removing rooted",
				      pmap, vaddr, ppn);
			}
			pmap_pvh_unlink(pvh_e);
			UNLOCK_PV_HASH(pvhash_idx);
			pv_h->pmap = pvh_e->pmap;
			pv_h->va_and_flags = pvh_e->va_and_flags;
			/* dispose of pvh_e */
		} else {
			/* none queued after rooted */
			pv_h->pmap = PMAP_NULL;
			pvh_e = PV_HASHED_ENTRY_NULL;
		}
	} else {
		/*
		 * not removing rooted pv. find it on hash chain, remove from
		 * ppn queue and hash chain and free it
		 */
		CHK_NPVHASH();
		pvhash_idx = pvhashidx(pmap, vaddr);
		LOCK_PV_HASH(pvhash_idx);
		pprevh = pvhash(pvhash_idx);
		if (PV_HASHED_ENTRY_NULL == *pprevh) {
			panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p): empty hash",
			      pmap, vaddr, ppn, *pte, pte);
		}
		pvh_e = *pprevh;
		pmap_pv_hashlist_walks++;
		pv_cnt = 0;
		while (PV_HASHED_ENTRY_NULL != pvh_e) {
			pv_cnt++;
			if (pvh_e->pmap == pmap &&
			    PVE_VA(pvh_e) == vaddr &&
			    pvh_e->ppn == ppn)
				break;
			pprevh = &pvh_e->nexth;
			pvh_e = pvh_e->nexth;
		}

		if (PV_HASHED_ENTRY_NULL == pvh_e) {
			pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_PRESENT);

			if (pac == PMAP_ACTION_ASSERT)
				panic("Possible memory corruption: pmap_pv_remove(%p, 0x%llx, 0x%x, 0x%llx, %p, %p): pv not on hash, head: %p, 0x%llx", pmap, vaddr, ppn, *pte, ppnp, pte, pv_h->pmap, PVE_VA(pv_h));
			else {
				UNLOCK_PV_HASH(pvhash_idx);
				if (pac == PMAP_ACTION_RETRY_RELOCK) {
					LOCK_PVH(ppn_to_pai(*ppnp));
					pmap_phys_attributes[ppn_to_pai(*ppnp)] |= (PHYS_MODIFIED | PHYS_REFERENCED);
					goto pmap_pv_remove_retry;
				} else if (pac == PMAP_ACTION_RETRY) {
					goto pmap_pv_remove_retry;
				} else if (pac == PMAP_ACTION_IGNORE) {
					goto pmap_pv_remove_exit;
				}
			}
		}

		*was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pvh_e);

		pmap_pv_hashlist_cnts += pv_cnt;
		if (pmap_pv_hashlist_max < pv_cnt)
			pmap_pv_hashlist_max = pv_cnt;
		*pprevh = pvh_e->nexth;
		remque(&pvh_e->qlink);
		UNLOCK_PV_HASH(pvhash_idx);
	}
pmap_pv_remove_exit:
	return pvh_e;
}

static inline __attribute__((always_inline)) boolean_t
pmap_pv_is_altacct(
	pmap_t		pmap,
	vm_map_offset_t	vaddr,
	ppnum_t		ppn)
{
	pv_hashed_entry_t	pvh_e;
	pv_rooted_entry_t	pv_h;
	int			pvhash_idx;
	boolean_t		is_altacct;

	pvh_e = PV_HASHED_ENTRY_NULL;
	pv_h = pai_to_pvh(ppn_to_pai(ppn));

	if (__improbable(pv_h->pmap == PMAP_NULL)) {
		return FALSE;
	}

	if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
		/*
		 * Header is the pv_rooted_entry.
		 */
		return IS_ALTACCT_PAGE(ppn, pv_h);
	}

	CHK_NPVHASH();
	pvhash_idx = pvhashidx(pmap, vaddr);
	LOCK_PV_HASH(pvhash_idx);
	pvh_e = *(pvhash(pvhash_idx));
	if (PV_HASHED_ENTRY_NULL == pvh_e) {
		panic("Possible memory corruption: pmap_pv_is_altacct(%p,0x%llx,0x%x): empty hash",
		      pmap, vaddr, ppn);
	}
	while (PV_HASHED_ENTRY_NULL != pvh_e) {
		if (pvh_e->pmap == pmap &&
		    PVE_VA(pvh_e) == vaddr &&
		    pvh_e->ppn == ppn)
			break;
		pvh_e = pvh_e->nexth;
	}
	if (PV_HASHED_ENTRY_NULL == pvh_e) {
		is_altacct = FALSE;
	} else {
		is_altacct = IS_ALTACCT_PAGE(ppn, pvh_e);
	}
	UNLOCK_PV_HASH(pvhash_idx);

	return is_altacct;
}

extern int	pt_fake_zone_index;
static inline void
PMAP_ZINFO_PALLOC(pmap_t pmap, vm_size_t bytes)
{
	pmap_ledger_credit(pmap, task_ledgers.tkm_private, bytes);
}

static inline void
PMAP_ZINFO_PFREE(pmap_t pmap, vm_size_t bytes)
{
	pmap_ledger_debit(pmap, task_ledgers.tkm_private, bytes);
}

static inline void
PMAP_ZINFO_SALLOC(pmap_t pmap, vm_size_t bytes)
{
	pmap_ledger_credit(pmap, task_ledgers.tkm_shared, bytes);
}

static inline void
PMAP_ZINFO_SFREE(pmap_t pmap, vm_size_t bytes)
{
	pmap_ledger_debit(pmap, task_ledgers.tkm_shared, bytes);
}

extern boolean_t	pmap_initialized;	/* Has pmap_init completed? */
#define valid_page(x)	(pmap_initialized && pmap_valid_page(x))

// XXX
#define HIGH_MEM_BASE	((uint32_t)(-NBPDE))	/* shared gdt etc seg addr */ /* XXX64 ?? */
// XXX


int		phys_attribute_test(
			ppnum_t		phys,
			int		bits);
void		phys_attribute_clear(
			ppnum_t		phys,
			int		bits,
			unsigned int	options,
			void		*arg);

//#define PCID_DEBUG 1
#if	PCID_DEBUG
#define pmap_pcid_log(fmt, args...)	\
	do {				\
		kprintf(fmt, ##args);	\
		printf(fmt, ##args);	\
	} while(0)
#else
#define pmap_pcid_log(fmt, args...)
#endif
void	pmap_pcid_configure(void);


/*
 * Atomic 64-bit compare and exchange of a page table entry.
 */
static inline boolean_t
pmap_cmpx_pte(pt_entry_t *entryp, pt_entry_t old, pt_entry_t new)
{
	boolean_t ret;

	/*
	 * Load the old value into %rax
	 * Load the new value into another register
	 * Compare-exchange-quad at address entryp
	 * If the compare succeeds, the new value is stored, return TRUE.
	 * Otherwise, no swap is made, return FALSE.
	 */
	asm volatile(
		"	lock; cmpxchgq %2,(%3)	\n\t"
		"	setz	%%al		\n\t"
		"	movzbl	%%al,%0"
		: "=a" (ret)
		: "a" (old),
		  "r" (new),
		  "r" (entryp)
		: "memory");
	return ret;
}
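
/*
 * For reference: this is functionally equivalent to the GCC/clang builtin
 * __sync_bool_compare_and_swap(entryp, old, new); the hand-written asm keeps
 * the lock-prefixed cmpxchgq sequence explicit.
 */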

extern uint32_t pmap_update_clear_pte_count;

static inline void pmap_update_pte(pt_entry_t *mptep, uint64_t pclear_bits, uint64_t pset_bits) {
	pt_entry_t npte, opte;
	do {
		opte = *mptep;
		if (__improbable(opte == 0)) {
			pmap_update_clear_pte_count++;
			break;
		}
		npte = opte & ~(pclear_bits);
		npte |= pset_bits;
	} while (!pmap_cmpx_pte(mptep, opte, npte));
}
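
/*
 * Usage sketch (not taken from any specific caller): atomically clear the
 * reference bit while preserving every other bit, tolerating a concurrent
 * PTE clear, then shoot down any stale TLB entry.
 *
 *	pmap_update_pte(ptep, INTEL_PTE_REF, 0);
 *	PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
 */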

#if	defined(__x86_64__)
/*
 * The single pml4 page per pmap is allocated at pmap create time and exists
 * for the duration of the pmap. We allocate this page in kernel vm.
 * This returns the address of the requested pml4 entry in the top level page.
 */
static inline
pml4_entry_t *
pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
{
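	/*
	 * Reject non-canonical addresses: with 48 implemented VA bits, bits
	 * 63:47 must all equal bit 47, which leaves exactly the two ranges
	 * tested here.
	 */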
	if (__improbable((vaddr > 0x00007FFFFFFFFFFFULL) &&
			 (vaddr < 0xFFFF800000000000ULL))) {
		return (NULL);
	}

#if	DEBUG
	return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_cr3)[(vaddr >> PML4SHIFT) & (NPML4PG-1)]);
#else
	return &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)];
#endif
}

/*
 * Returns address of requested PDPT entry in the physmap.
 */
static inline pdpt_entry_t *
pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
{
	pml4_entry_t	newpf;
	pml4_entry_t	*pml4;
	boolean_t	is_ept;

	pml4 = pmap64_pml4(pmap, vaddr);
	is_ept = is_ept_pmap(pmap);

	if (pml4 && (*pml4 & PTE_VALID_MASK(is_ept))) {
		newpf = *pml4 & PG_FRAME;
		return &((pdpt_entry_t *) PHYSMAP_PTOV(newpf))
			[(vaddr >> PDPTSHIFT) & (NPDPTPG-1)];
	}
	return (NULL);
}
/*
 * Returns the address of the requested PDE entry in the physmap.
 */
static inline pd_entry_t *
pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
{
	pdpt_entry_t	newpf;
	pdpt_entry_t	*pdpt;
	boolean_t	is_ept;

	pdpt = pmap64_pdpt(pmap, vaddr);
	is_ept = is_ept_pmap(pmap);

	if (pdpt && (*pdpt & PTE_VALID_MASK(is_ept))) {
		newpf = *pdpt & PG_FRAME;
		return &((pd_entry_t *) PHYSMAP_PTOV(newpf))
			[(vaddr >> PDSHIFT) & (NPDPG-1)];
	}
	return (NULL);
}

static inline pd_entry_t *
pmap_pde(pmap_t m, vm_map_offset_t v)
{
	pd_entry_t	*pde;

	pde = pmap64_pde(m, v);

	return pde;
}


/*
 * Return the address of the mapped pte for vaddr va in pmap pmap.
 *
 * In case the pde maps a superpage, return the pde, which, in this case,
 * is the actual page table entry.
 */
static inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
{
	pd_entry_t	*pde;
	pd_entry_t	newpf;
	boolean_t	is_ept;

	assert(pmap);
	pde = pmap64_pde(pmap, vaddr);

	is_ept = is_ept_pmap(pmap);

	if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
		if (*pde & PTE_PS)
			return pde;
		newpf = *pde & PG_FRAME;
		return &((pt_entry_t *)PHYSMAP_PTOV(newpf))
			[i386_btop(vaddr) & (ppnum_t)(NPTEPG-1)];
	}
	return (NULL);
}
#endif
#if	DEBUG
#define DPRINTF(x...)	kprintf(x)
#else
#define DPRINTF(x...)
#endif

#endif /* MACH_KERNEL_PRIVATE */
#endif /* _I386_PMAP_INTERNAL_ */