/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	(These guys wrote the Vax version)
 *
 *	Physical Map management code for Intel i386, i486, and i860.
 *
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <string.h>
#include <mach_ldebug.h>

#include <libkern/OSAtomic.h>

#include <mach/machine/vm_types.h>

#include <mach/boolean.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/ledger.h>
#include <kern/mach_param.h>

#include <kern/kalloc.h>
#include <kern/spl.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <mach/machine/vm_param.h>
#include <machine/thread.h>

#include <kern/misc_protos.h>			/* prototyping */
#include <i386/misc_protos.h>
#include <i386/i386_lowmem.h>
#include <x86_64/lowglobals.h>

#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/machine_cpu.h>
#include <i386/seg.h>
#include <i386/serial_io.h>
#include <i386/cpu_capabilities.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/tsc.h>
#include <i386/pmap_internal.h>
#include <i386/pmap_pcid.h>
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <vm/vm_protos.h>

#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <libkern/kernel_mach_header.h>

#include <pexpert/i386/efi.h>


#ifdef IWANTTODEBUG
#undef	DEBUG
#define DEBUG	1
#define POSTCODE_DELAY 1
#include <i386/postcode.h>
#endif /* IWANTTODEBUG */

#ifdef	PMAP_DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif
/* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
 * in the trampolines for kernel/user boundary TLB coherency.
 */
char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
boolean_t pmap_trace = FALSE;

boolean_t	no_shared_cr3 = DEBUG;		/* TRUE for DEBUG by default */

int		nx_enabled = 1;			/* enable no-execute protection */
int		allow_data_exec  = VM_ABI_32;	/* 32-bit apps may execute data by default, 64-bit apps may not */
int		allow_stack_exec = 0;		/* No apps may execute from the stack by default */

const boolean_t	cpu_64bit = TRUE;		/* Mais oui! */

uint64_t	max_preemption_latency_tsc = 0;

pv_hashed_entry_t	*pv_hash_table;		/* hash lists */

uint32_t	npvhashmask = 0, npvhashbuckets = 0;

pv_hashed_entry_t	pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
pv_hashed_entry_t	pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
decl_simple_lock_data(,pv_hashed_free_list_lock)
decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
decl_simple_lock_data(,pv_hash_table_lock)

decl_simple_lock_data(,phys_backup_lock)

zone_t		pv_hashed_list_zone;	/* zone of pv_hashed_entry structures */

/*
 *	First and last physical addresses that we maintain any information
 *	for.  Initialized to zero so that pmap operations done before
 *	pmap_init won't touch any non-existent structures.
 */
boolean_t	pmap_initialized = FALSE;/* Has pmap_init completed? */

static struct vm_object kptobj_object_store;
static struct vm_object kpml4obj_object_store;
static struct vm_object kpdptobj_object_store;

/*
 *	Array of physical page attributes for managed pages.
 *	One byte per physical page.
 */
char		*pmap_phys_attributes;
ppnum_t		last_managed_page = 0;

/*
 *	Amount of virtual memory mapped by one
 *	page-directory entry.
 */

uint64_t	pde_mapped_size = PDE_MAPPED_SIZE;

unsigned	pmap_memory_region_count;
unsigned	pmap_memory_region_current;

pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];

/*
 *	Other useful macros.
 */
#define current_pmap()		(vm_map_pmap(current_thread()->map))

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;

struct zone	*pmap_zone;		/* zone of pmap structures */

struct zone	*pmap_anchor_zone;
int		pmap_debug = 0;		/* flag for debugging prints */

unsigned int	inuse_ptepages_count = 0;
long long	alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
unsigned int	bootstrap_wired_pages = 0;
int		pt_fake_zone_index = -1;

extern	long	NMIPI_acks;

boolean_t	kernel_text_ps_4K = TRUE;
boolean_t	wpkernel = TRUE;

extern char	end;

static int	nkpt;

pt_entry_t	*DMAP1, *DMAP2;
caddr_t		DADDR1;
caddr_t		DADDR2;

boolean_t	pmap_disable_kheap_nx = FALSE;
boolean_t	pmap_disable_kstack_nx = FALSE;
extern boolean_t doconstro_override;

extern long __stack_chk_guard[];

boolean_t pmap_ept_support_ad = FALSE;


/*
 *	Map memory at initialization.  The physical addresses being
 *	mapped are not managed and are never unmapped.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(
	vm_offset_t	virt,
	vm_map_offset_t	start_addr,
	vm_map_offset_t	end_addr,
	vm_prot_t	prot,
	unsigned int	flags)
{
	int		ps;

	ps = PAGE_SIZE;
	while (start_addr < end_addr) {
		pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
			   (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
		virt += ps;
		start_addr += ps;
	}
	return(virt);
}
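/*
 * Editor's usage sketch (hedged, not original source): pmap_map() is a
 * boot-time convenience for wiring a physically contiguous range into the
 * kernel map one 4K page at a time; the names below are hypothetical.
 *
 *	vm_offset_t va = pmap_map(some_virt,		// assumed free kernel VA
 *	                          phys_base,		// start physical address
 *	                          phys_base + 4 * PAGE_SIZE,
 *	                          VM_PROT_READ | VM_PROT_WRITE, 0);
 *	// returns the first VA past the new mappings, i.e.
 *	// some_virt + 4 * PAGE_SIZE
 */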

extern	char			*first_avail;
extern	vm_offset_t		virtual_avail, virtual_end;
extern	pmap_paddr_t		avail_start, avail_end;
extern	vm_offset_t		sHIB;
extern	vm_offset_t		eHIB;
extern	vm_offset_t		stext;
extern	vm_offset_t		etext;
extern	vm_offset_t		sdata, edata;
extern	vm_offset_t		sconstdata, econstdata;

extern void			*KPTphys;

boolean_t pmap_smep_enabled = FALSE;
boolean_t pmap_smap_enabled = FALSE;

void
pmap_cpu_init(void)
{
	cpu_data_t	*cdp = current_cpu_datap();
	/*
	 * Here early in the life of a processor (from cpu_mode_init()).
	 * Ensure global page feature is disabled at this point.
	 */

	set_cr4(get_cr4() &~ CR4_PGE);

	/*
	 * Initialize the per-cpu, TLB-related fields.
	 */
	cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
	cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
	cdp->cpu_tlb_invalid = FALSE;
	cdp->cpu_task_map = TASK_MAP_64BIT;
	pmap_pcid_configure();
	if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
		boolean_t nsmep;
		if (!PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
			set_cr4(get_cr4() | CR4_SMEP);
			pmap_smep_enabled = TRUE;
		}
	}
	if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) {
		boolean_t nsmap;
		if (!PE_parse_boot_argn("-pmap_smap_disable", &nsmap, sizeof(nsmap))) {
			set_cr4(get_cr4() | CR4_SMAP);
			pmap_smap_enabled = TRUE;
		}
	}

	if (cdp->cpu_fixed_pmcs_enabled) {
		boolean_t enable = TRUE;
		cpu_pmc_control(&enable);
	}
}

static uint32_t pmap_scale_shift(void) {
	uint32_t scale = 0;

	if (sane_size <= 8*GB) {
		scale = (uint32_t)(sane_size / (2 * GB));
	} else if (sane_size <= 32*GB) {
		scale = 4 + (uint32_t)((sane_size - (8 * GB))/ (4 * GB));
	} else {
		scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB))/ (8 * GB)));
	}
	return scale;
}
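/*
 * Editor's worked example (not original source): pmap_scale_shift() widens
 * the PV hash as physical memory grows. With a hypothetical
 * sane_size = 16GB the middle branch yields
 * scale = 4 + (16GB - 8GB) / 4GB = 6, so pmap_bootstrap() below sizes the
 * table as (NPVHASHBUCKETS << 6) buckets; memory at or above 64GB caps out
 * at scale = 10 + MIN(4, ...) = 14.
 */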

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *	Called with mapping OFF.  Page_size must already be set.
 */

void
pmap_bootstrap(
	__unused vm_offset_t	load_start,
	__unused boolean_t	IA32e)
{
#if NCOPY_WINDOWS > 0
	vm_offset_t	va;
	int i;
#endif
	assert(IA32e);

	vm_last_addr = VM_MAX_KERNEL_ADDRESS;	/* Set the highest address
						 * known to VM */
	/*
	 *	The kernel's pmap is statically allocated so we don't
	 *	have to use pmap_create, which is unlikely to work
	 *	correctly at this part of the boot sequence.
	 */

	kernel_pmap = &kernel_pmap_store;
	kernel_pmap->ref_count = 1;
	kernel_pmap->nx_enabled = TRUE;
	kernel_pmap->pm_task_map = TASK_MAP_64BIT;
	kernel_pmap->pm_obj = (vm_object_t) NULL;
	kernel_pmap->dirbase = (pd_entry_t *)((uintptr_t)IdlePTD);
	kernel_pmap->pm_pdpt = (pd_entry_t *) ((uintptr_t)IdlePDPT);
	kernel_pmap->pm_pml4 = IdlePML4;
	kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
	kernel_pmap->pm_eptp = 0;
	pmap_pcid_initialize_kernel(kernel_pmap);



	current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;

	nkpt = NKPT;
	OSAddAtomic(NKPT, &inuse_ptepages_count);
	OSAddAtomic64(NKPT, &alloc_ptepages_count);
	bootstrap_wired_pages = NKPT;

	virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
	virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);

#if NCOPY_WINDOWS > 0
	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*INTEL_PGBYTES);

	va = virtual_avail;

	for (i=0; i<PMAP_NWINDOWS; i++) {
#if 1
	    kprintf("trying to do SYSMAP idx %d %p\n", i,
		current_cpu_datap());
	    kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap);
	    kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow);
	    kprintf("two stuff %p %p\n",
		   (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
		   (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR));
#endif
	    SYSMAP(caddr_t,
		   (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
		   (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
		   1);
	    current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP =
		&(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store);
	    *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
	}

	/* DMAP user for debugger */
	SYSMAP(caddr_t, DMAP1, DADDR1, 1);
	SYSMAP(caddr_t, DMAP2, DADDR2, 1);  /* XXX temporary - can remove */

	virtual_avail = va;
#endif
	if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof (npvhashmask))) {
		npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1;

	}

	npvhashbuckets = npvhashmask + 1;

	if (0 != ((npvhashbuckets) & npvhashmask)) {
		panic("invalid hash %d, must be ((2^N)-1), "
		    "using default %d\n", npvhashmask, NPVHASHMASK);
	}

	simple_lock_init(&kernel_pmap->lock, 0);
	simple_lock_init(&pv_hashed_free_list_lock, 0);
	simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
	simple_lock_init(&pv_hash_table_lock,0);
	simple_lock_init(&phys_backup_lock, 0);

	pmap_cpu_init();

	if (pmap_pcid_ncpus)
		printf("PMAP: PCID enabled\n");

	if (pmap_smep_enabled)
		printf("PMAP: Supervisor Mode Execute Protection enabled\n");
	if (pmap_smap_enabled)
		printf("PMAP: Supervisor Mode Access Protection enabled\n");

#if	DEBUG
	printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
	printf("early_random(): 0x%qx\n", early_random());
#endif
	boolean_t ptmp;
	/* Check if the user has requested disabling stack or heap no-execute
	 * enforcement. These are "const" variables; that qualifier is cast away
	 * when altering them. The TEXT/DATA const sections are marked
	 * write protected later in the kernel startup sequence, so altering
	 * them is possible at this point, in pmap_bootstrap().
	 */
	if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
		boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
		*pdknxp = TRUE;
	}

	if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
		boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
		*pdknhp = TRUE;
	}

	boot_args *args = (boot_args *)PE_state.bootArgs;
	if (args->efiMode == kBootArgsEfiMode32) {
		printf("EFI32: kernel virtual space limited to 4GB\n");
		virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
	}
	kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
			(long)KERNEL_BASE, (long)virtual_end);
	kprintf("Available physical space from 0x%llx to 0x%llx\n",
			avail_start, avail_end);

	/*
	 * The -no_shared_cr3 boot-arg is a debugging feature (set by default
	 * in the DEBUG kernel) to force the kernel to switch to its own map
	 * (and cr3) when control is in kernelspace. The kernel's map does not
	 * include (i.e. share) userspace so wild references will cause
	 * a panic. Only copyin and copyout are exempt from this.
	 */
	(void) PE_parse_boot_argn("-no_shared_cr3",
				  &no_shared_cr3, sizeof (no_shared_cr3));
	if (no_shared_cr3)
		kprintf("Kernel not sharing user map\n");

#ifdef	PMAP_TRACES
	if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
		kprintf("Kernel traces for pmap operations enabled\n");
	}
#endif	/* PMAP_TRACES */
}

void
pmap_virtual_space(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	*startp = virtual_avail;
	*endp = virtual_end;
}




#if HIBERNATION

#include <IOKit/IOHibernatePrivate.h>

int32_t		pmap_npages;
int32_t		pmap_teardown_last_valid_compact_indx = -1;


void	hibernate_rebuild_pmap_structs(void);
void	hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
void	pmap_pack_index(uint32_t);
int32_t	pmap_unpack_index(pv_rooted_entry_t);


int32_t
pmap_unpack_index(pv_rooted_entry_t pv_h)
{
	int32_t	indx = 0;

	indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
	indx = indx << 16;
	indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);

	*((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
	*((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);

	return (indx);
}


void
pmap_pack_index(uint32_t indx)
{
	pv_rooted_entry_t	pv_h;

	pv_h = &pv_head_table[indx];

	*((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
	*((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);

	*((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
	*((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
}
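/*
 * Editor's worked example (not original source): the pack/unpack pair stows
 * a 32-bit pv_head_table index in the otherwise-unused top 16 bits of the
 * two canonical-form queue pointers. For a hypothetical indx = 0x00012345:
 *
 *	qlink.next top 16 bits <- 0x0001   (indx >> 16)
 *	qlink.prev top 16 bits <- 0x2345   (indx & 0xffff)
 *
 * pmap_unpack_index() reassembles 0x00012345 from those two halves, then
 * re-sets the high bits to 0xffff to restore canonical kernel pointers.
 */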


void
hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
{
	int32_t	i;
	int32_t	compact_target_indx;

	compact_target_indx = 0;

	for (i = 0; i < pmap_npages; i++) {
		if (pv_head_table[i].pmap == PMAP_NULL) {

			if (pv_head_table[compact_target_indx].pmap != PMAP_NULL)
				compact_target_indx = i;
		} else {
			pmap_pack_index((uint32_t)i);

			if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
				/*
				 * we've got a hole to fill, so
				 * move this pv_rooted_entry_t to its new home
				 */
				pv_head_table[compact_target_indx] = pv_head_table[i];
				pv_head_table[i].pmap = PMAP_NULL;

				pmap_teardown_last_valid_compact_indx = compact_target_indx;
				compact_target_indx++;
			} else
				pmap_teardown_last_valid_compact_indx = i;
		}
	}
	*unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx+1];
	*unneeded_end = (addr64_t)&pv_head_table[pmap_npages-1];

	HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
}


void
hibernate_rebuild_pmap_structs(void)
{
	int32_t	cindx, eindx, rindx;
	pv_rooted_entry_t	pv_h;

	eindx = (int32_t)pmap_npages;

	for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {

		pv_h = &pv_head_table[cindx];

		rindx = pmap_unpack_index(pv_h);
		assert(rindx < pmap_npages);

		if (rindx != cindx) {
			/*
			 * this pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs,
			 * so move it back to its real location
			 */
			pv_head_table[rindx] = pv_head_table[cindx];
		}
		if (rindx+1 != eindx) {
			/*
			 * the 'hole' between this pv_rooted_entry_t and the previous
			 * pv_rooted_entry_t we moved needs to be initialized as
			 * a range of zero'd pv_rooted_entry_t's
			 */
			bzero((char *)&pv_head_table[rindx+1], (eindx - rindx - 1) * sizeof (struct pv_rooted_entry));
		}
		eindx = rindx;
	}
	if (rindx)
		bzero ((char *)&pv_head_table[0], rindx * sizeof (struct pv_rooted_entry));

	HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
}
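/*
 * Editor's illustrative trace (not original source; indices hypothetical):
 * with pv_head_table = [used, free, free, used, used], teardown packs each
 * used entry's own index into it, then compacts to [used0, used3, used4, ...],
 * reporting entries 3..npages-1 as unneeded so hibernate can skip saving
 * them. On wake, rebuild walks the compact prefix backwards, unpacks each
 * stored index (slot 2 -> home 4, slot 1 -> home 3), copies entries back to
 * their homes, and bzero()s the freed slots in between.
 */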

#endif

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	long			npages;
	vm_offset_t		addr;
	vm_size_t		s, vsize;
	vm_map_offset_t		vaddr;
	ppnum_t			ppn;


	kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
	_vm_object_allocate((vm_object_size_t)NPML4PGS * PAGE_SIZE, &kpml4obj_object_store);

	kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
	_vm_object_allocate((vm_object_size_t)NPDPTPGS * PAGE_SIZE, &kpdptobj_object_store);

	kernel_pmap->pm_obj = &kptobj_object_store;
	_vm_object_allocate((vm_object_size_t)NPDEPGS * PAGE_SIZE, &kptobj_object_store);

	/*
	 *	Allocate memory for the pv_head_table and its lock bits,
	 *	the modify bit array, and the pte_page table.
	 */

	/*
	 * Zero-bias all these arrays now, instead of basing them at
	 * avail_start, so that we cover all memory.
	 */

	npages = i386_btop(avail_end);
#if HIBERNATION
	pmap_npages = (uint32_t)npages;
#endif
	s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
			 + (sizeof (struct pv_hashed_entry_t *) * (npvhashbuckets))
			 + pv_lock_table_size(npages)
			 + pv_hash_lock_table_size((npvhashbuckets))
				+ npages);
	s = round_page(s);
	if (kernel_memory_allocate(kernel_map, &addr, s, 0,
				   KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP)
	    != KERN_SUCCESS)
		panic("pmap_init");

	memset((char *)addr, 0, s);

	vaddr = addr;
	vsize = s;

#if PV_DEBUG
	if (0 == npvhashmask) panic("npvhashmask not initialized");
#endif

	/*
	 *	Allocate the structures first to preserve word-alignment.
	 */
	pv_head_table = (pv_rooted_entry_t) addr;
	addr = (vm_offset_t) (pv_head_table + npages);

	pv_hash_table = (pv_hashed_entry_t *)addr;
	addr = (vm_offset_t) (pv_hash_table + (npvhashbuckets));

	pv_lock_table = (char *) addr;
	addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));

	pv_hash_lock_table = (char *) addr;
	addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhashbuckets)));

	pmap_phys_attributes = (char *) addr;

	ppnum_t  last_pn = i386_btop(avail_end);
	unsigned int i;
	pmap_memory_region_t *pmptr = pmap_memory_regions;
	for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
		if (pmptr->type != kEfiConventionalMemory)
			continue;
		ppnum_t pn;
		for (pn = pmptr->base; pn <= pmptr->end; pn++) {
			if (pn < last_pn) {
				pmap_phys_attributes[pn] |= PHYS_MANAGED;

				if (pn > last_managed_page)
					last_managed_page = pn;

				if (pn >= lowest_hi && pn <= highest_hi)
					pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
			}
		}
	}
	while (vsize) {
		ppn = pmap_find_phys(kernel_pmap, vaddr);

		pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;

		vaddr += PAGE_SIZE;
		vsize -= PAGE_SIZE;
	}
	/*
	 *	Create the zone of physical maps,
	 *	and of the physical-to-virtual entries.
	 */
	s = (vm_size_t) sizeof(struct pmap);
	pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
	zone_change(pmap_zone, Z_NOENCRYPT, TRUE);

	pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors");
	zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE);

	/* The anchor is required to be page aligned. Zone debugging adds
	 * padding which may violate that requirement. Tell the zone
	 * subsystem that alignment is required.
	 */

	zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);

	s = (vm_size_t) sizeof(struct pv_hashed_entry);
	pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
	    4096 * 3 /* LCM x86_64 */, "pv_list");
	zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);

	/* create pv entries for kernel pages mapped by low level
	   startup code.  these have to exist so we can pmap_remove()
	   e.g. kext pages from the middle of our addr space */

	vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS;
	for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) {
		pv_rooted_entry_t pv_e;

		pv_e = pai_to_pvh(ppn);
		pv_e->va = vaddr;
		vaddr += PAGE_SIZE;
		pv_e->pmap = kernel_pmap;
		queue_init(&pv_e->qlink);
	}
	pmap_initialized = TRUE;

	max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);

	/*
	 * Ensure the kernel's PML4 entry exists for the basement
	 * before this is shared with any user.
	 */
	pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);

#if CONFIG_VMX
	pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
#else
	pmap_ept_support_ad = FALSE;
#endif /* CONFIG_VMX */
}

static
void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) {
	uint64_t ev = sv + nxrosz, cv = sv;
	pd_entry_t *pdep;
	pt_entry_t *ptep = NULL;

	assert(!is_ept_pmap(npmap));

	assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);

	for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
		uint64_t pdev = (cv & ~((uint64_t)PDEMASK));

		if (*pdep & INTEL_PTE_PS) {
			if (NX)
				*pdep |= INTEL_PTE_NX;
			if (ro)
				*pdep &= ~INTEL_PTE_WRITE;
			cv += NBPD;
			cv &= ~((uint64_t) PDEMASK);
			pdep = pmap_pde(npmap, cv);
			continue;
		}

		for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
			if (NX)
				*ptep |= INTEL_PTE_NX;
			if (ro)
				*ptep &= ~INTEL_PTE_WRITE;
			cv += NBPT;
			ptep = pmap_pte(npmap, cv);
		}
	}
	DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
}
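/*
 * Editor's usage sketch (hedged, not original source): pmap_mark_range()
 * tightens permissions on an existing, page-aligned kernel mapping, handling
 * both 2M superpage PDEs and 4K PTEs. pmap_lowmem_finalize() below uses it
 * per Mach-O segment/section, e.g. for a __HIB text section:
 *
 *	pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size),
 *	                FALSE, TRUE);	// keep executable, make read-only
 */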

/*
 * Called once VM is fully initialized so that we can release unused
 * sections of low memory to the general pool.
 * Also complete the set-up of identity-mapped sections of the kernel:
 *	1) write-protect kernel text
 *	2) map kernel text using large pages if possible
 *	3) read and write-protect page zero (for K32)
 *	4) map the global page at the appropriate virtual address.
 *
 * Use of large pages
 * ------------------
 * To effectively map and write-protect all kernel text pages, the text
 * must be 2M-aligned at the base, and the data section above must also be
 * 2M-aligned. That is, there's padding below and above. This is achieved
 * through linker directives. Large pages are used only if this alignment
 * exists (and is not overridden by the -kernel_text_page_4K boot-arg). The
 * memory layout is:
 *
 *                       :                :
 *                       |     __DATA     |
 *               sdata:  ==================  2Meg
 *                       |                |
 *                       |  zero-padding  |
 *                       |                |
 *               etext:  ------------------
 *                       |                |
 *                       :                :
 *                       |                |
 *                       |     __TEXT     |
 *                       |                |
 *                       :                :
 *                       |                |
 *               stext:  ==================  2Meg
 *                       |                |
 *                       |  zero-padding  |
 *                       |                |
 *               eHIB:   ------------------
 *                       |     __HIB      |
 *                       :                :
 *
 * Prior to changing the mapping from 4K to 2M, the zero-padding pages
 * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
 * 4K pages covering [stext,etext] are coalesced as 2M large pages.
 * The now unused level-1 PTE pages are also freed.
 */
extern ppnum_t	vm_kernel_base_page;
void
pmap_lowmem_finalize(void)
{
	spl_t           spl;
	int		i;

	/*
	 * Update wired memory statistics for early boot pages
	 */
	PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);

	/*
	 * Free pages in pmap regions below the base:
	 * rdar://6332712
	 *	We can't free all the pages to VM that EFI reports available.
	 *	Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
	 *	There's also a size miscalculation here: pend is one page less
	 *	than it should be but this is not fixed to be backwards
	 *	compatible.
	 * This is important for KASLR because up to 256*2MB = 512MB of space
	 * has to be released to VM.
	 */
	for (i = 0;
	     pmap_memory_regions[i].end < vm_kernel_base_page;
	     i++) {
		vm_offset_t	pbase = i386_ptob(pmap_memory_regions[i].base);
		vm_offset_t	pend  = i386_ptob(pmap_memory_regions[i].end+1);

		DBG("pmap region %d [%p..[%p\n",
		    i, (void *) pbase, (void *) pend);

		if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED)
			continue;
		/*
		 * rdar://6332712
		 * Adjust limits not to free pages in range 0xc0000-0xff000.
		 */
		if (pbase >= 0xc0000 && pend <= 0x100000)
			continue;
		if (pbase < 0xc0000 && pend > 0x100000) {
			/* page range entirely within region, free lower part */
			DBG("- ml_static_mfree(%p,%p)\n",
			    (void *) ml_static_ptovirt(pbase),
			    (void *) (0xc0000-pbase));
			ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase);
			pbase = 0x100000;
		}
		if (pbase < 0xc0000)
			pend = MIN(pend, 0xc0000);
		if (pend  > 0x100000)
			pbase = MAX(pbase, 0x100000);
		DBG("- ml_static_mfree(%p,%p)\n",
		    (void *) ml_static_ptovirt(pbase),
		    (void *) (pend - pbase));
		ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
	}

	/* A final pass to get rid of all initial identity mappings to
	 * low pages.
	 */
	DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);

	/*
	 * Remove all mappings past the boot-cpu descriptor aliases and low globals.
	 * Non-boot-cpu GDT aliases will be remapped later as needed.
	 */
	pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);

	/*
	 * If text and data are both 2MB-aligned,
	 * we can map text with large-pages,
	 * unless the -kernel_text_ps_4K boot-arg overrides.
	 */
	if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
		kprintf("Kernel text is 2MB aligned");
		kernel_text_ps_4K = FALSE;
		if (PE_parse_boot_argn("-kernel_text_ps_4K",
				       &kernel_text_ps_4K,
				       sizeof (kernel_text_ps_4K)))
			kprintf(" but will be mapped with 4K pages\n");
		else
			kprintf(" and will be mapped with 2M pages\n");
	}

	(void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
	if (wpkernel)
		kprintf("Kernel text %p-%p to be write-protected\n",
			(void *) stext, (void *) etext);

	spl = splhigh();

	/*
	 * Scan over text if mappings are to be changed:
	 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
	 * - Change to large-pages if possible and not overridden.
	 */
	if (kernel_text_ps_4K && wpkernel) {
		vm_offset_t     myva;
		for (myva = stext; myva < etext; myva += PAGE_SIZE) {
			pt_entry_t     *ptep;

			ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
			if (ptep)
				pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE);
		}
	}

	if (!kernel_text_ps_4K) {
		vm_offset_t     myva;

		/*
		 * Release zero-filled page padding used for 2M-alignment.
		 */
		DBG("ml_static_mfree(%p,%p) for padding below text\n",
			(void *) eHIB, (void *) (stext - eHIB));
		ml_static_mfree(eHIB, stext - eHIB);
		DBG("ml_static_mfree(%p,%p) for padding above text\n",
			(void *) etext, (void *) (sdata - etext));
		ml_static_mfree(etext, sdata - etext);

		/*
		 * Coalesce text pages into large pages.
		 */
		for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
			pt_entry_t	*ptep;
			vm_offset_t	pte_phys;
			pt_entry_t	*pdep;
			pt_entry_t	pde;

			pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
			ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
			DBG("myva: %p pdep: %p ptep: %p\n",
				(void *) myva, (void *) pdep, (void *) ptep);
			if ((*ptep & INTEL_PTE_VALID) == 0)
				continue;
			pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
			pde = *pdep & PTMASK;	/* page attributes from pde */
			pde |= INTEL_PTE_PS;	/* make it a 2M entry */
			pde |= pte_phys;	/* take page frame from pte */

			if (wpkernel)
				pde &= ~INTEL_PTE_WRITE;
			DBG("pmap_store_pte(%p,0x%llx)\n",
				(void *)pdep, pde);
			pmap_store_pte(pdep, pde);

			/*
			 * Free the now-unused level-1 pte.
			 * Note: ptep is a virtual address to the pte in the
			 * recursive map. We can't use this address to free
			 * the page. Instead we need to compute its address
			 * in the Idle PTEs in "low memory".
			 */
			vm_offset_t vm_ptep = (vm_offset_t) KPTphys
						+ (pte_phys >> PTPGSHIFT);
			DBG("ml_static_mfree(%p,0x%x) for pte\n",
				(void *) vm_ptep, PAGE_SIZE);
			ml_static_mfree(vm_ptep, PAGE_SIZE);
		}

		/* Change variable read by sysctl machdep.pmap */
		pmap_kernel_text_ps = I386_LPGBYTES;
	}

	boolean_t doconstro = TRUE;

	(void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));

	if ((sconstdata | econstdata) & PAGE_MASK) {
		kprintf("Const DATA misaligned 0x%lx 0x%lx\n", sconstdata, econstdata);
		if ((sconstdata & PAGE_MASK) || (doconstro_override == FALSE))
			doconstro = FALSE;
	}

	if ((sconstdata > edata) || (sconstdata < sdata) || ((econstdata - sconstdata) >= (edata - sdata))) {
		kprintf("Const DATA incorrect size 0x%lx 0x%lx 0x%lx 0x%lx\n", sconstdata, econstdata, sdata, edata);
		doconstro = FALSE;
	}

	if (doconstro)
		kprintf("Marking const DATA read-only\n");

	vm_offset_t dva;

	for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
		assert(((sdata | edata) & PAGE_MASK) == 0);
		if ((sdata | edata) & PAGE_MASK) {
			kprintf("DATA misaligned, 0x%lx, 0x%lx\n", sdata, edata);
			break;
		}

		pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);

		dpte = *dptep;

		assert((dpte & INTEL_PTE_VALID));
		if ((dpte & INTEL_PTE_VALID) == 0) {
			kprintf("Missing data mapping 0x%lx 0x%lx 0x%lx\n", dva, sdata, edata);
			continue;
		}

		dpte |= INTEL_PTE_NX;
		if (doconstro && (dva >= sconstdata) && (dva < econstdata)) {
			dpte &= ~INTEL_PTE_WRITE;
		}
		pmap_store_pte(dptep, dpte);
	}
	kernel_segment_command_t * seg;
	kernel_section_t         * sec;

	for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
		if (!strcmp(seg->segname, "__TEXT") ||
		    !strcmp(seg->segname, "__DATA")) {
			continue;
		}
		//XXX
		if (!strcmp(seg->segname, "__KLD")) {
			continue;
		}
		if (!strcmp(seg->segname, "__HIB")) {
			for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
				if (sec->addr & PAGE_MASK)
					panic("__HIB segment's sections misaligned");
				if (!strcmp(sec->sectname, "__text")) {
					pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
				} else {
					pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
				}
			}
		} else {
			pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
		}
	}

	/*
	 * If we're debugging, map the low global vector page at the fixed
	 * virtual address.  Otherwise, remove the mapping for this.
	 */
	if (debug_boot_arg) {
		pt_entry_t *pte = NULL;
		if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS)))
			panic("lowmem pte");
		/* make sure it is defined on page boundary */
		assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
		pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
					| INTEL_PTE_REF
					| INTEL_PTE_MOD
					| INTEL_PTE_WIRED
					| INTEL_PTE_VALID
					| INTEL_PTE_WRITE
					| INTEL_PTE_NX);
	} else {
		pmap_remove(kernel_pmap,
			    LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
	}

	splx(spl);
	if (pmap_pcid_ncpus)
		tlb_flush_global();
	else
		flush_tlb_raw();
}

/*
 * this function is only used for debugging from the vm layer
 */
boolean_t
pmap_verify_free(
		 ppnum_t pn)
{
	pv_rooted_entry_t	pv_h;
	int			pai;
	boolean_t		result;

	assert(pn != vm_page_fictitious_addr);

	if (!pmap_initialized)
		return(TRUE);

	if (pn == vm_page_guard_addr)
		return TRUE;

	pai = ppn_to_pai(pn);
	if (!IS_MANAGED_PAGE(pai))
		return(FALSE);
	pv_h = pai_to_pvh(pn);
	result = (pv_h->pmap == PMAP_NULL);
	return(result);
}

boolean_t
pmap_is_empty(
	pmap_t          pmap,
	vm_map_offset_t va_start,
	vm_map_offset_t va_end)
{
	vm_map_offset_t offset;
	ppnum_t         phys_page;

	if (pmap == PMAP_NULL) {
		return TRUE;
	}

	/*
	 * Check the resident page count
	 * - if it's zero, the pmap is completely empty.
	 * This short-circuit test prevents a virtual address scan which is
	 * painfully slow for 64-bit spaces.
	 * This assumes the count is correct
	 * .. the debug kernel ought to be checking perhaps by page table walk.
	 */
	if (pmap->stats.resident_count == 0)
		return TRUE;

	for (offset = va_start;
	     offset < va_end;
	     offset += PAGE_SIZE_64) {
		phys_page = pmap_find_phys(pmap, offset);
		if (phys_page) {
			kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
				"page %d at 0x%llx\n",
				pmap, va_start, va_end, phys_page, offset);
			return FALSE;
		}
	}

	return TRUE;
}

void
hv_ept_pmap_create(void **ept_pmap, void **eptp)
{
	pmap_t p;

	if ((ept_pmap == NULL) || (eptp == NULL)) {
		return;
	}

	p = pmap_create_options(get_task_ledger(current_task()), 0, (PMAP_CREATE_64BIT | PMAP_CREATE_EPT));
	if (p == PMAP_NULL) {
		*ept_pmap = NULL;
		*eptp = NULL;
		return;
	}

	assert(is_ept_pmap(p));

	*ept_pmap = (void*)p;
	*eptp = (void*)(p->pm_eptp);
	return;
}
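/*
 * Editor's usage sketch (hedged, not original source): a hypervisor client
 * (the caller name and error handling are hypothetical) obtains a guest
 * physical map and its EPT root like so:
 *
 *	void *gpmap, *eptp;
 *	hv_ept_pmap_create(&gpmap, &eptp);
 *	if (gpmap == NULL)
 *		return KERN_RESOURCE_SHORTAGE;	// creation failed
 *	// eptp is the physical EPT root, suitable for the VMCS EPT pointer
 */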

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create_options(
	ledger_t	ledger,
	vm_map_size_t	sz,
	int		flags)
{
	pmap_t		p;
	vm_size_t	size;
	pml4_entry_t    *pml4;
	pml4_entry_t    *kpml4;

	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
		   (uint32_t) (sz>>32), (uint32_t) sz, flags, 0, 0);

	size = (vm_size_t) sz;

	/*
	 *	A software use-only map doesn't even need a map.
	 */

	if (size != 0) {
		return(PMAP_NULL);
	}

	/*
	 *	Return error when unrecognized flags are passed.
	 */
	if ((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0) {
		return(PMAP_NULL);
	}

	p = (pmap_t) zalloc(pmap_zone);
	if (PMAP_NULL == p)
		panic("pmap_create zalloc");
	/* Zero all fields */
	bzero(p, sizeof(*p));
	/* init counts now since we'll be bumping some */
	simple_lock_init(&p->lock, 0);
#if 00
	p->stats.resident_count = 0;
	p->stats.resident_max = 0;
	p->stats.wired_count = 0;
#else
	bzero(&p->stats, sizeof (p->stats));
#endif
	p->ref_count = 1;
	p->nx_enabled = 1;
	p->pm_shared = FALSE;
	ledger_reference(ledger);
	p->ledger = ledger;

	p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? TASK_MAP_64BIT : TASK_MAP_32BIT);
	if (pmap_pcid_ncpus)
		pmap_pcid_initialize(p);

	p->pm_pml4 = zalloc(pmap_anchor_zone);

	pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);

	memset((char *)p->pm_pml4, 0, PAGE_SIZE);

	if (flags & PMAP_CREATE_EPT) {
		p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
		p->pm_cr3 = 0;
	} else {
		p->pm_eptp = 0;
		p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
	}

	/* allocate the vm_objs to hold the pdpt, pde and pte pages */

	p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) * PAGE_SIZE);
	if (NULL == p->pm_obj_pml4)
		panic("pmap_create pdpt obj");

	p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) * PAGE_SIZE);
	if (NULL == p->pm_obj_pdpt)
		panic("pmap_create pdpt obj");

	p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) * PAGE_SIZE);
	if (NULL == p->pm_obj)
		panic("pmap_create pte obj");

	/* All pmaps share the kernel's pml4 */
	pml4 = pmap64_pml4(p, 0ULL);
	kpml4 = kernel_pmap->pm_pml4;
	pml4[KERNEL_PML4_INDEX]    = kpml4[KERNEL_PML4_INDEX];
	pml4[KERNEL_KEXTS_INDEX]   = kpml4[KERNEL_KEXTS_INDEX];
	pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];

	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
		   p, flags, 0, 0, 0);

	return(p);
}
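/*
 * Editor's usage sketch (hedged, not original source): task creation
 * ultimately allocates an address-space pmap along these lines; the ledger
 * variable is hypothetical.
 *
 *	pmap_t user_pmap = pmap_create_options(task_ledger, 0,
 *	                                       PMAP_CREATE_64BIT);
 *	// sz must be 0 for a hardware-referenced map; a non-zero sz returns
 *	// PMAP_NULL above, as does any flag outside PMAP_CREATE_KNOWN_FLAGS.
 */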

pmap_t
pmap_create(
	ledger_t	ledger,
	vm_map_size_t	sz,
	boolean_t	is_64bit)
{
	return pmap_create_options(ledger, sz, ((is_64bit) ? PMAP_CREATE_64BIT : 0));
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
extern int vm_wired_objects_page_count;

void
pmap_destroy(pmap_t	p)
{
	int		c;

	if (p == PMAP_NULL)
		return;

	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
		   p, 0, 0, 0, 0);

	PMAP_LOCK(p);

	c = --p->ref_count;

	pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);

	if (c == 0) {
		/*
		 * If some cpu is not using the physical pmap pointer that it
		 * is supposed to be (see set_dirbase), we might be using the
		 * pmap that is being destroyed! Make sure we are
		 * physically on the right pmap:
		 */
		PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
		if (pmap_pcid_ncpus)
			pmap_destroy_pcid_sync(p);
	}

	PMAP_UNLOCK(p);

	if (c != 0) {
		PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
			   p, 1, 0, 0, 0);
		pmap_assert(p == kernel_pmap);
		return;	/* still in use */
	}

	/*
	 *	Free the memory maps, then the
	 *	pmap structure.
	 */
	int inuse_ptepages = 0;

	zfree(pmap_anchor_zone, p->pm_pml4);

	inuse_ptepages += p->pm_obj_pml4->resident_page_count;
	vm_object_deallocate(p->pm_obj_pml4);

	inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
	vm_object_deallocate(p->pm_obj_pdpt);

	inuse_ptepages += p->pm_obj->resident_page_count;
	vm_object_deallocate(p->pm_obj);

	OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count);
	PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
	ledger_dereference(p->ledger);
	zfree(pmap_zone, p);

	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
		   0, 0, 0, 0, 0);
}

/*
 *	Add a reference to the specified pmap.
 */

void
pmap_reference(pmap_t	p)
{
	if (p != PMAP_NULL) {
		PMAP_LOCK(p);
		p->ref_count++;
		PMAP_UNLOCK(p);
	}
}

/*
 *	Remove phys addr if mapped in specified map
 *
 */
void
pmap_remove_some_phys(
	__unused pmap_t		map,
	__unused ppnum_t	pn)
{

/* Implement to support working set code */

}


void
pmap_protect(
	pmap_t		map,
	vm_map_offset_t	sva,
	vm_map_offset_t	eva,
	vm_prot_t	prot)
{
	pmap_protect_options(map, sva, eva, prot, 0, NULL);
}


/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 *	Will not increase permissions.
 */
void
pmap_protect_options(
	pmap_t		map,
	vm_map_offset_t	sva,
	vm_map_offset_t	eva,
	vm_prot_t	prot,
	unsigned int	options,
	void		*arg)
{
	pt_entry_t	*pde;
	pt_entry_t	*spte, *epte;
	vm_map_offset_t lva;
	vm_map_offset_t orig_sva;
	boolean_t       set_NX;
	int             num_found = 0;
	boolean_t	is_ept;

	pmap_intr_assert();

	if (map == PMAP_NULL)
		return;

	if (prot == VM_PROT_NONE) {
		pmap_remove_options(map, sva, eva, options);
		return;
	}
	PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
		   map,
		   (uint32_t) (sva >> 32), (uint32_t) sva,
		   (uint32_t) (eva >> 32), (uint32_t) eva);

	if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled)
		set_NX = FALSE;
	else
		set_NX = TRUE;

	is_ept = is_ept_pmap(map);


	PMAP_LOCK(map);

	orig_sva = sva;
	while (sva < eva) {
		lva = (sva + pde_mapped_size) & ~(pde_mapped_size - 1);
		if (lva > eva)
			lva = eva;
		pde = pmap_pde(map, sva);
		if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
			if (*pde & PTE_PS) {
				/* superpage */
				spte = pde;
				epte = spte+1; /* excluded */
			} else {
				spte = pmap_pte(map, (sva & ~(pde_mapped_size - 1)));
				spte = &spte[ptenum(sva)];
				epte = &spte[intel_btop(lva - sva)];
			}

			for (; spte < epte; spte++) {
				if (!(*spte & PTE_VALID_MASK(is_ept)))
					continue;

				if (is_ept) {
					if (prot & VM_PROT_READ)
						pmap_update_pte(spte, 0, PTE_READ(is_ept));
					else
						pmap_update_pte(spte, PTE_READ(is_ept), 0);
				}
				if (prot & VM_PROT_WRITE)
					pmap_update_pte(spte, 0, PTE_WRITE(is_ept));
				else
					pmap_update_pte(spte, PTE_WRITE(is_ept), 0);

				if (set_NX) {
					if (!is_ept)
						pmap_update_pte(spte, 0, INTEL_PTE_NX);
					else
						pmap_update_pte(spte, INTEL_EPT_EX, 0);
				} else {
					if (!is_ept)
						pmap_update_pte(spte, INTEL_PTE_NX, 0);
					else
						pmap_update_pte(spte, 0, INTEL_EPT_EX);
				}
				num_found++;
			}
		}
		sva = lva;
	}
	if (num_found) {
		if (options & PMAP_OPTIONS_NOFLUSH)
			PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg);
		else
			PMAP_UPDATE_TLBS(map, orig_sva, eva);
	}
	PMAP_UNLOCK(map);

	PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
		   0, 0, 0, 0, 0);

}
1576 | ||
/* Map a (possibly) autogenned block */
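/*
 * Note: 'size' is in units of PAGE_SIZE pages, not bytes.  With
 * VM_MEM_SUPERPAGE, each iteration maps one SUPERPAGE_SIZE superpage,
 * and both the loop index and the physical page number advance by
 * cur_page_size/PAGE_SIZE (e.g. 512 base pages per 2MB superpage
 * with 4KB base pages).
 */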
void
pmap_map_block(
	pmap_t		pmap,
	addr64_t	va,
	ppnum_t 	pa,
	uint32_t	size,
	vm_prot_t	prot,
	int		attr,
	__unused unsigned int	flags)
{
	uint32_t	page;
	int		cur_page_size;

	if (attr & VM_MEM_SUPERPAGE)
		cur_page_size = SUPERPAGE_SIZE;
	else
		cur_page_size = PAGE_SIZE;

	for (page = 0; page < size; page += cur_page_size/PAGE_SIZE) {
		pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
		va += cur_page_size;
		pa += cur_page_size/PAGE_SIZE;
	}
}

kern_return_t
pmap_expand_pml4(
	pmap_t		map,
	vm_map_offset_t	vaddr,
	unsigned int	options)
{
	vm_page_t	m;
	pmap_paddr_t	pa;
	uint64_t	i;
	ppnum_t		pn;
	pml4_entry_t	*pml4p;
	boolean_t	is_ept = is_ept_pmap(map);

	DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);

	/*
	 *	Allocate a VM page for the pml4 page
	 */
	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
		if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
			return KERN_RESOURCE_SHORTAGE;
		VM_PAGE_WAIT();
	}
	/*
	 *	put the page into the pmap's obj list so it
	 *	can be found later.
	 */
	pn = m->phys_page;
	pa = i386_ptob(pn);
	i = pml4idx(map, vaddr);

	/*
	 *	Zero the page.
	 */
	pmap_zero_page(pn);

	vm_page_lockspin_queues();
	vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
	vm_page_unlock_queues();

	OSAddAtomic(1, &inuse_ptepages_count);
	OSAddAtomic64(1, &alloc_ptepages_count);
	PMAP_ZINFO_PALLOC(map, PAGE_SIZE);

	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
	vm_object_lock(map->pm_obj_pml4);

	PMAP_LOCK(map);
	/*
	 *	See if someone else expanded us first
	 */
	if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
		PMAP_UNLOCK(map);
		vm_object_unlock(map->pm_obj_pml4);

		VM_PAGE_FREE(m);

		OSAddAtomic(-1, &inuse_ptepages_count);
		PMAP_ZINFO_PFREE(map, PAGE_SIZE);
		return KERN_SUCCESS;
	}

#if 0 /* DEBUG */
	if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) {
		panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
		      map, map->pm_obj_pml4, vaddr, i);
	}
#endif
	vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
	vm_object_unlock(map->pm_obj_pml4);

	/*
	 *	Set the page directory entry for this page table.
	 */
	pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */

	pmap_store_pte(pml4p, pa_to_pte(pa)
			| PTE_READ(is_ept)
			| (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
			| PTE_WRITE(is_ept));

	PMAP_UNLOCK(map);

	return KERN_SUCCESS;
}
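
/*
 * A sketch of the allocate-then-check pattern shared by the three
 * pmap_expand*() routines: the page for the new table level is
 * grabbed, zeroed and wired *before* any locks are taken; only then
 * are the object lock and pmap lock acquired and the walk re-checked.
 * If a racing thread already installed the entry, the freshly
 * allocated page is simply freed and KERN_SUCCESS is returned, since
 * the caller only cares that the level now exists.
 */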

kern_return_t
pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
{
	vm_page_t	m;
	pmap_paddr_t	pa;
	uint64_t	i;
	ppnum_t		pn;
	pdpt_entry_t	*pdptp;
	boolean_t	is_ept = is_ept_pmap(map);

	DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);

	while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
		kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
		if (pep4kr != KERN_SUCCESS)
			return pep4kr;
	}

	/*
	 *	Allocate a VM page for the pdpt page
	 */
	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
		if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
			return KERN_RESOURCE_SHORTAGE;
		VM_PAGE_WAIT();
	}

	/*
	 *	put the page into the pmap's obj list so it
	 *	can be found later.
	 */
	pn = m->phys_page;
	pa = i386_ptob(pn);
	i = pdptidx(map, vaddr);

	/*
	 *	Zero the page.
	 */
	pmap_zero_page(pn);

	vm_page_lockspin_queues();
	vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
	vm_page_unlock_queues();

	OSAddAtomic(1, &inuse_ptepages_count);
	OSAddAtomic64(1, &alloc_ptepages_count);
	PMAP_ZINFO_PALLOC(map, PAGE_SIZE);

	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
	vm_object_lock(map->pm_obj_pdpt);

	PMAP_LOCK(map);
	/*
	 *	See if someone else expanded us first
	 */
	if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
		PMAP_UNLOCK(map);
		vm_object_unlock(map->pm_obj_pdpt);

		VM_PAGE_FREE(m);

		OSAddAtomic(-1, &inuse_ptepages_count);
		PMAP_ZINFO_PFREE(map, PAGE_SIZE);
		return KERN_SUCCESS;
	}

#if 0 /* DEBUG */
	if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) {
		panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
		      map, map->pm_obj_pdpt, vaddr, i);
	}
#endif
	vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
	vm_object_unlock(map->pm_obj_pdpt);

	/*
	 *	Set the page directory entry for this page table.
	 */
	pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */

	pmap_store_pte(pdptp, pa_to_pte(pa)
			| PTE_READ(is_ept)
			| (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
			| PTE_WRITE(is_ept));

	PMAP_UNLOCK(map);

	return KERN_SUCCESS;
}


/*
 *	Routine:	pmap_expand
 *
 *	Expands a pmap to be able to map the specified virtual address.
 *
 *	Allocates new virtual memory for the P0 or P1 portion of the
 *	pmap, then re-maps the physical pages that were in the old
 *	pmap to be in the new pmap.
 *
 *	Must be called with the pmap system and the pmap unlocked,
 *	since these must be unlocked to use vm_allocate or vm_deallocate.
 *	Thus it must be called in a loop that checks whether the map
 *	has been expanded enough.
 *	(We won't loop forever, since page tables aren't shrunk.)
 */
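/*
 * Rough shape of the 4-level x86_64 hierarchy these routines grow,
 * top to bottom (each level resolves 9 bits of the virtual address):
 *
 *	PML4  --pmap_expand_pml4()-->  PDPT
 *	PDPT  --pmap_expand_pdpt()-->  PD  (page directory)
 *	PD    --pmap_expand()------->  PT  (page table of PTEs)
 *
 * Each expander loops on the one above it until its parent level
 * exists, so a single pmap_expand() call can populate all three.
 */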
kern_return_t
pmap_expand(
	pmap_t		map,
	vm_map_offset_t	vaddr,
	unsigned int	options)
{
	pt_entry_t		*pdp;
	register vm_page_t	m;
	register pmap_paddr_t	pa;
	uint64_t		i;
	ppnum_t			pn;
	boolean_t		is_ept = is_ept_pmap(map);


	/*
	 * For the kernel, the virtual address must be in or above the basement
	 * which is for kexts and is in the 512GB immediately below the kernel.
	 * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
	 */
	if (map == kernel_pmap &&
	    !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))
		panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);


	while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
		kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
		if (pepkr != KERN_SUCCESS)
			return pepkr;
	}

	/*
	 *	Allocate a VM page for the pde entries.
	 */
	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
		if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
			return KERN_RESOURCE_SHORTAGE;
		VM_PAGE_WAIT();
	}

	/*
	 *	put the page into the pmap's obj list so it
	 *	can be found later.
	 */
	pn = m->phys_page;
	pa = i386_ptob(pn);
	i = pdeidx(map, vaddr);

	/*
	 *	Zero the page.
	 */
	pmap_zero_page(pn);

	vm_page_lockspin_queues();
	vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
	vm_page_unlock_queues();

	OSAddAtomic(1, &inuse_ptepages_count);
	OSAddAtomic64(1, &alloc_ptepages_count);
	PMAP_ZINFO_PALLOC(map, PAGE_SIZE);

	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
	vm_object_lock(map->pm_obj);

	PMAP_LOCK(map);

	/*
	 *	See if someone else expanded us first
	 */
	if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
		PMAP_UNLOCK(map);
		vm_object_unlock(map->pm_obj);

		VM_PAGE_FREE(m);

		OSAddAtomic(-1, &inuse_ptepages_count);
		PMAP_ZINFO_PFREE(map, PAGE_SIZE);
		return KERN_SUCCESS;
	}

#if 0 /* DEBUG */
	if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) {
		panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
		      map, map->pm_obj, vaddr, i);
	}
#endif
	vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
	vm_object_unlock(map->pm_obj);

	/*
	 *	Set the page directory entry for this page table.
	 */
	pdp = pmap_pde(map, vaddr);
	pmap_store_pte(pdp, pa_to_pte(pa)
			| PTE_READ(is_ept)
			| (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
			| PTE_WRITE(is_ept));

	PMAP_UNLOCK(map);

	return KERN_SUCCESS;
}

/* On K64 machines with more than 32GB of memory, pmap_steal_memory
 * will allocate past the 1GB of pre-expanded virtual kernel area. This
 * function allocates all the page tables using memory from the same pool
 * that pmap_steal_memory uses, rather than calling vm_page_grab (which
 * isn't available yet). */
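/*
 * Unlike pmap_expand(), this runs during early boot: pages come from
 * pmap_next_page_hi() (the same pool pmap_steal_memory draws from)
 * and allocation failure is fatal, hence the bare panic()s below.
 * It fills in whichever of the intermediate levels (PML4 entry, PDPT
 * entry, PD entry) are still missing for 'vaddr'.
 */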
void
pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
{
	ppnum_t		pn;
	pt_entry_t	*pte;
	boolean_t	is_ept = is_ept_pmap(pmap);

	PMAP_LOCK(pmap);

	if (pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
		if (!pmap_next_page_hi(&pn))
			panic("pmap_pre_expand");

		pmap_zero_page(pn);

		pte = pmap64_pml4(pmap, vaddr);

		pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
				| PTE_READ(is_ept)
				| (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
				| PTE_WRITE(is_ept));
	}

	if (pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
		if (!pmap_next_page_hi(&pn))
			panic("pmap_pre_expand");

		pmap_zero_page(pn);

		pte = pmap64_pdpt(pmap, vaddr);

		pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
				| PTE_READ(is_ept)
				| (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
				| PTE_WRITE(is_ept));
	}

	if (pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
		if (!pmap_next_page_hi(&pn))
			panic("pmap_pre_expand");

		pmap_zero_page(pn);

		pte = pmap64_pde(pmap, vaddr);

		pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
				| PTE_READ(is_ept)
				| (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
				| PTE_WRITE(is_ept));
	}

	PMAP_UNLOCK(pmap);
}

/*
 * pmap_sync_page_data_phys(ppnum_t pa)
 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page.
 * Not required on i386.
 */
void
pmap_sync_page_data_phys(__unused ppnum_t pa)
{
	return;
}

/*
 * pmap_sync_page_attributes_phys(ppnum_t pa)
 *
 * Write back and invalidate all cachelines on a physical page.
 */
void
pmap_sync_page_attributes_phys(ppnum_t pa)
{
	cache_flush_page_phys(pa);
}



#ifdef CURRENTLY_UNUSED_AND_UNTESTED

int	collect_ref;
int	collect_unref;

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
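/*
 * The heuristic below, roughly: a page-directory entry whose
 * referenced bit is set survives the pass with the bit cleared
 * (counted in collect_ref), while one still unreferenced since the
 * previous pass has its page table removed and freed, provided no
 * PTE in it is wired (counted in collect_unref).  Note this whole
 * routine is currently compiled out.
 */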
void
pmap_collect(
	pmap_t		p)
{
	register pt_entry_t	*pdp, *ptp;
	pt_entry_t		*eptp;
	int			wired;
	boolean_t		is_ept;

	if (p == PMAP_NULL)
		return;

	if (p == kernel_pmap)
		return;

	is_ept = is_ept_pmap(p);

	/*
	 *	Garbage collect map.
	 */
	PMAP_LOCK(p);

	for (pdp = (pt_entry_t *)p->dirbase;
	     pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
	     pdp++)
	{
		if (*pdp & PTE_VALID_MASK(is_ept)) {
			if (*pdp & PTE_REF(is_ept)) {
				pmap_store_pte(pdp, *pdp & ~PTE_REF(is_ept));
				collect_ref++;
			} else {
				collect_unref++;
				ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
				eptp = ptp + NPTEPG;

				/*
				 * If the pte page has any wired mappings, we cannot
				 * free it.
				 */
				wired = 0;
				{
					register pt_entry_t *ptep;
					for (ptep = ptp; ptep < eptp; ptep++) {
						if (iswired(*ptep)) {
							wired = 1;
							break;
						}
					}
				}
				if (!wired) {
					/*
					 * Remove the virtual addresses mapped by this pte page.
					 */
					pmap_remove_range(p,
					    pdetova(pdp - (pt_entry_t *)p->dirbase),
					    ptp,
					    eptp);

					/*
					 * Invalidate the page directory pointer.
					 */
					pmap_store_pte(pdp, 0x0);

					PMAP_UNLOCK(p);

					/*
					 * And free the pte page itself.
					 */
					{
						register vm_page_t m;

						vm_object_lock(p->pm_obj);

						m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
						if (m == VM_PAGE_NULL)
							panic("pmap_collect: pte page not in object");

						vm_object_unlock(p->pm_obj);

						VM_PAGE_FREE(m);

						OSAddAtomic(-1, &inuse_ptepages_count);
						PMAP_ZINFO_PFREE(p, PAGE_SIZE);
					}

					PMAP_LOCK(p);
				}
			}
		}
	}

	PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
	PMAP_UNLOCK(p);
	return;
}
#endif


void
pmap_copy_page(ppnum_t src, ppnum_t dst)
{
	bcopy_phys((addr64_t)i386_ptob(src),
		   (addr64_t)i386_ptob(dst),
		   PAGE_SIZE);
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(
	__unused pmap_t			pmap,
	__unused vm_map_offset_t	start_addr,
	__unused vm_map_offset_t	end_addr,
	__unused boolean_t		pageable)
{
#ifdef	lint
	pmap++; start_addr++; end_addr++; pageable++;
#endif	/* lint */
}

void
invalidate_icache(__unused vm_offset_t	addr,
		  __unused unsigned	cnt,
		  __unused int		phys)
{
	return;
}

void
flush_dcache(__unused vm_offset_t	addr,
	     __unused unsigned		count,
	     __unused int		phys)
{
	return;
}

#if CONFIG_DTRACE
/*
 * Constrain DTrace copyin/copyout actions
 */
extern kern_return_t dtrace_copyio_preflight(addr64_t);
extern kern_return_t dtrace_copyio_postflight(addr64_t);
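
/*
 * Informally, dtrace_copyio_preflight() answers "is it safe to touch
 * user memory right now?".  It fails if the current map is the kernel
 * map, or if the live CR3 does not match the current thread's user
 * pmap (checked against the kernel CR3 instead when no_shared_cr3 is
 * set), since a DTrace probe may fire with an arbitrary address space
 * active.
 */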
kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
{
	thread_t thread = current_thread();
	uint64_t ccr3;
	if (current_map() == kernel_map)
		return KERN_FAILURE;
	else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE))
		return KERN_FAILURE;
	else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3))
		return KERN_FAILURE;
	else
		return KERN_SUCCESS;
}

kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
{
	return KERN_SUCCESS;
}
#endif /* CONFIG_DTRACE */

#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG
#include <vm/vm_debug.h>

int
pmap_list_resident_pages(
	__unused pmap_t		pmap,
	__unused vm_offset_t	*listp,
	__unused int		space)
{
	return 0;
}
#endif /* MACH_VM_DEBUG */


/* temporary workaround */
boolean_t
coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
{
#if 0
	pt_entry_t	*ptep;

	ptep = pmap_pte(map->pmap, va);
	if (0 == ptep)
		return FALSE;
	return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
#else
	return TRUE;
#endif
}


boolean_t
phys_page_exists(ppnum_t pn)
{
	assert(pn != vm_page_fictitious_addr);

	if (!pmap_initialized)
		return TRUE;

	if (pn == vm_page_guard_addr)
		return FALSE;

	if (!IS_MANAGED_PAGE(ppn_to_pai(pn)))
		return FALSE;

	return TRUE;
}


void
pmap_switch(pmap_t tpmap)
{
	spl_t	s;

	s = splhigh();		/* Make sure interruptions are disabled */
	set_dirbase(tpmap, current_thread(), cpu_number());
	splx(s);
}


/*
 * disable no-execute capability on
 * the specified pmap
 */
void
pmap_disable_NX(pmap_t pmap)
{
	pmap->nx_enabled = 0;
}

void
pt_fake_zone_init(int zone_index)
{
	pt_fake_zone_index = zone_index;
}

void
pt_fake_zone_info(
	int		*count,
	vm_size_t	*cur_size,
	vm_size_t	*max_size,
	vm_size_t	*elem_size,
	vm_size_t	*alloc_size,
	uint64_t	*sum_size,
	int		*collectable,
	int		*exhaustable,
	int		*caller_acct)
{
	*count      = inuse_ptepages_count;
	*cur_size   = PAGE_SIZE * inuse_ptepages_count;
	*max_size   = PAGE_SIZE * (inuse_ptepages_count +
				   vm_page_inactive_count +
				   vm_page_active_count +
				   vm_page_free_count);
	*elem_size  = PAGE_SIZE;
	*alloc_size = PAGE_SIZE;
	*sum_size   = alloc_ptepages_count * PAGE_SIZE;

	*collectable = 1;
	*exhaustable = 0;
	*caller_acct = 1;
}


void
pmap_flush_context_init(pmap_flush_context *pfc)
{
	pfc->pfc_cpus = 0;
	pfc->pfc_invalid_global = 0;
}
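
/*
 * Deferred-shootdown sketch: callers that pass PMAP_OPTIONS_NOFLUSH
 * to the pmap_*_options() routines accumulate the set of cpus needing
 * invalidation in a pmap_flush_context (via PMAP_UPDATE_TLBS_DELAYED),
 * then issue the IPIs for all of them in a single pmap_flush() call
 * instead of one shootdown per operation.
 */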

extern unsigned TLBTimeOut;
void
pmap_flush(
	pmap_flush_context *pfc)
{
	unsigned int	my_cpu;
	unsigned int	cpu;
	unsigned int	cpu_bit;
	cpumask_t	cpus_to_respond = 0;
	cpumask_t	cpus_to_signal = 0;
	cpumask_t	cpus_signaled = 0;
	boolean_t	flush_self = FALSE;
	uint64_t	deadline;

	mp_disable_preemption();

	my_cpu = cpu_number();
	cpus_to_signal = pfc->pfc_cpus;

	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
			    NULL, cpus_to_signal, 0, 0, 0);

	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {

		if (cpus_to_signal & cpu_bit) {

			cpus_to_signal &= ~cpu_bit;

			if (!cpu_datap(cpu)->cpu_running)
				continue;

			if (pfc->pfc_invalid_global & cpu_bit)
				cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
			else
				cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
			mfence();

			if (cpu == my_cpu) {
				flush_self = TRUE;
				continue;
			}
			if (CPU_CR3_IS_ACTIVE(cpu)) {
				cpus_to_respond |= cpu_bit;
				i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
			}
		}
	}
	cpus_signaled = cpus_to_respond;

	/*
	 * Flush local tlb if required.
	 * Do this now to overlap with other processors responding.
	 */
	if (flush_self && cpu_datap(my_cpu)->cpu_tlb_invalid != FALSE)
		process_pmap_updates();

	if (cpus_to_respond) {

		deadline = mach_absolute_time() +
				(TLBTimeOut ? TLBTimeOut : LockTimeOut);
		boolean_t is_timeout_traced = FALSE;

		/*
		 * Wait for those other cpus to acknowledge
		 */
		while (cpus_to_respond != 0) {
			long orig_acks = 0;

			for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
				/* Consider checking local/global invalidity
				 * as appropriate in the PCID case.
				 */
				if ((cpus_to_respond & cpu_bit) != 0) {
					if (!cpu_datap(cpu)->cpu_running ||
					    cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
					    !CPU_CR3_IS_ACTIVE(cpu)) {
						cpus_to_respond &= ~cpu_bit;
					}
					cpu_pause();
				}
				if (cpus_to_respond == 0)
					break;
			}
			if (cpus_to_respond && (mach_absolute_time() > deadline)) {
				if (machine_timeout_suspended())
					continue;
				if (TLBTimeOut == 0) {
					if (is_timeout_traced)
						continue;
					PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
							    NULL, cpus_to_signal, cpus_to_respond, 0, 0);
					is_timeout_traced = TRUE;
					continue;
				}
				pmap_tlb_flush_timeout = TRUE;
				orig_acks = NMIPI_acks;
				mp_cpus_NMIPI(cpus_to_respond);

				panic("TLB invalidation IPI timeout: "
				    "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
				    cpus_to_respond, orig_acks, NMIPI_acks);
			}
		}
	}
	PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
			    NULL, cpus_signaled, flush_self, 0, 0);

	mp_enable_preemption();
}


static void
invept(void *eptp)
{
	struct {
		uint64_t eptp;
		uint64_t reserved;
	} __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0};

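	/*
	 * INVEPT takes a 128-bit memory descriptor (the EPT pointer
	 * being invalidated plus a reserved quadword) and an
	 * invalidation type in a register; PMAP_INVEPT_SINGLE_CONTEXT
	 * asks that only guest-physical mappings tagged with this
	 * EPTP be flushed.
	 */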
	__asm__ volatile("invept (%%rax), %%rcx"
			 : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor)
			 : "cc", "memory");
}

/*
 * Called with pmap locked, we:
 *	- scan through per-cpu data to see which other cpus need to flush
 *	- send an IPI to each non-idle cpu to be flushed
 *	- wait for all to signal back that they are inactive or we see that
 *	  they are at a safe point (idle).
 *	- flush the local tlb if active for this pmap
 *	- return ... the caller will unlock the pmap
 */
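/*
 * Very roughly, the shootdown below is a three-phase protocol:
 *
 *	1. mark: set cpu_tlb_invalid_{local,global} for every cpu whose
 *	   active or task CR3 matches this pmap (or for all cpus if the
 *	   pmap is shared), with an mfence ordering the store;
 *	2. signal: IPI only the cpus actually running on a matching CR3;
 *	   idle cpus notice the flag when they next activate a CR3;
 *	3. wait: spin until each signaled cpu clears its flag, stops
 *	   running, or deactivates its CR3, panicking via NMI if the
 *	   deadline passes and a TLB timeout is enforced.
 */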

void
pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
{
	unsigned int	cpu;
	unsigned int	cpu_bit;
	cpumask_t	cpus_to_signal;
	unsigned int	my_cpu = cpu_number();
	pmap_paddr_t	pmap_cr3 = pmap->pm_cr3;
	boolean_t	flush_self = FALSE;
	uint64_t	deadline;
	boolean_t	pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
	boolean_t	need_global_flush = FALSE;
	uint32_t	event_code;
	vm_map_offset_t	event_startv, event_endv;
	boolean_t	is_ept = is_ept_pmap(pmap);

	assert((processor_avail_count < 2) ||
	       (ml_get_interrupts_enabled() && get_preemption_level() != 0));

	if (pmap == kernel_pmap) {
		event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS);
		event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv);
		event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv);
	} else if (is_ept) {
		event_code = PMAP_CODE(PMAP__FLUSH_EPT);
		event_startv = startv;
		event_endv = endv;
	} else {
		event_code = PMAP_CODE(PMAP__FLUSH_TLBS);
		event_startv = startv;
		event_endv = endv;
	}

	PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START,
			    VM_KERNEL_UNSLIDE_OR_PERM(pmap), options, event_startv, event_endv, 0);

	if (is_ept) {
		mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp);
		goto out;
	}

	/*
	 * Scan other cpus for matching active or task CR3.
	 * For idle cpus (with no active map) we mark them invalid but
	 * don't signal -- they'll check as they go busy.
	 */
	cpus_to_signal = 0;

	if (pmap_pcid_ncpus) {
		if (pmap_is_shared)
			need_global_flush = TRUE;
		pmap_pcid_invalidate_all_cpus(pmap);
		mfence();
	}
	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
		if (!cpu_datap(cpu)->cpu_running)
			continue;
		uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
		uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);

		if ((pmap_cr3 == cpu_task_cr3) ||
		    (pmap_cr3 == cpu_active_cr3) ||
		    (pmap_is_shared)) {

			if (options & PMAP_DELAY_TLB_FLUSH) {
				if (need_global_flush == TRUE)
					pfc->pfc_invalid_global |= cpu_bit;
				pfc->pfc_cpus |= cpu_bit;

				continue;
			}
			if (cpu == my_cpu) {
				flush_self = TRUE;
				continue;
			}
			if (need_global_flush == TRUE)
				cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
			else
				cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
			mfence();

			/*
			 * We don't need to signal processors which will flush
			 * lazily at the idle state or kernel boundary.
			 * For example, if we're invalidating the kernel pmap,
			 * processors currently in userspace don't need to flush
			 * their TLBs until the next time they enter the kernel.
			 * Alterations to the address space of a task active
			 * on a remote processor result in a signal, to
			 * account for copy operations. (There may be room
			 * for optimization in such cases).
			 * The order of the loads below with respect
			 * to the store to the "cpu_tlb_invalid" field above
			 * is important--hence the barrier.
			 */
			if (CPU_CR3_IS_ACTIVE(cpu) &&
			    (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
			     pmap->pm_shared ||
			     (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
				cpus_to_signal |= cpu_bit;
				i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
			}
		}
	}
	if ((options & PMAP_DELAY_TLB_FLUSH))
		goto out;

	/*
	 * Flush local tlb if required.
	 * Do this now to overlap with other processors responding.
	 */
	if (flush_self) {
		if (pmap_pcid_ncpus) {
			pmap_pcid_validate_cpu(pmap, my_cpu);
			if (pmap_is_shared)
				tlb_flush_global();
			else
				flush_tlb_raw();
		}
		else
			flush_tlb_raw();
	}

	if (cpus_to_signal) {
		cpumask_t	cpus_to_respond = cpus_to_signal;

		deadline = mach_absolute_time() +
				(TLBTimeOut ? TLBTimeOut : LockTimeOut);
		boolean_t is_timeout_traced = FALSE;

		/*
		 * Wait for those other cpus to acknowledge
		 */
		while (cpus_to_respond != 0) {
			long orig_acks = 0;

			for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
				/* Consider checking local/global invalidity
				 * as appropriate in the PCID case.
				 */
				if ((cpus_to_respond & cpu_bit) != 0) {
					if (!cpu_datap(cpu)->cpu_running ||
					    cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
					    !CPU_CR3_IS_ACTIVE(cpu)) {
						cpus_to_respond &= ~cpu_bit;
					}
					cpu_pause();
				}
				if (cpus_to_respond == 0)
					break;
			}
			if (cpus_to_respond && (mach_absolute_time() > deadline)) {
				if (machine_timeout_suspended())
					continue;
				if (TLBTimeOut == 0) {
					/* cut tracepoint but don't panic */
					if (is_timeout_traced)
						continue;
					PMAP_TRACE_CONSTANT(
						PMAP_CODE(PMAP__FLUSH_TLBS_TO),
						VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, cpus_to_respond, 0, 0);
					is_timeout_traced = TRUE;
					continue;
				}
				pmap_tlb_flush_timeout = TRUE;
				orig_acks = NMIPI_acks;
				mp_cpus_NMIPI(cpus_to_respond);

				panic("TLB invalidation IPI timeout: "
				    "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
				    cpus_to_respond, orig_acks, NMIPI_acks);
			}
		}
	}

	if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
		panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
	}

out:
	PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END,
			    VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, event_startv, event_endv, 0);

}

void
process_pmap_updates(void)
{
	int ccpu = cpu_number();
	pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
	if (pmap_pcid_ncpus) {
		pmap_pcid_validate_current();
		if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
			cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
			tlb_flush_global();
		}
		else {
			cpu_datap(ccpu)->cpu_tlb_invalid_local = FALSE;
			flush_tlb_raw();
		}
	}
	else {
		current_cpu_datap()->cpu_tlb_invalid = FALSE;
		flush_tlb_raw();
	}

	mfence();
}

void
pmap_update_interrupt(void)
{
	PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
		   0, 0, 0, 0, 0);

	if (current_cpu_datap()->cpu_tlb_invalid)
		process_pmap_updates();

	PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
		   0, 0, 0, 0, 0);
}

#include <mach/mach_vm.h>	/* mach_vm_region_recurse() */
/* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
 * and identify ranges with mismatched VM permissions and PTE permissions
 */
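/*
 * Note the hop over the non-canonical hole below: x86_64 addresses
 * between 0x0000800000000000 and 0xFFFF800000000000 cannot be mapped,
 * so once the scan passes the top of the user half it jumps straight
 * to the base of the kernel half instead of walking unmappable space.
 */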
kern_return_t
pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) {
	vm_offset_t cv = sv;
	kern_return_t rv = KERN_SUCCESS;
	uint64_t skip4 = 0, skip2 = 0;

	assert(!is_ept_pmap(ipmap));

	sv &= ~PAGE_MASK_64;
	ev &= ~PAGE_MASK_64;
	while (cv < ev) {
		if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
		    (cv < 0xFFFF800000000000ULL))) {
			cv = 0xFFFF800000000000ULL;
		}
		/* Potential inconsistencies from not holding pmap lock
		 * but harmless for the moment.
		 */
		if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
			if ((cv + NBPML4) > cv)
				cv += NBPML4;
			else
				break;
			skip4++;
			continue;
		}
		if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
			if ((cv + NBPD) > cv)
				cv += NBPD;
			else
				break;
			skip2++;
			continue;
		}

		pt_entry_t *ptep = pmap_pte(ipmap, cv);
		if (ptep && (*ptep & INTEL_PTE_VALID)) {
			if (*ptep & INTEL_PTE_WRITE) {
				if (!(*ptep & INTEL_PTE_NX)) {
					kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
					rv = KERN_FAILURE;
				}
			}
		}
		cv += PAGE_SIZE;
	}
	kprintf("Completed pmap scan\n");
	cv = sv;

	struct vm_region_submap_info_64 vbr;
	mach_msg_type_number_t vbrcount = 0;
	mach_vm_size_t	vmsize;
	vm_prot_t	prot;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret;

	while (cv < ev) {

		for (;;) {
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if ((kret = mach_vm_region_recurse(ivmmap,
				    (mach_vm_address_t *) &cv, &vmsize, &nesting_depth,
				    (vm_region_recurse_info_t)&vbr,
				    &vbrcount)) != KERN_SUCCESS) {
				break;
			}

			if (vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}

		if (kret != KERN_SUCCESS)
			break;

		prot = vbr.protection;

		if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
			kprintf("W+X map entry at address 0x%lx\n", cv);
			rv = KERN_FAILURE;
		}

		if (prot) {
			vm_offset_t pcv;
			for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
				pt_entry_t *ptep = pmap_pte(ipmap, pcv);
				vm_prot_t tprot;

				if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID))
					continue;
				tprot = VM_PROT_READ;
				if (*ptep & INTEL_PTE_WRITE)
					tprot |= VM_PROT_WRITE;
				if ((*ptep & INTEL_PTE_NX) == 0)
					tprot |= VM_PROT_EXECUTE;
				if (tprot != prot) {
					kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
					rv = KERN_FAILURE;
				}
			}
		}
		cv += vmsize;
	}
	return rv;
}