apple/xnu.git (xnu-4903.241.1) — osfmk/x86_64/pmap.c
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	pmap.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	(These guys wrote the Vax version)
 *
 *	Physical Map management code for Intel i386, i486, and i860.
 *
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <string.h>
#include <mach_ldebug.h>

#include <libkern/OSAtomic.h>

#include <mach/machine/vm_types.h>

#include <mach/boolean.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/ledger.h>
#include <kern/mach_param.h>

#include <kern/kalloc.h>
#include <kern/spl.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <mach/machine/vm_param.h>
#include <machine/thread.h>

#include <kern/misc_protos.h>		/* prototyping */
#include <i386/misc_protos.h>
#include <i386/i386_lowmem.h>
#include <x86_64/lowglobals.h>

#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/machine_cpu.h>
#include <i386/seg.h>
#include <i386/serial_io.h>
#include <i386/cpu_capabilities.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/tsc.h>
#include <i386/pmap_internal.h>
#include <i386/pmap_pcid.h>
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <vm/vm_protos.h>
#include <san/kasan.h>

#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <libkern/kernel_mach_header.h>

#include <pexpert/i386/efi.h>

#if MACH_ASSERT
int pmap_stats_assert = 1;
#endif /* MACH_ASSERT */

#ifdef IWANTTODEBUG
#undef	DEBUG
#define	DEBUG	1
#define	POSTCODE_DELAY 1
#include <i386/postcode.h>
#endif /* IWANTTODEBUG */

#ifdef	PMAP_DEBUG
#define	DBG(x...)	kprintf("DBG: " x)
#else
#define	DBG(x...)
#endif
/* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
 * in the trampolines for kernel/user boundary TLB coherency.
 */
char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
boolean_t pmap_trace = FALSE;
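/* For illustration only (not in the original source): the declaration above
 * is the classic negative-array-size compile-time assert -- if either
 * condition fails, the array size evaluates to -1 and the build breaks.
 * A rough C11 equivalent would be:
 *
 *	_Static_assert(offsetof(cpu_data_t, cpu_tlb_invalid) -
 *	    offsetof(cpu_data_t, cpu_active_cr3) == 8,
 *	    "trampoline fields must be adjacent");
 *	_Static_assert(offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0,
 *	    "cr3 field must be cacheline-aligned");
 */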

boolean_t	no_shared_cr3 = DEBUG;	/* TRUE for DEBUG by default */

int nx_enabled = 1;			/* enable no-execute protection -- set during boot */

#if DEBUG || DEVELOPMENT
int allow_data_exec  = VM_ABI_32;	/* 32-bit apps may execute data by default, 64-bit apps may not */
int allow_stack_exec = 0;		/* No apps may execute from the stack by default */
#else /* DEBUG || DEVELOPMENT */
const int allow_data_exec  = VM_ABI_32;	/* 32-bit apps may execute data by default, 64-bit apps may not */
const int allow_stack_exec = 0;		/* No apps may execute from the stack by default */
#endif /* DEBUG || DEVELOPMENT */

const boolean_t cpu_64bit = TRUE;	/* Mais oui! */

uint64_t max_preemption_latency_tsc = 0;

pv_hashed_entry_t	*pv_hash_table;	/* hash lists */

uint32_t npvhashmask = 0, npvhashbuckets = 0;

pv_hashed_entry_t	pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
pv_hashed_entry_t	pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
decl_simple_lock_data(,pv_hashed_free_list_lock)
decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
decl_simple_lock_data(,pv_hash_table_lock)

decl_simple_lock_data(,phys_backup_lock)

zone_t pv_hashed_list_zone;	/* zone of pv_hashed_entry structures */

/*
 *	First and last physical addresses that we maintain any information
 *	for.  Initialized to zero so that pmap operations done before
 *	pmap_init won't touch any non-existent structures.
 */
boolean_t	pmap_initialized = FALSE; /* Has pmap_init completed? */

static struct vm_object kptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
static struct vm_object kpml4obj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
static struct vm_object kpdptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

/*
 *	Array of physical page attributes for managed pages.
 *	One byte per physical page.
 */
char		*pmap_phys_attributes;
ppnum_t		last_managed_page = 0;

/*
 *	Amount of virtual memory mapped by one
 *	page-directory entry.
 */

uint64_t pde_mapped_size = PDE_MAPPED_SIZE;

unsigned pmap_memory_region_count;
unsigned pmap_memory_region_current;

pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];

/*
 *	Other useful macros.
 */
#define current_pmap()		(vm_map_pmap(current_thread()->map))

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;

struct zone	*pmap_zone;		/* zone of pmap structures */

struct zone	*pmap_anchor_zone;
struct zone	*pmap_uanchor_zone;
int		pmap_debug = 0;		/* flag for debugging prints */

unsigned int	inuse_ptepages_count = 0;
long long	alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
unsigned int	bootstrap_wired_pages = 0;
int		pt_fake_zone_index = -1;

extern long	NMIPI_acks;

boolean_t	kernel_text_ps_4K = TRUE;
boolean_t	wpkernel = TRUE;

extern char	end;

static int	nkpt;

pt_entry_t	*DMAP1, *DMAP2;
caddr_t		DADDR1;
caddr_t		DADDR2;

boolean_t	pmap_disable_kheap_nx = FALSE;
boolean_t	pmap_disable_kstack_nx = FALSE;

extern long __stack_chk_guard[];

static uint64_t pmap_eptp_flags = 0;
boolean_t pmap_ept_support_ad = FALSE;

/*
 *	Map memory at initialization.  The physical addresses being
 *	mapped are not managed and are never unmapped.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(
	vm_offset_t	virt,
	vm_map_offset_t	start_addr,
	vm_map_offset_t	end_addr,
	vm_prot_t	prot,
	unsigned int	flags)
{
	kern_return_t	kr;
	int		ps;

	ps = PAGE_SIZE;
	while (start_addr < end_addr) {
		kr = pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
		    (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);

		if (kr != KERN_SUCCESS) {
			panic("%s: failed pmap_enter, "
			    "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x",
			    __FUNCTION__,
			    (void *)virt, (void *)start_addr, (void *)end_addr, prot, flags);
		}

		virt += ps;
		start_addr += ps;
	}
	return(virt);
}
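/* Illustrative call (hypothetical addresses and caching attribute, not from
 * this file): wiring a physical range into the kernel map during bootstrap
 * and advancing the virtual cursor by the amount mapped:
 *
 *	vm_offset_t va = virtual_avail;
 *	va = pmap_map(va, 0x7f000000ULL, 0x7f004000ULL,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_IO);
 *	virtual_avail = va;
 */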

extern	char		*first_avail;
extern	vm_offset_t	virtual_avail, virtual_end;
extern	pmap_paddr_t	avail_start, avail_end;
extern	vm_offset_t	sHIB;
extern	vm_offset_t	eHIB;
extern	vm_offset_t	stext;
extern	vm_offset_t	etext;
extern	vm_offset_t	sdata, edata;
extern	vm_offset_t	sconst, econst;

extern void		*KPTphys;

boolean_t pmap_smep_enabled = FALSE;
boolean_t pmap_smap_enabled = FALSE;

b0d623f7
A
323void
324pmap_cpu_init(void)
325{
bd504ef0 326 cpu_data_t *cdp = current_cpu_datap();
6d2010ae 327
5c9f4661 328 set_cr4(get_cr4() | CR4_PGE);
b0d623f7
A
329
330 /*
331 * Initialize the per-cpu, TLB-related fields.
332 */
bd504ef0 333 cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
a39ff7e2 334 cpu_shadowp(cdp->cpu_number)->cpu_kernel_cr3 = cdp->cpu_kernel_cr3;
bd504ef0
A
335 cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
336 cdp->cpu_tlb_invalid = FALSE;
337 cdp->cpu_task_map = TASK_MAP_64BIT;
5c9f4661 338
6d2010ae 339 pmap_pcid_configure();
13f56ec4 340 if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
39037602
A
341 pmap_smep_enabled = TRUE;
342#if DEVELOPMENT || DEBUG
13f56ec4 343 boolean_t nsmep;
39037602
A
344 if (PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
345 pmap_smep_enabled = FALSE;
346 }
347#endif
348 if (pmap_smep_enabled) {
13f56ec4 349 set_cr4(get_cr4() | CR4_SMEP);
13f56ec4 350 }
39037602 351
13f56ec4 352 }
04b8595b 353 if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) {
39037602
A
354 pmap_smap_enabled = TRUE;
355#if DEVELOPMENT || DEBUG
04b8595b 356 boolean_t nsmap;
39037602
A
357 if (PE_parse_boot_argn("-pmap_smap_disable", &nsmap, sizeof(nsmap))) {
358 pmap_smap_enabled = FALSE;
359 }
360#endif
361 if (pmap_smap_enabled) {
04b8595b 362 set_cr4(get_cr4() | CR4_SMAP);
04b8595b
A
363 }
364 }
bd504ef0 365
5ba3f43e 366#if !MONOTONIC
bd504ef0
A
367 if (cdp->cpu_fixed_pmcs_enabled) {
368 boolean_t enable = TRUE;
369 cpu_pmc_control(&enable);
370 }
5ba3f43e 371#endif /* !MONOTONIC */
b0d623f7
A
372}
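/* Note: on DEVELOPMENT/DEBUG kernels the SMEP/SMAP hardware protections
 * parsed above can be turned off for experimentation via boot-args, e.g.
 * (illustrative invocation, assuming the stock nvram tool):
 *
 *	nvram boot-args="-pmap_smep_disable -pmap_smap_disable"
 */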

static uint32_t pmap_scale_shift(void) {
	uint32_t scale = 0;

	if (sane_size <= 8*GB) {
		scale = (uint32_t)(sane_size / (2 * GB));
	} else if (sane_size <= 32*GB) {
		scale = 4 + (uint32_t)((sane_size - (8 * GB))/ (4 * GB));
	} else {
		scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB))/ (8 * GB)));
	}
	return scale;
}
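/* Worked example: pmap_bootstrap() below sizes the PV hash as
 * npvhashmask = (NPVHASHBUCKETS << pmap_scale_shift()) - 1, so a machine
 * with sane_size = 16GB gets scale 4 + (16 - 8)/4 = 6, i.e. 64x the base
 * bucket count, while an 8GB machine gets 8/2 = 4, i.e. 16x.
 */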

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *	Called with mapping OFF.  Page_size must already be set.
 */

void
pmap_bootstrap(
	__unused vm_offset_t	load_start,
	__unused boolean_t	IA32e)
{
#if NCOPY_WINDOWS > 0
	vm_offset_t	va;
	int i;
#endif
	assert(IA32e);

	vm_last_addr = VM_MAX_KERNEL_ADDRESS;	/* Set the highest address
						 * known to VM */
	/*
	 *	The kernel's pmap is statically allocated so we don't
	 *	have to use pmap_create, which is unlikely to work
	 *	correctly at this part of the boot sequence.
	 */

	kernel_pmap = &kernel_pmap_store;
	kernel_pmap->ref_count = 1;
	kernel_pmap->nx_enabled = TRUE;
	kernel_pmap->pm_task_map = TASK_MAP_64BIT;
	kernel_pmap->pm_obj = (vm_object_t) NULL;
	kernel_pmap->pm_pml4 = IdlePML4;
	kernel_pmap->pm_upml4 = IdlePML4;
	kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
	kernel_pmap->pm_ucr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
	kernel_pmap->pm_eptp = 0;

	pmap_pcid_initialize_kernel(kernel_pmap);

	current_cpu_datap()->cpu_kernel_cr3 = cpu_shadowp(cpu_number())->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;

	nkpt = NKPT;
	OSAddAtomic(NKPT, &inuse_ptepages_count);
	OSAddAtomic64(NKPT, &alloc_ptepages_count);
	bootstrap_wired_pages = NKPT;

	virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
	virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);

#if NCOPY_WINDOWS > 0
	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*INTEL_PGBYTES);

	va = virtual_avail;

	for (i=0; i<PMAP_NWINDOWS; i++) {
#if 1
		kprintf("trying to do SYSMAP idx %d %p\n", i,
			current_cpu_datap());
		kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap);
		kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow);
		kprintf("two stuff %p %p\n",
			(void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
			(void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR));
#endif
		SYSMAP(caddr_t,
		       (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
		       (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
		       1);
		current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP =
			&(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store);
		*current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
	}

	/* DMAP user for debugger */
	SYSMAP(caddr_t, DMAP1, DADDR1, 1);
	SYSMAP(caddr_t, DMAP2, DADDR2, 1);  /* XXX temporary - can remove */

	virtual_avail = va;
#endif
	if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof (npvhashmask))) {
		npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1;

	}

	npvhashbuckets = npvhashmask + 1;

	if (0 != ((npvhashbuckets) & npvhashmask)) {
		panic("invalid hash %d, must be ((2^N)-1), "
		    "using default %d\n", npvhashmask, NPVHASHMASK);
	}
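	/* The mask must be one less than a power of two so that
	 * (hash & npvhashmask) is always a valid bucket index; e.g. a
	 * boot-arg of npvhash=4095 yields 4096 buckets, while npvhash=4000
	 * trips the panic above.
	 */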

	simple_lock_init(&kernel_pmap->lock, 0);
	simple_lock_init(&pv_hashed_free_list_lock, 0);
	simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
	simple_lock_init(&pv_hash_table_lock,0);
	simple_lock_init(&phys_backup_lock, 0);

	pmap_cpu_init();

	if (pmap_pcid_ncpus)
		printf("PMAP: PCID enabled\n");

	if (pmap_smep_enabled)
		printf("PMAP: Supervisor Mode Execute Protection enabled\n");
	if (pmap_smap_enabled)
		printf("PMAP: Supervisor Mode Access Protection enabled\n");

#if	DEBUG
	printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
	printf("early_random(): 0x%qx\n", early_random());
#endif
#if	DEVELOPMENT || DEBUG
	boolean_t ptmp;
	/* Check if the user has requested disabling stack or heap no-execute
	 * enforcement. These are "const" variables; that qualifier is cast away
	 * when altering them. The TEXT/DATA const sections are marked
	 * write protected later in the kernel startup sequence, so altering
	 * them is possible at this point, in pmap_bootstrap().
	 */
	if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
		boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
		*pdknxp = TRUE;
	}

	if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
		boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
		*pdknhp = TRUE;
	}
#endif /* DEVELOPMENT || DEBUG */

	boot_args *args = (boot_args *)PE_state.bootArgs;
	if (args->efiMode == kBootArgsEfiMode32) {
		printf("EFI32: kernel virtual space limited to 4GB\n");
		virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
	}
	kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
			(long)KERNEL_BASE, (long)virtual_end);
	kprintf("Available physical space from 0x%llx to 0x%llx\n",
			avail_start, avail_end);

	/*
	 * The -no_shared_cr3 boot-arg is a debugging feature (set by default
	 * in the DEBUG kernel) to force the kernel to switch to its own map
	 * (and cr3) when control is in kernelspace. The kernel's map does not
	 * include (i.e. share) userspace so wild references will cause
	 * a panic. Only copyin and copyout are exempt from this.
	 */
	(void) PE_parse_boot_argn("-no_shared_cr3",
				  &no_shared_cr3, sizeof (no_shared_cr3));
	if (no_shared_cr3)
		kprintf("Kernel not sharing user map\n");

#ifdef	PMAP_TRACES
	if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
		kprintf("Kernel traces for pmap operations enabled\n");
	}
#endif	/* PMAP_TRACES */

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_stats_assert",
			   &pmap_stats_assert,
			   sizeof (pmap_stats_assert));
#endif /* MACH_ASSERT */
}

void
pmap_virtual_space(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	*startp = virtual_avail;
	*endp = virtual_end;
}


#if HIBERNATION

#include <IOKit/IOHibernatePrivate.h>

int32_t		pmap_npages;
int32_t		pmap_teardown_last_valid_compact_indx = -1;


void	hibernate_rebuild_pmap_structs(void);
void	hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
void	pmap_pack_index(uint32_t);
int32_t	pmap_unpack_index(pv_rooted_entry_t);


int32_t
pmap_unpack_index(pv_rooted_entry_t pv_h)
{
	int32_t	indx = 0;

	indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
	indx = indx << 16;
	indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);

	*((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
	*((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);

	return (indx);
}


void
pmap_pack_index(uint32_t indx)
{
	pv_rooted_entry_t	pv_h;

	pv_h = &pv_head_table[indx];

	*((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
	*((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);

	*((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
	*((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
}
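/* Worked example of the packing scheme above: queue pointers in
 * pv_head_table are canonical kernel addresses whose top 16 bits are all
 * ones, so those bits can temporarily carry a 32-bit page index split in
 * half.  For indx = 0x00012345, pack stores 0x0001 in the top 16 bits of
 * qlink.next and 0x2345 in the top 16 bits of qlink.prev; unpack
 * reassembles (0x0001 << 16) | 0x2345 and then ORs 0xffff back into both
 * words to restore canonical pointers.
 */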


void
hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
{
	int32_t	i;
	int32_t	compact_target_indx;

	compact_target_indx = 0;

	for (i = 0; i < pmap_npages; i++) {
		if (pv_head_table[i].pmap == PMAP_NULL) {

			if (pv_head_table[compact_target_indx].pmap != PMAP_NULL)
				compact_target_indx = i;
		} else {
			pmap_pack_index((uint32_t)i);

			if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
				/*
				 * we've got a hole to fill, so
				 * move this pv_rooted_entry_t to its new home
				 */
				pv_head_table[compact_target_indx] = pv_head_table[i];
				pv_head_table[i].pmap = PMAP_NULL;

				pmap_teardown_last_valid_compact_indx = compact_target_indx;
				compact_target_indx++;
			} else
				pmap_teardown_last_valid_compact_indx = i;
		}
	}
	*unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx+1];
	*unneeded_end = (addr64_t)&pv_head_table[pmap_npages-1];

	HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
}


void
hibernate_rebuild_pmap_structs(void)
{
	int32_t			cindx, eindx, rindx = 0;
	pv_rooted_entry_t	pv_h;

	eindx = (int32_t)pmap_npages;

	for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {

		pv_h = &pv_head_table[cindx];

		rindx = pmap_unpack_index(pv_h);
		assert(rindx < pmap_npages);

		if (rindx != cindx) {
			/*
			 * this pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs,
			 * so move it back to its real location
			 */
			pv_head_table[rindx] = pv_head_table[cindx];
		}
		if (rindx+1 != eindx) {
			/*
			 * the 'hole' between this pv_rooted_entry_t and the previous
			 * pv_rooted_entry_t we moved needs to be initialized as
			 * a range of zero'd pv_rooted_entry_t's
			 */
			bzero((char *)&pv_head_table[rindx+1], (eindx - rindx - 1) * sizeof (struct pv_rooted_entry));
		}
		eindx = rindx;
	}
	if (rindx)
		bzero ((char *)&pv_head_table[0], rindx * sizeof (struct pv_rooted_entry));

	HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
}
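/* Net effect of the pair above: before hibernation the sparse
 * pv_head_table is compacted toward index 0 (each live entry remembering
 * its true index in the packed pointer bits), the tail is reported to the
 * hibernation code as discardable, and on wake every entry is walked in
 * reverse, moved back to its original slot, and the gaps re-zeroed.
 */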

#endif

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	long			npages;
	vm_offset_t		addr;
	vm_size_t		s, vsize;
	vm_map_offset_t		vaddr;
	ppnum_t			ppn;


	kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
	_vm_object_allocate((vm_object_size_t)NPML4PGS * PAGE_SIZE, &kpml4obj_object_store);

	kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
	_vm_object_allocate((vm_object_size_t)NPDPTPGS * PAGE_SIZE, &kpdptobj_object_store);

	kernel_pmap->pm_obj = &kptobj_object_store;
	_vm_object_allocate((vm_object_size_t)NPDEPGS * PAGE_SIZE, &kptobj_object_store);

	/*
	 *	Allocate memory for the pv_head_table and its lock bits,
	 *	the modify bit array, and the pte_page table.
	 */

	/*
	 *	zero bias all these arrays now instead of off avail_start
	 *	so we cover all memory
	 */

	npages = i386_btop(avail_end);
#if HIBERNATION
	pmap_npages = (uint32_t)npages;
#endif
	s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
			 + (sizeof (struct pv_hashed_entry_t *) * (npvhashbuckets))
			 + pv_lock_table_size(npages)
			 + pv_hash_lock_table_size((npvhashbuckets))
			 + npages);
	s = round_page(s);
	if (kernel_memory_allocate(kernel_map, &addr, s, 0,
				   KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP)
	    != KERN_SUCCESS)
		panic("pmap_init");

	memset((char *)addr, 0, s);

	vaddr = addr;
	vsize = s;

#if PV_DEBUG
	if (0 == npvhashmask) panic("npvhashmask not initialized");
#endif

	/*
	 *	Allocate the structures first to preserve word-alignment.
	 */
	pv_head_table = (pv_rooted_entry_t) addr;
	addr = (vm_offset_t) (pv_head_table + npages);

	pv_hash_table = (pv_hashed_entry_t *)addr;
	addr = (vm_offset_t) (pv_hash_table + (npvhashbuckets));

	pv_lock_table = (char *) addr;
	addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));

	pv_hash_lock_table = (char *) addr;
	addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhashbuckets)));

	pmap_phys_attributes = (char *) addr;

	ppnum_t  last_pn = i386_btop(avail_end);
	unsigned int i;
	pmap_memory_region_t *pmptr = pmap_memory_regions;
	for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
		if (pmptr->type != kEfiConventionalMemory)
			continue;
		ppnum_t pn;
		for (pn = pmptr->base; pn <= pmptr->end; pn++) {
			if (pn < last_pn) {
				pmap_phys_attributes[pn] |= PHYS_MANAGED;

				if (pn > last_managed_page)
					last_managed_page = pn;

				if (pn >= lowest_hi && pn <= highest_hi)
					pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
			}
		}
	}
	while (vsize) {
		ppn = pmap_find_phys(kernel_pmap, vaddr);

		pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;

		vaddr += PAGE_SIZE;
		vsize -= PAGE_SIZE;
	}
	/*
	 *	Create the zone of physical maps,
	 *	and of the physical-to-virtual entries.
	 */
	s = (vm_size_t) sizeof(struct pmap);
	pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
	zone_change(pmap_zone, Z_NOENCRYPT, TRUE);

	pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors");
	zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE);

	/* The anchor is required to be page aligned. Zone debugging adds
	 * padding which may violate that requirement. Tell the zone
	 * subsystem that alignment is required.
	 */

	zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
/* TODO: possible general optimisation...pre-allocate via zones commonly created
 * level3/2 pagetables
 */
	pmap_uanchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable user anchors");
	zone_change(pmap_uanchor_zone, Z_NOENCRYPT, TRUE);

	/* The anchor is required to be page aligned. Zone debugging adds
	 * padding which may violate that requirement. Tell the zone
	 * subsystem that alignment is required.
	 */

	zone_change(pmap_uanchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);

	s = (vm_size_t) sizeof(struct pv_hashed_entry);
	pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
	    4096 * 3 /* LCM x86_64*/, "pv_list");
	zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
	zone_change(pv_hashed_list_zone, Z_GZALLOC_EXEMPT, TRUE);

	/* create pv entries for kernel pages mapped by low level
	   startup code.  these have to exist so we can pmap_remove()
	   e.g. kext pages from the middle of our addr space */

	vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS;
	for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) {
		pv_rooted_entry_t pv_e;

		pv_e = pai_to_pvh(ppn);
		pv_e->va_and_flags = vaddr;
		vaddr += PAGE_SIZE;
		pv_e->pmap = kernel_pmap;
		queue_init(&pv_e->qlink);
	}
	pmap_initialized = TRUE;

	max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);

	/*
	 * Ensure the kernel's PML4 entry exists for the basement
	 * before this is shared with any user.
	 */
	pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);

#if CONFIG_VMX
	pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
	pmap_eptp_flags = HV_VMX_EPTP_MEMORY_TYPE_WB | HV_VMX_EPTP_WALK_LENGTH(4) | (pmap_ept_support_ad ? HV_VMX_EPTP_ENABLE_AD_FLAGS : 0);
#endif /* CONFIG_VMX */
}

static
void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) {
	uint64_t ev = sv + nxrosz, cv = sv;
	pd_entry_t *pdep;
	pt_entry_t *ptep = NULL;

	assert(!is_ept_pmap(npmap));

	assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);

	for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
		uint64_t pdev = (cv & ~((uint64_t)PDEMASK));

		if (*pdep & INTEL_PTE_PS) {
			if (NX)
				*pdep |= INTEL_PTE_NX;
			if (ro)
				*pdep &= ~INTEL_PTE_WRITE;
			cv += NBPD;
			cv &= ~((uint64_t) PDEMASK);
			pdep = pmap_pde(npmap, cv);
			continue;
		}

		for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
			if (NX)
				*ptep |= INTEL_PTE_NX;
			if (ro)
				*ptep &= ~INTEL_PTE_WRITE;
			cv += NBPT;
			ptep = pmap_pte(npmap, cv);
		}
	}
	DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
}
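/* Usage note: this helper only ever tightens permissions on an existing,
 * page-aligned kernel-pmap range, setting NX and/or clearing write access
 * on 2M PDEs and 4K PTEs alike; pmap_lowmem_finalize() below uses it per
 * Mach-O segment, e.g.:
 *
 *	pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size),
 *	    FALSE, TRUE);	(* executable but read-only *)
 */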

/*
 * Called once VM is fully initialized so that we can release unused
 * sections of low memory to the general pool.
 * Also complete the set-up of identity-mapped sections of the kernel:
 *	1) write-protect kernel text
 *	2) map kernel text using large pages if possible
 *	3) read and write-protect page zero (for K32)
 *	4) map the global page at the appropriate virtual address.
 *
 * Use of large pages
 * ------------------
 * To effectively map and write-protect all kernel text pages, the text
 * must be 2M-aligned at the base, and the data section above must also be
 * 2M-aligned. That is, there's padding below and above. This is achieved
 * through linker directives. Large pages are used only if this alignment
 * exists (and not overridden by the -kernel_text_page_4K boot-arg). The
 * memory layout is:
 *
 *                       :                :
 *                       |     __DATA     |
 *               sdata:  ==================  2Meg
 *                       |                |
 *                       |  zero-padding  |
 *                       |                |
 *               etext:  ------------------
 *                       |                |
 *                       :                :
 *                       |                |
 *                       |     __TEXT     |
 *                       |                |
 *                       :                :
 *                       |                |
 *               stext:  ==================  2Meg
 *                       |                |
 *                       |  zero-padding  |
 *                       |                |
 *               eHIB:   ------------------
 *                       |      __HIB     |
 *                       :                :
 *
 * Prior to changing the mapping from 4K to 2M, the zero-padding pages
 * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
 * 4K pages covering [stext,etext] are coalesced as 2M large pages.
 * The now unused level-1 PTE pages are also freed.
 */
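/* For scale: one 2M mapping in the coalescing loop below replaces 512
 * contiguous 4K PTEs with a single PDE (built from the first PTE's frame
 * plus INTEL_PTE_PS), after which the level-1 page that held those PTEs is
 * handed back to the VM.
 */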
extern ppnum_t	vm_kernel_base_page;
static uint32_t constptes = 0, dataptes = 0;

void pmap_lowmem_finalize(void) {
	spl_t           spl;
	int		i;

	/*
	 * Update wired memory statistics for early boot pages
	 */
	PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);

	/*
	 * Free pages in pmap regions below the base:
	 * rdar://6332712
	 *	We can't free all the pages to VM that EFI reports available.
	 *	Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
	 *	There's also a size miscalculation here: pend is one page less
	 *	than it should be but this is not fixed to be backwards
	 *	compatible.
	 * This is important for KASLR because up to 256*2MB = 512MB of space
	 * has to be released to VM.
	 */
	for (i = 0;
	     pmap_memory_regions[i].end < vm_kernel_base_page;
	     i++) {
		vm_offset_t	pbase = i386_ptob(pmap_memory_regions[i].base);
		vm_offset_t	pend  = i386_ptob(pmap_memory_regions[i].end+1);

		DBG("pmap region %d [%p..[%p\n",
		    i, (void *) pbase, (void *) pend);

		if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED)
			continue;
		/*
		 * rdar://6332712
		 * Adjust limits not to free pages in range 0xc0000-0xff000.
		 */
		if (pbase >= 0xc0000 && pend <= 0x100000)
			continue;
		if (pbase < 0xc0000 && pend > 0x100000) {
			/* page range entirely within region, free lower part */
			DBG("- ml_static_mfree(%p,%p)\n",
			    (void *) ml_static_ptovirt(pbase),
			    (void *) (0xc0000-pbase));
			ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase);
			pbase = 0x100000;
		}
		if (pbase < 0xc0000)
			pend = MIN(pend, 0xc0000);
		if (pend > 0x100000)
			pbase = MAX(pbase, 0x100000);
		DBG("- ml_static_mfree(%p,%p)\n",
		    (void *) ml_static_ptovirt(pbase),
		    (void *) (pend - pbase));
		ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
	}

	/* A final pass to get rid of all initial identity mappings to
	 * low pages.
	 */
	DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);

	/*
	 * Remove all mappings past the boot-cpu descriptor aliases and low globals.
	 * Non-boot-cpu GDT aliases will be remapped later as needed.
	 */
	pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);

	/*
	 * If text and data are both 2MB-aligned,
	 * we can map text with large-pages,
	 * unless the -kernel_text_ps_4K boot-arg overrides.
	 */
	if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
		kprintf("Kernel text is 2MB aligned");
		kernel_text_ps_4K = FALSE;
		if (PE_parse_boot_argn("-kernel_text_ps_4K",
				       &kernel_text_ps_4K,
				       sizeof (kernel_text_ps_4K)))
			kprintf(" but will be mapped with 4K pages\n");
		else
			kprintf(" and will be mapped with 2M pages\n");
	}

	(void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
	if (wpkernel)
		kprintf("Kernel text %p-%p to be write-protected\n",
			(void *) stext, (void *) etext);

	spl = splhigh();

	/*
	 * Scan over text if mappings are to be changed:
	 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
	 * - Change to large-pages if possible and not overridden.
	 */
	if (kernel_text_ps_4K && wpkernel) {
		vm_offset_t     myva;
		for (myva = stext; myva < etext; myva += PAGE_SIZE) {
			pt_entry_t     *ptep;

			ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
			if (ptep)
				pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE);
		}
	}

	if (!kernel_text_ps_4K) {
		vm_offset_t     myva;

		/*
		 * Release zero-filled page padding used for 2M-alignment.
		 */
		DBG("ml_static_mfree(%p,%p) for padding below text\n",
			(void *) eHIB, (void *) (stext - eHIB));
		ml_static_mfree(eHIB, stext - eHIB);
		DBG("ml_static_mfree(%p,%p) for padding above text\n",
			(void *) etext, (void *) (sdata - etext));
		ml_static_mfree(etext, sdata - etext);

		/*
		 * Coalesce text pages into large pages.
		 */
		for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
			pt_entry_t	*ptep;
			vm_offset_t	pte_phys;
			pt_entry_t	*pdep;
			pt_entry_t	pde;

			pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
			ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
			DBG("myva: %p pdep: %p ptep: %p\n",
				(void *) myva, (void *) pdep, (void *) ptep);
			if ((*ptep & INTEL_PTE_VALID) == 0)
				continue;
			pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
			pde = *pdep & PTMASK;	/* page attributes from pde */
			pde |= INTEL_PTE_PS;	/* make it a 2M entry */
			pde |= pte_phys;	/* take page frame from pte */

			if (wpkernel)
				pde &= ~INTEL_PTE_WRITE;
			DBG("pmap_store_pte(%p,0x%llx)\n",
				(void *)pdep, pde);
			pmap_store_pte(pdep, pde);

			/*
			 * Free the now-unused level-1 pte.
			 * Note: ptep is a virtual address to the pte in the
			 * recursive map. We can't use this address to free
			 * the page. Instead we need to compute its address
			 * in the Idle PTEs in "low memory".
			 */
			vm_offset_t vm_ptep = (vm_offset_t) KPTphys
						+ (pte_phys >> PTPGSHIFT);
			DBG("ml_static_mfree(%p,0x%x) for pte\n",
				(void *) vm_ptep, PAGE_SIZE);
			ml_static_mfree(vm_ptep, PAGE_SIZE);
		}

		/* Change variable read by sysctl machdep.pmap */
		pmap_kernel_text_ps = I386_LPGBYTES;
	}

	boolean_t doconstro = TRUE;
#if DEVELOPMENT || DEBUG
	(void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
#endif
	if (doconstro) {
		if (sconst & PAGE_MASK) {
			panic("CONST segment misaligned 0x%lx 0x%lx\n",
			    sconst, econst);
		}
		kprintf("Marking const DATA read-only\n");
	}

	vm_offset_t dva;

	for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
		assert(((sdata | edata) & PAGE_MASK) == 0);
		pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);

		dpte = *dptep;
		assert((dpte & INTEL_PTE_VALID));
		dpte |= INTEL_PTE_NX;
		pmap_store_pte(dptep, dpte);
		dataptes++;
	}
	assert(dataptes > 0);

	for (dva = sconst; dva < econst; dva += I386_PGBYTES) {
		pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);

		dpte = *dptep;

		assert((dpte & INTEL_PTE_VALID));
		dpte |= INTEL_PTE_NX;
		dpte &= ~INTEL_PTE_WRITE;
		constptes++;
		pmap_store_pte(dptep, dpte);
	}

	assert(constptes > 0);

	kernel_segment_command_t * seg;
	kernel_section_t         * sec;

	for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
		if (!strcmp(seg->segname, "__TEXT") ||
		    !strcmp(seg->segname, "__DATA")) {
			continue;
		}
		//XXX
		if (!strcmp(seg->segname, "__KLD")) {
			continue;
		}
		if (!strcmp(seg->segname, "__HIB")) {
			for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
				if (sec->addr & PAGE_MASK)
					panic("__HIB segment's sections misaligned");
				if (!strcmp(sec->sectname, "__text")) {
					pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
				} else {
					pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
				}
			}
		} else {
			pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
		}
	}

	/*
	 * If we're debugging, map the low global vector page at the fixed
	 * virtual address.  Otherwise, remove the mapping for this.
	 */
	if (debug_boot_arg) {
		pt_entry_t *pte = NULL;
		if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS)))
			panic("lowmem pte");
		/* make sure it is defined on page boundary */
		assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
		pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
					| INTEL_PTE_REF
					| INTEL_PTE_MOD
					| INTEL_PTE_WIRED
					| INTEL_PTE_VALID
					| INTEL_PTE_WRITE
					| INTEL_PTE_NX);
	} else {
		pmap_remove(kernel_pmap,
			    LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
	}

	splx(spl);
	if (pmap_pcid_ncpus)
		tlb_flush_global();
	else
		flush_tlb_raw();
}

/*
 * this function is only used for debugging from the vm layer
 */
boolean_t
pmap_verify_free(
		 ppnum_t pn)
{
	pv_rooted_entry_t	pv_h;
	int		pai;
	boolean_t	result;

	assert(pn != vm_page_fictitious_addr);

	if (!pmap_initialized)
		return(TRUE);

	if (pn == vm_page_guard_addr)
		return TRUE;

	pai = ppn_to_pai(pn);
	if (!IS_MANAGED_PAGE(pai))
		return(FALSE);
	pv_h = pai_to_pvh(pn);
	result = (pv_h->pmap == PMAP_NULL);
	return(result);
}

boolean_t
pmap_is_empty(
	pmap_t		pmap,
	vm_map_offset_t	va_start,
	vm_map_offset_t	va_end)
{
	vm_map_offset_t	offset;
	ppnum_t		phys_page;

	if (pmap == PMAP_NULL) {
		return TRUE;
	}

	/*
	 * Check the resident page count
	 * - if it's zero, the pmap is completely empty.
	 * This short-circuit test prevents a virtual address scan which is
	 * painfully slow for 64-bit spaces.
	 * This assumes the count is correct
	 * .. the debug kernel ought to be checking perhaps by page table walk.
	 */
	if (pmap->stats.resident_count == 0)
		return TRUE;

	for (offset = va_start;
	     offset < va_end;
	     offset += PAGE_SIZE_64) {
		phys_page = pmap_find_phys(pmap, offset);
		if (phys_page) {
			kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
				"page %d at 0x%llx\n",
				pmap, va_start, va_end, phys_page, offset);
			return FALSE;
		}
	}

	return TRUE;
}

void
hv_ept_pmap_create(void **ept_pmap, void **eptp)
{
	pmap_t p;

	if ((ept_pmap == NULL) || (eptp == NULL)) {
		return;
	}

	p = pmap_create_options(get_task_ledger(current_task()), 0, (PMAP_CREATE_64BIT | PMAP_CREATE_EPT));
	if (p == PMAP_NULL) {
		*ept_pmap = NULL;
		*eptp = NULL;
		return;
	}

	assert(is_ept_pmap(p));

	*ept_pmap = (void*)p;
	*eptp = (void*)(p->pm_eptp);
	return;
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */

pmap_t
pmap_create_options(
	ledger_t	ledger,
	vm_map_size_t	sz,
	int		flags)
{
	pmap_t		p;
	vm_size_t	size;
	pml4_entry_t	*pml4;
	pml4_entry_t	*kpml4;

	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, sz, flags);

	size = (vm_size_t) sz;

	/*
	 *	A software use-only map doesn't even need a map.
	 */

	if (size != 0) {
		return(PMAP_NULL);
	}

	/*
	 *	Return error when unrecognized flags are passed.
	 */
	if (__improbable((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0)) {
		return(PMAP_NULL);
	}

	p = (pmap_t) zalloc(pmap_zone);
	if (PMAP_NULL == p)
		panic("pmap_create zalloc");

	/* Zero all fields */
	bzero(p, sizeof(*p));
	/* init counts now since we'll be bumping some */
	simple_lock_init(&p->lock, 0);
	bzero(&p->stats, sizeof (p->stats));

	p->ref_count = 1;
	p->nx_enabled = 1;
	p->pm_shared = FALSE;
	ledger_reference(ledger);
	p->ledger = ledger;

	p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? TASK_MAP_64BIT : TASK_MAP_32BIT);

	p->pagezero_accessible = FALSE;

	if (pmap_pcid_ncpus) {
		pmap_pcid_initialize(p);
	}

	p->pm_pml4 = zalloc(pmap_anchor_zone);
	p->pm_upml4 = zalloc(pmap_uanchor_zone); //cleanup for EPT

	pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
	pmap_assert((((uintptr_t)p->pm_upml4) & PAGE_MASK) == 0);

	memset((char *)p->pm_pml4, 0, PAGE_SIZE);
	memset((char *)p->pm_upml4, 0, PAGE_SIZE);

	if (flags & PMAP_CREATE_EPT) {
		p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4) | pmap_eptp_flags;
		p->pm_cr3 = 0;
	} else {
		p->pm_eptp = 0;
		p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
		p->pm_ucr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_upml4);
	}

	/* allocate the vm_objs to hold the pdpt, pde and pte pages */

	p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) * PAGE_SIZE);
	if (NULL == p->pm_obj_pml4)
		panic("pmap_create pdpt obj");

	p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) * PAGE_SIZE);
	if (NULL == p->pm_obj_pdpt)
		panic("pmap_create pdpt obj");

	p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) * PAGE_SIZE);
	if (NULL == p->pm_obj)
		panic("pmap_create pte obj");

	if (!(flags & PMAP_CREATE_EPT)) {
		/* All host pmaps share the kernel's pml4 */
		pml4 = pmap64_pml4(p, 0ULL);
		kpml4 = kernel_pmap->pm_pml4;
		pml4[KERNEL_PML4_INDEX]         = kpml4[KERNEL_PML4_INDEX];
		pml4[KERNEL_KEXTS_INDEX]        = kpml4[KERNEL_KEXTS_INDEX];
		pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
		pml4[KERNEL_DBLMAP_PML4_INDEX]  = kpml4[KERNEL_DBLMAP_PML4_INDEX];
#if KASAN
		pml4[KERNEL_KASAN_PML4_INDEX0] = kpml4[KERNEL_KASAN_PML4_INDEX0];
		pml4[KERNEL_KASAN_PML4_INDEX1] = kpml4[KERNEL_KASAN_PML4_INDEX1];
#endif
		pml4_entry_t *pml4u = pmap64_user_pml4(p, 0ULL);
		pml4u[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX];
	}

#if MACH_ASSERT
	p->pmap_stats_assert = TRUE;
	p->pmap_pid = 0;
	strlcpy(p->pmap_procname, "<nil>", sizeof (p->pmap_procname));
#endif /* MACH_ASSERT */

	PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
		   VM_KERNEL_ADDRHIDE(p));

	return(p);
}

pmap_t
pmap_create(
	ledger_t	ledger,
	vm_map_size_t	sz,
	boolean_t	is_64bit)
{
	return pmap_create_options(ledger, sz, ((is_64bit) ? PMAP_CREATE_64BIT : 0));
}
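/* Example caller: hv_ept_pmap_create() above builds a hypervisor nested
 * pmap with
 *
 *	p = pmap_create_options(get_task_ledger(current_task()), 0,
 *	    PMAP_CREATE_64BIT | PMAP_CREATE_EPT);
 *
 * in which case pm_eptp rather than pm_cr3 is populated.
 */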

/*
 * We maintain stats and ledgers so that a task's physical footprint is:
 * phys_footprint = ((internal - alternate_accounting)
 *                   + (internal_compressed - alternate_accounting_compressed)
 *                   + iokit_mapped
 *                   + purgeable_nonvolatile
 *                   + purgeable_nonvolatile_compressed
 *                   + page_table)
 * where "alternate_accounting" includes "iokit" and "purgeable" memory.
 */

#if MACH_ASSERT
struct {
	uint64_t	num_pmaps_checked;

	int		phys_footprint_over;
	ledger_amount_t	phys_footprint_over_total;
	ledger_amount_t	phys_footprint_over_max;
	int		phys_footprint_under;
	ledger_amount_t	phys_footprint_under_total;
	ledger_amount_t	phys_footprint_under_max;

	int		internal_over;
	ledger_amount_t	internal_over_total;
	ledger_amount_t	internal_over_max;
	int		internal_under;
	ledger_amount_t	internal_under_total;
	ledger_amount_t	internal_under_max;

	int		internal_compressed_over;
	ledger_amount_t	internal_compressed_over_total;
	ledger_amount_t	internal_compressed_over_max;
	int		internal_compressed_under;
	ledger_amount_t	internal_compressed_under_total;
	ledger_amount_t	internal_compressed_under_max;

	int		iokit_mapped_over;
	ledger_amount_t	iokit_mapped_over_total;
	ledger_amount_t	iokit_mapped_over_max;
	int		iokit_mapped_under;
	ledger_amount_t	iokit_mapped_under_total;
	ledger_amount_t	iokit_mapped_under_max;

	int		alternate_accounting_over;
	ledger_amount_t	alternate_accounting_over_total;
	ledger_amount_t	alternate_accounting_over_max;
	int		alternate_accounting_under;
	ledger_amount_t	alternate_accounting_under_total;
	ledger_amount_t	alternate_accounting_under_max;

	int		alternate_accounting_compressed_over;
	ledger_amount_t	alternate_accounting_compressed_over_total;
	ledger_amount_t	alternate_accounting_compressed_over_max;
	int		alternate_accounting_compressed_under;
	ledger_amount_t	alternate_accounting_compressed_under_total;
	ledger_amount_t	alternate_accounting_compressed_under_max;

	int		page_table_over;
	ledger_amount_t	page_table_over_total;
	ledger_amount_t	page_table_over_max;
	int		page_table_under;
	ledger_amount_t	page_table_under_total;
	ledger_amount_t	page_table_under_max;

	int		purgeable_volatile_over;
	ledger_amount_t	purgeable_volatile_over_total;
	ledger_amount_t	purgeable_volatile_over_max;
	int		purgeable_volatile_under;
	ledger_amount_t	purgeable_volatile_under_total;
	ledger_amount_t	purgeable_volatile_under_max;

	int		purgeable_nonvolatile_over;
	ledger_amount_t	purgeable_nonvolatile_over_total;
	ledger_amount_t	purgeable_nonvolatile_over_max;
	int		purgeable_nonvolatile_under;
	ledger_amount_t	purgeable_nonvolatile_under_total;
	ledger_amount_t	purgeable_nonvolatile_under_max;

	int		purgeable_volatile_compressed_over;
	ledger_amount_t	purgeable_volatile_compressed_over_total;
	ledger_amount_t	purgeable_volatile_compressed_over_max;
	int		purgeable_volatile_compressed_under;
	ledger_amount_t	purgeable_volatile_compressed_under_total;
	ledger_amount_t	purgeable_volatile_compressed_under_max;

	int		purgeable_nonvolatile_compressed_over;
	ledger_amount_t	purgeable_nonvolatile_compressed_over_total;
	ledger_amount_t	purgeable_nonvolatile_compressed_over_max;
	int		purgeable_nonvolatile_compressed_under;
	ledger_amount_t	purgeable_nonvolatile_compressed_under_total;
	ledger_amount_t	purgeable_nonvolatile_compressed_under_max;

	int		network_volatile_over;
	ledger_amount_t	network_volatile_over_total;
	ledger_amount_t	network_volatile_over_max;
	int		network_volatile_under;
	ledger_amount_t	network_volatile_under_total;
	ledger_amount_t	network_volatile_under_max;

	int		network_nonvolatile_over;
	ledger_amount_t	network_nonvolatile_over_total;
	ledger_amount_t	network_nonvolatile_over_max;
	int		network_nonvolatile_under;
	ledger_amount_t	network_nonvolatile_under_total;
	ledger_amount_t	network_nonvolatile_under_max;

	int		network_volatile_compressed_over;
	ledger_amount_t	network_volatile_compressed_over_total;
	ledger_amount_t	network_volatile_compressed_over_max;
	int		network_volatile_compressed_under;
	ledger_amount_t	network_volatile_compressed_under_total;
	ledger_amount_t	network_volatile_compressed_under_max;

	int		network_nonvolatile_compressed_over;
	ledger_amount_t	network_nonvolatile_compressed_over_total;
	ledger_amount_t	network_nonvolatile_compressed_over_max;
	int		network_nonvolatile_compressed_under;
	ledger_amount_t	network_nonvolatile_compressed_under_total;
	ledger_amount_t	network_nonvolatile_compressed_under_max;
} pmap_ledgers_drift;
static void pmap_check_ledgers(pmap_t pmap);
#else /* MACH_ASSERT */
static inline void pmap_check_ledgers(__unused pmap_t pmap) {}
#endif /* MACH_ASSERT */

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
extern int vm_wired_objects_page_count;

void
pmap_destroy(pmap_t	p)
{
	int		c;

	if (p == PMAP_NULL)
		return;

	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
		   VM_KERNEL_ADDRHIDE(p));

	PMAP_LOCK(p);

	c = --p->ref_count;

	pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);

	if (c == 0) {
		/*
		 * If some cpu is not using the physical pmap pointer that it
		 * is supposed to be (see set_dirbase), we might be using the
		 * pmap that is being destroyed! Make sure we are
		 * physically on the right pmap:
		 */
		PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
		if (pmap_pcid_ncpus)
			pmap_destroy_pcid_sync(p);
	}

	PMAP_UNLOCK(p);

	if (c != 0) {
		PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
		pmap_assert(p == kernel_pmap);
		return;	/* still in use */
	}

	/*
	 *	Free the memory maps, then the
	 *	pmap structure.
	 */
	int inuse_ptepages = 0;

	zfree(pmap_anchor_zone, p->pm_pml4);
	zfree(pmap_uanchor_zone, p->pm_upml4);

	inuse_ptepages += p->pm_obj_pml4->resident_page_count;
	vm_object_deallocate(p->pm_obj_pml4);

	inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
	vm_object_deallocate(p->pm_obj_pdpt);

	inuse_ptepages += p->pm_obj->resident_page_count;
	vm_object_deallocate(p->pm_obj);

	OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count);
	PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);

	pmap_check_ledgers(p);
	ledger_dereference(p->ledger);
	zfree(pmap_zone, p);

	PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
}

/*
 *	Add a reference to the specified pmap.
 */

void
pmap_reference(pmap_t	p)
{
	if (p != PMAP_NULL) {
		PMAP_LOCK(p);
		p->ref_count++;
		PMAP_UNLOCK(p);
	}
}

/*
 *	Remove phys addr if mapped in specified map
 *
 */
void
pmap_remove_some_phys(
	__unused pmap_t		map,
	__unused ppnum_t	pn)
{

/* Implement to support working set code */

}


void
pmap_protect(
	pmap_t		map,
	vm_map_offset_t	sva,
	vm_map_offset_t	eva,
	vm_prot_t	prot)
{
	pmap_protect_options(map, sva, eva, prot, 0, NULL);
}


b0d623f7
A
1662/*
1663 * Set the physical protection on the
1664 * specified range of this map as requested.
d9a64523
A
1665 *
1666 * VERY IMPORTANT: Will *NOT* increase permissions.
1667 * pmap_protect_options() should protect the range against any access types
1668 * that are not in "prot" but it should never grant extra access.
1669 * For example, if "prot" is READ|EXECUTE, that means "remove write
1670 * access" but it does *not* mean "add read and execute" access.
1671 * VM relies on getting soft-faults to enforce extra checks (code
1672 * signing, for example), for example.
1673 * New access permissions are granted via pmap_enter() only.
b0d623f7
A
1674 */
1675void
39236c6e 1676pmap_protect_options(
b0d623f7
A
1677 pmap_t map,
1678 vm_map_offset_t sva,
1679 vm_map_offset_t eva,
39236c6e
A
1680 vm_prot_t prot,
1681 unsigned int options,
1682 void *arg)
b0d623f7
A
1683{
1684 pt_entry_t *pde;
1685 pt_entry_t *spte, *epte;
1686 vm_map_offset_t lva;
1687 vm_map_offset_t orig_sva;
1688 boolean_t set_NX;
1689 int num_found = 0;
3e170ce0 1690 boolean_t is_ept;
b0d623f7
A
1691
1692 pmap_intr_assert();
1693
1694 if (map == PMAP_NULL)
1695 return;
1696
1697 if (prot == VM_PROT_NONE) {
39236c6e 1698 pmap_remove_options(map, sva, eva, options);
b0d623f7
A
1699 return;
1700 }
5ba3f43e 1701
b0d623f7 1702 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
5ba3f43e
A
1703 VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(sva),
1704 VM_KERNEL_ADDRHIDE(eva));
b0d623f7
A
1705
1706 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled)
1707 set_NX = FALSE;
1708 else
1709 set_NX = TRUE;
1710
3e170ce0
A
1711 is_ept = is_ept_pmap(map);
1712
1713
b0d623f7
A
1714 PMAP_LOCK(map);
1715
1716 orig_sva = sva;
1717 while (sva < eva) {
1718 lva = (sva + pde_mapped_size) & ~(pde_mapped_size - 1);
1719 if (lva > eva)
1720 lva = eva;
1721 pde = pmap_pde(map, sva);
3e170ce0
A
1722 if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
1723 if (*pde & PTE_PS) {
b0d623f7
A
1724 /* superpage */
1725 spte = pde;
1726 epte = spte + 1; /* exclusive upper bound */
1727 } else {
1728 spte = pmap_pte(map, (sva & ~(pde_mapped_size - 1)));
1729 spte = &spte[ptenum(sva)];
1730 epte = &spte[intel_btop(lva - sva)];
1731 }
1732
1733 for (; spte < epte; spte++) {
3e170ce0 1734 if (!(*spte & PTE_VALID_MASK(is_ept)))
b0d623f7
A
1735 continue;
1736
3e170ce0 1737 if (is_ept) {
d9a64523 1738 if (!(prot & VM_PROT_READ)) {
3e170ce0 1739 pmap_update_pte(spte, PTE_READ(is_ept), 0);
d9a64523 1740 }
3e170ce0 1741 }
d9a64523 1742 if (!(prot & VM_PROT_WRITE)) {
3e170ce0 1743 pmap_update_pte(spte, PTE_WRITE(is_ept), 0);
d9a64523
A
1744 }
1745#if DEVELOPMENT || DEBUG
1746 else if ((options & PMAP_OPTIONS_PROTECT_IMMEDIATE) &&
1747 map == kernel_pmap) {
1748 pmap_update_pte(spte, 0, PTE_WRITE(is_ept));
1749 }
1750#endif /* DEVELOPMENT || DEBUG */
b0d623f7 1751
3e170ce0 1752 if (set_NX) {
d9a64523 1753 if (!is_ept) {
3e170ce0 1754 pmap_update_pte(spte, 0, INTEL_PTE_NX);
d9a64523 1755 } else {
3e170ce0 1756 pmap_update_pte(spte, INTEL_EPT_EX, 0);
d9a64523 1757 }
3e170ce0 1758 }
b0d623f7
A
1759 num_found++;
1760 }
1761 }
1762 sva = lva;
1763 }
39236c6e
A
1764 if (num_found) {
1765 if (options & PMAP_OPTIONS_NOFLUSH)
1766 PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg);
1767 else
1768 PMAP_UPDATE_TLBS(map, orig_sva, eva);
1769 }
b0d623f7
A
1770 PMAP_UNLOCK(map);
1771
5ba3f43e 1772 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END);
b0d623f7
A
1773
1774}
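/*
 * A minimal usage sketch (kept under #if 0, not built): demoting
 * permissions on a range. "map", "start", "end" and "pn" are hypothetical
 * locals; pmap_protect() and pmap_enter() are the entry points defined in
 * this file.
 */
#if 0
	/* Demote [start, end) to read+execute; this never grants access. */
	pmap_protect(map, start, end, VM_PROT_READ | VM_PROT_EXECUTE);

	/* Re-granting write access must go through pmap_enter(). */
	(void) pmap_enter(map, start, pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, FALSE);
#endif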
1775
1776/* Map a (possibly) autogenerated block */
5ba3f43e 1777kern_return_t
b0d623f7 1778pmap_map_block(
5ba3f43e 1779 pmap_t pmap,
b0d623f7
A
1780 addr64_t va,
1781 ppnum_t pa,
1782 uint32_t size,
1783 vm_prot_t prot,
1784 int attr,
1785 __unused unsigned int flags)
1786{
5ba3f43e
A
1787 kern_return_t kr;
1788 addr64_t original_va = va;
b0d623f7
A
1789 uint32_t page;
1790 int cur_page_size;
1791
1792 if (attr & VM_MEM_SUPERPAGE)
1793 cur_page_size = SUPERPAGE_SIZE;
5ba3f43e 1794 else
b0d623f7
A
1795 cur_page_size = PAGE_SIZE;
1796
1797 for (page = 0; page < size; page += cur_page_size / PAGE_SIZE) {
5ba3f43e
A
1798 kr = pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
1799
1800 if (kr != KERN_SUCCESS) {
1801 /*
1802 * This will panic for now, as it is unclear that
1803 * removing the mappings is correct.
1804 */
1805 panic("%s: failed pmap_enter, "
1806 "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x",
1807 __FUNCTION__,
1808 pmap, va, pa, size, prot, flags);
1809
1810 pmap_remove(pmap, original_va, va - original_va);
1811 return kr;
1812 }
1813
b0d623f7
A
1814 va += cur_page_size;
1815 pa += cur_page_size / PAGE_SIZE;
1816 }
5ba3f43e
A
1817
1818 return KERN_SUCCESS;
b0d623f7
A
1819}
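/*
 * A minimal usage sketch (not built): mapping four physically contiguous
 * pages at "va". "va" and "pn" are hypothetical; note that "size" is a
 * page count and that pa advances in pages, as the loop above shows.
 */
#if 0
	kern_return_t kr;

	kr = pmap_map_block(kernel_pmap, va, pn, 4 /* pages */,
	    VM_PROT_READ | VM_PROT_WRITE, 0, 0);
	assert(kr == KERN_SUCCESS);
#endif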
1820
316670eb 1821kern_return_t
b0d623f7
A
1822pmap_expand_pml4(
1823 pmap_t map,
316670eb
A
1824 vm_map_offset_t vaddr,
1825 unsigned int options)
b0d623f7
A
1826{
1827 vm_page_t m;
1828 pmap_paddr_t pa;
1829 uint64_t i;
1830 ppnum_t pn;
1831 pml4_entry_t *pml4p;
3e170ce0 1832 boolean_t is_ept = is_ept_pmap(map);
b0d623f7
A
1833
1834 DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
1835
5c9f4661
A
1836 /* With the exception of the kext "basement", the kernel's level 4
1837 * pagetables must not be dynamically expanded.
1838 */
1839 assert(map != kernel_pmap || (vaddr == KERNEL_BASEMENT));
b0d623f7
A
1840 /*
1841 * Allocate a VM page for the pml4 page
1842 */
316670eb
A
1843 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1844 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1845 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1846 VM_PAGE_WAIT();
316670eb 1847 }
b0d623f7
A
1848 /*
1849 * put the page into the pmap's obj list so it
1850 * can be found later.
1851 */
39037602 1852 pn = VM_PAGE_GET_PHYS_PAGE(m);
b0d623f7
A
1853 pa = i386_ptob(pn);
1854 i = pml4idx(map, vaddr);
1855
1856 /*
1857 * Zero the page.
1858 */
1859 pmap_zero_page(pn);
1860
1861 vm_page_lockspin_queues();
3e170ce0 1862 vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
b0d623f7
A
1863 vm_page_unlock_queues();
1864
1865 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1866 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1867 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
b0d623f7
A
1868
1869 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1870 vm_object_lock(map->pm_obj_pml4);
1871
1872 PMAP_LOCK(map);
1873 /*
1874 * See if someone else expanded us first
1875 */
1876 if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
1877 PMAP_UNLOCK(map);
1878 vm_object_unlock(map->pm_obj_pml4);
1879
1880 VM_PAGE_FREE(m);
1881
1882 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb
A
1883 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1884 return KERN_SUCCESS;
b0d623f7
A
1885 }
1886
1887#if 0 /* DEBUG */
39236c6e 1888 if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) {
b0d623f7
A
1889 panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1890 map, map->pm_obj_pml4, vaddr, i);
1891 }
1892#endif
3e170ce0 1893 vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
b0d623f7
A
1894 vm_object_unlock(map->pm_obj_pml4);
1895
1896 /*
1897 * Set the page directory entry for this page table.
1898 */
1899 pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
1900
1901 pmap_store_pte(pml4p, pa_to_pte(pa)
3e170ce0
A
1902 | PTE_READ(is_ept)
1903 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1904 | PTE_WRITE(is_ept));
5c9f4661
A
1905 pml4_entry_t *upml4p;
1906
1907 upml4p = pmap64_user_pml4(map, vaddr);
1908 pmap_store_pte(upml4p, pa_to_pte(pa)
1909 | PTE_READ(is_ept)
1910 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1911 | PTE_WRITE(is_ept));
b0d623f7
A
1912
1913 PMAP_UNLOCK(map);
1914
316670eb 1915 return KERN_SUCCESS;
b0d623f7
A
1916}
1917
316670eb
A
1918kern_return_t
1919pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
b0d623f7
A
1920{
1921 vm_page_t m;
1922 pmap_paddr_t pa;
1923 uint64_t i;
1924 ppnum_t pn;
1925 pdpt_entry_t *pdptp;
3e170ce0 1926 boolean_t is_ept = is_ept_pmap(map);
b0d623f7
A
1927
1928 DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
1929
1930 while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
316670eb
A
1931 kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
1932 if (pep4kr != KERN_SUCCESS)
1933 return pep4kr;
b0d623f7
A
1934 }
1935
1936 /*
1937 * Allocate a VM page for the pdpt page
1938 */
316670eb
A
1939 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1940 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1941 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1942 VM_PAGE_WAIT();
316670eb 1943 }
b0d623f7
A
1944
1945 /*
1946 * put the page into the pmap's obj list so it
1947 * can be found later.
1948 */
39037602 1949 pn = VM_PAGE_GET_PHYS_PAGE(m);
b0d623f7
A
1950 pa = i386_ptob(pn);
1951 i = pdptidx(map, vaddr);
1952
1953 /*
1954 * Zero the page.
1955 */
1956 pmap_zero_page(pn);
1957
1958 vm_page_lockspin_queues();
3e170ce0 1959 vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
b0d623f7
A
1960 vm_page_unlock_queues();
1961
1962 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1963 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1964 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
b0d623f7
A
1965
1966 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1967 vm_object_lock(map->pm_obj_pdpt);
1968
1969 PMAP_LOCK(map);
1970 /*
1971 * See if someone else expanded us first
1972 */
1973 if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
1974 PMAP_UNLOCK(map);
1975 vm_object_unlock(map->pm_obj_pdpt);
1976
1977 VM_PAGE_FREE(m);
1978
1979 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb
A
1980 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1981 return KERN_SUCCESS;
b0d623f7
A
1982 }
1983
1984#if 0 /* DEBUG */
39236c6e 1985 if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) {
b0d623f7
A
1986 panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1987 map, map->pm_obj_pdpt, vaddr, i);
1988 }
1989#endif
3e170ce0 1990 vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
b0d623f7
A
1991 vm_object_unlock(map->pm_obj_pdpt);
1992
1993 /*
1994 * Set the page directory entry for this page table.
1995 */
1996 pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
1997
1998 pmap_store_pte(pdptp, pa_to_pte(pa)
3e170ce0
A
1999 | PTE_READ(is_ept)
2000 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
2001 | PTE_WRITE(is_ept));
b0d623f7
A
2002
2003 PMAP_UNLOCK(map);
2004
316670eb 2005 return KERN_SUCCESS;
b0d623f7
A
2006
2007}
2008
2009
2010
2011/*
2012 * Routine: pmap_expand
2013 *
2014 * Expands a pmap to be able to map the specified virtual address.
2015 *
2016 * Allocates new virtual memory for the P0 or P1 portion of the
2017 * pmap, then re-maps the physical pages that were in the old
2018 * pmap to be in the new pmap.
2019 *
2020 * Must be called with the pmap system and the pmap unlocked,
2021 * since these must be unlocked to use vm_allocate or vm_deallocate.
2022 * Thus it must be called in a loop that checks whether the map
2023 * has been expanded enough.
2024 * (We won't loop forever, since page tables aren't shrunk.)
2025 */
316670eb 2026kern_return_t
b0d623f7
A
2027pmap_expand(
2028 pmap_t map,
316670eb
A
2029 vm_map_offset_t vaddr,
2030 unsigned int options)
b0d623f7
A
2031{
2032 pt_entry_t *pdp;
39037602
A
2033 vm_page_t m;
2034 pmap_paddr_t pa;
b0d623f7
A
2035 uint64_t i;
2036 ppnum_t pn;
3e170ce0 2037 boolean_t is_ept = is_ept_pmap(map);
b0d623f7
A
2038
2039
2040 /*
2041 * For the kernel, the virtual address must be in or above the basement
2042 * which is for kexts and is in the 512GB immediately below the kernel.
2043 * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
2044 */
5c9f4661
A
2045 if (__improbable(map == kernel_pmap &&
2046 !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))) {
2047 if ((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0) {
2048 panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
2049 }
2050 }
b0d623f7
A
2051
2052
2053 while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
5c9f4661 2054 assert((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0);
316670eb
A
2055 kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
2056 if (pepkr != KERN_SUCCESS)
2057 return pepkr;
b0d623f7
A
2058 }
2059
2060 /*
2061 * Allocate a VM page for the pde entries.
2062 */
316670eb
A
2063 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
2064 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
2065 return KERN_RESOURCE_SHORTAGE;
b0d623f7 2066 VM_PAGE_WAIT();
316670eb 2067 }
b0d623f7
A
2068
2069 /*
2070 * put the page into the pmap's obj list so it
2071 * can be found later.
2072 */
39037602 2073 pn = VM_PAGE_GET_PHYS_PAGE(m);
b0d623f7
A
2074 pa = i386_ptob(pn);
2075 i = pdeidx(map, vaddr);
2076
2077 /*
2078 * Zero the page.
2079 */
2080 pmap_zero_page(pn);
2081
2082 vm_page_lockspin_queues();
3e170ce0 2083 vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
b0d623f7
A
2084 vm_page_unlock_queues();
2085
2086 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 2087 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 2088 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
b0d623f7
A
2089
2090 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
2091 vm_object_lock(map->pm_obj);
2092
2093 PMAP_LOCK(map);
2094
2095 /*
2096 * See if someone else expanded us first
2097 */
2098 if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
2099 PMAP_UNLOCK(map);
2100 vm_object_unlock(map->pm_obj);
2101
2102 VM_PAGE_FREE(m);
2103
5c9f4661 2104 OSAddAtomic(-1, &inuse_ptepages_count); /* TODO: replace all with inlines */
316670eb
A
2105 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
2106 return KERN_SUCCESS;
b0d623f7
A
2107 }
2108
2109#if 0 /* DEBUG */
39236c6e 2110 if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) {
b0d623f7
A
2111 panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
2112 map, map->pm_obj, vaddr, i);
2113 }
2114#endif
3e170ce0 2115 vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
b0d623f7
A
2116 vm_object_unlock(map->pm_obj);
2117
2118 /*
2119 * Set the page directory entry for this page table.
2120 */
2121 pdp = pmap_pde(map, vaddr);
2122 pmap_store_pte(pdp, pa_to_pte(pa)
3e170ce0
A
2123 | PTE_READ(is_ept)
2124 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
2125 | PTE_WRITE(is_ept));
b0d623f7
A
2126
2127 PMAP_UNLOCK(map);
2128
316670eb 2129 return KERN_SUCCESS;
b0d623f7
A
2130}
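/*
 * A minimal sketch (not built) of the retry loop described in the block
 * comment above pmap_expand(): callers re-check the translation after
 * each expansion. "map", "vaddr" and "options" are hypothetical locals.
 */
#if 0
	while (pmap_pte(map, vaddr) == PT_ENTRY_NULL) {
		kern_return_t kr = pmap_expand(map, vaddr, options);
		if (kr != KERN_SUCCESS)
			break;	/* KERN_RESOURCE_SHORTAGE under PMAP_EXPAND_OPTIONS_NOWAIT */
	}
#endif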
2131
2132/* On K64 machines with more than 32GB of memory, pmap_steal_memory
2133 * will allocate past the 1GB of pre-expanded virtual kernel area. This
2134 * function allocates all the page tables using memory from the same pool
2135 * that pmap_steal_memory uses, rather than calling vm_page_grab (which
2136 * isn't available yet). */
2137void
6d2010ae
A
2138pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
2139{
b0d623f7
A
2140 ppnum_t pn;
2141 pt_entry_t *pte;
3e170ce0 2142 boolean_t is_ept = is_ept_pmap(pmap);
b0d623f7
A
2143
2144 PMAP_LOCK(pmap);
2145
2146 if (pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
0b4c1975 2147 if (!pmap_next_page_hi(&pn))
b0d623f7
A
2148 panic("pmap_pre_expand");
2149
2150 pmap_zero_page(pn);
2151
2152 pte = pmap64_pml4(pmap, vaddr);
2153
2154 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
3e170ce0
A
2155 | PTE_READ(is_ept)
2156 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
2157 | PTE_WRITE(is_ept));
5c9f4661
A
2158
2159 pte = pmap64_user_pml4(pmap, vaddr);
2160
2161 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
2162 | PTE_READ(is_ept)
2163 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
2164 | PTE_WRITE(is_ept));
2165
b0d623f7
A
2166 }
2167
2168 if (pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
0b4c1975 2169 if (!pmap_next_page_hi(&pn))
b0d623f7
A
2170 panic("pmap_pre_expand");
2171
2172 pmap_zero_page(pn);
2173
2174 pte = pmap64_pdpt(pmap, vaddr);
2175
2176 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
3e170ce0
A
2177 | PTE_READ(is_ept)
2178 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
2179 | PTE_WRITE(is_ept));
b0d623f7
A
2180 }
2181
2182 if (pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
0b4c1975 2183 if (!pmap_next_page_hi(&pn))
b0d623f7
A
2184 panic("pmap_pre_expand");
2185
2186 pmap_zero_page(pn);
2187
2188 pte = pmap64_pde(pmap, vaddr);
2189
2190 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
3e170ce0
A
2191 | PTE_READ(is_ept)
2192 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
2193 | PTE_WRITE(is_ept));
b0d623f7
A
2194 }
2195
2196 PMAP_UNLOCK(pmap);
2197}
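/*
 * A minimal sketch (not built): pre-populating all three translation
 * levels for an early kernel mapping, before vm_page_grab() is usable.
 * "vaddr" is hypothetical.
 */
#if 0
	pmap_pre_expand(kernel_pmap, vaddr);
	assert(pmap_pte(kernel_pmap, vaddr) != PT_ENTRY_NULL);
#endif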
2198
2199/*
2200 * pmap_sync_page_data_phys(ppnum_t pa)
2201 *
2202 * Invalidates all of the instruction cache on a physical page and
2203 * pushes any dirty data from the data cache for the same physical page.
2204 * Not required on i386.
2205 */
2206void
2207pmap_sync_page_data_phys(__unused ppnum_t pa)
2208{
2209 return;
2210}
2211
2212/*
2213 * pmap_sync_page_attributes_phys(ppnum_t pa)
2214 *
2215 * Write back and invalidate all cachelines on a physical page.
2216 */
2217void
2218pmap_sync_page_attributes_phys(ppnum_t pa)
2219{
2220 cache_flush_page_phys(pa);
2221}
2222
2223
2224
2225#ifdef CURRENTLY_UNUSED_AND_UNTESTED
2226
2227int collect_ref;
2228int collect_unref;
2229
2230/*
2231 * Routine: pmap_collect
2232 * Function:
2233 * Garbage collects the physical map system for
2234 * pages which are no longer used.
2235 * Success need not be guaranteed -- that is, there
2236 * may well be pages which are not referenced, but
2237 * others may be collected.
2238 * Usage:
2239 * Called by the pageout daemon when pages are scarce.
2240 */
2241void
2242pmap_collect(
2243 pmap_t p)
2244{
39037602 2245 pt_entry_t *pdp, *ptp;
b0d623f7
A
2246 pt_entry_t *eptp;
2247 int wired;
3e170ce0 2248 boolean_t is_ept;
b0d623f7
A
2249
2250 if (p == PMAP_NULL)
2251 return;
2252
2253 if (p == kernel_pmap)
2254 return;
2255
3e170ce0
A
2256 is_ept = is_ept_pmap(p);
2257
b0d623f7
A
2258 /*
2259 * Garbage collect map.
2260 */
2261 PMAP_LOCK(p);
2262
2263 for (pdp = (pt_entry_t *)p->dirbase;
2264 pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
2265 pdp++)
2266 {
3e170ce0
A
2267 if (*pdp & PTE_VALID_MASK(is_ept)) {
2268 if (*pdp & PTE_REF(is_ept)) {
2269 pmap_store_pte(pdp, *pdp & ~PTE_REF(is_ept));
2270 collect_ref++;
2271 } else {
2272 collect_unref++;
2273 ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
2274 eptp = ptp + NPTEPG;
b0d623f7 2275
3e170ce0
A
2276 /*
2277 * If the pte page has any wired mappings, we cannot
2278 * free it.
2279 */
2280 wired = 0;
2281 {
39037602 2282 pt_entry_t *ptep;
3e170ce0
A
2283 for (ptep = ptp; ptep < eptp; ptep++) {
2284 if (iswired(*ptep)) {
2285 wired = 1;
2286 break;
2287 }
2288 }
2289 }
2290 if (!wired) {
2291 /*
2292 * Remove the virtual addresses mapped by this pte page.
2293 */
2294 pmap_remove_range(p,
2295 pdetova(pdp - (pt_entry_t *)p->dirbase),
2296 ptp,
2297 eptp);
2298
2299 /*
2300 * Invalidate the page directory pointer.
2301 */
2302 pmap_store_pte(pdp, 0x0);
2303
2304 PMAP_UNLOCK(p);
2305
2306 /*
2307 * And free the pte page itself.
2308 */
2309 {
39037602 2310 vm_page_t m;
3e170ce0
A
2311
2312 vm_object_lock(p->pm_obj);
2313
2314 m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
2315 if (m == VM_PAGE_NULL)
2316 panic("pmap_collect: pte page not in object");
2317
2318 vm_object_unlock(p->pm_obj);
2319
2320 VM_PAGE_FREE(m);
2321
2322 OSAddAtomic(-1, &inuse_ptepages_count);
2323 PMAP_ZINFO_PFREE(p, PAGE_SIZE);
2324 }
2325
2326 PMAP_LOCK(p);
2327 }
b0d623f7 2328 }
b0d623f7 2329 }
b0d623f7
A
2330 }
2331
2332 PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
2333 PMAP_UNLOCK(p);
2334 return;
b0d623f7
A
2335}
2336#endif
2337
2338
2339void
2340pmap_copy_page(ppnum_t src, ppnum_t dst)
2341{
2342 bcopy_phys((addr64_t)i386_ptob(src),
2343 (addr64_t)i386_ptob(dst),
2344 PAGE_SIZE);
2345}
2346
2347
2348/*
2349 * Routine: pmap_pageable
2350 * Function:
2351 * Make the specified pages (by pmap, offset)
2352 * pageable (or not) as requested.
2353 *
2354 * A page which is not pageable may not take
2355 * a fault; therefore, its page table entry
2356 * must remain valid for the duration.
2357 *
2358 * This routine is merely advisory; pmap_enter
2359 * will specify that these pages are to be wired
2360 * down (or not) as appropriate.
2361 */
2362void
2363pmap_pageable(
2364 __unused pmap_t pmap,
2365 __unused vm_map_offset_t start_addr,
2366 __unused vm_map_offset_t end_addr,
2367 __unused boolean_t pageable)
2368{
2369#ifdef lint
2370 pmap++; start_addr++; end_addr++; pageable++;
2371#endif /* lint */
2372}
2373
b0d623f7
A
2374void
2375invalidate_icache(__unused vm_offset_t addr,
2376 __unused unsigned cnt,
2377 __unused int phys)
2378{
2379 return;
2380}
2381
2382void
2383flush_dcache(__unused vm_offset_t addr,
2384 __unused unsigned count,
2385 __unused int phys)
2386{
2387 return;
2388}
2389
2390#if CONFIG_DTRACE
2391/*
2392 * Constrain DTrace copyin/copyout actions
2393 */
2394extern kern_return_t dtrace_copyio_preflight(addr64_t);
2395extern kern_return_t dtrace_copyio_postflight(addr64_t);
2396
2397kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
2398{
2399 thread_t thread = current_thread();
6d2010ae 2400 uint64_t ccr3;
b0d623f7
A
2401 if (current_map() == kernel_map)
2402 return KERN_FAILURE;
6d2010ae
A
2403 else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE))
2404 return KERN_FAILURE;
2405 else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3))
b0d623f7 2406 return KERN_FAILURE;
b0d623f7
A
2407 else
2408 return KERN_SUCCESS;
2409}
2410
2411kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
2412{
2413 return KERN_SUCCESS;
2414}
2415#endif /* CONFIG_DTRACE */
2416
2417#include <mach_vm_debug.h>
2418#if MACH_VM_DEBUG
2419#include <vm/vm_debug.h>
2420
2421int
2422pmap_list_resident_pages(
2423 __unused pmap_t pmap,
2424 __unused vm_offset_t *listp,
2425 __unused int space)
2426{
2427 return 0;
2428}
2429#endif /* MACH_VM_DEBUG */
2430
2431
39037602 2432#if CONFIG_COREDUMP
b0d623f7
A
2433/* temporary workaround */
2434boolean_t
2435coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
2436{
2437#if 0
2438 pt_entry_t *ptep;
2439
2440 ptep = pmap_pte(map->pmap, va);
2441 if (0 == ptep)
2442 return FALSE;
2443 return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
2444#else
2445 return TRUE;
2446#endif
2447}
39037602 2448#endif
b0d623f7
A
2449
2450boolean_t
2451phys_page_exists(ppnum_t pn)
2452{
2453 assert(pn != vm_page_fictitious_addr);
2454
2455 if (!pmap_initialized)
2456 return TRUE;
2457
2458 if (pn == vm_page_guard_addr)
2459 return FALSE;
2460
2461 if (!IS_MANAGED_PAGE(ppn_to_pai(pn)))
2462 return FALSE;
2463
2464 return TRUE;
2465}
2466
6d2010ae
A
2467
2468
b0d623f7
A
2469void
2470pmap_switch(pmap_t tpmap)
2471{
2472 spl_t s;
2473
d9a64523 2474 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(tpmap));
b0d623f7 2475 s = splhigh(); /* Make sure interruptions are disabled */
fe8ab488 2476 set_dirbase(tpmap, current_thread(), cpu_number());
b0d623f7 2477 splx(s);
d9a64523 2478 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END);
b0d623f7
A
2479}
2480
2481
2482/*
2483 * disable no-execute capability on
2484 * the specified pmap
2485 */
2486void
2487pmap_disable_NX(pmap_t pmap)
2488{
2489 pmap->nx_enabled = 0;
2490}
2491
6d2010ae
A
2492void
2493pt_fake_zone_init(int zone_index)
2494{
2495 pt_fake_zone_index = zone_index;
2496}
2497
b0d623f7
A
2498void
2499pt_fake_zone_info(
2500 int *count,
2501 vm_size_t *cur_size,
2502 vm_size_t *max_size,
2503 vm_size_t *elem_size,
2504 vm_size_t *alloc_size,
6d2010ae 2505 uint64_t *sum_size,
b0d623f7 2506 int *collectable,
6d2010ae
A
2507 int *exhaustable,
2508 int *caller_acct)
b0d623f7
A
2509{
2510 *count = inuse_ptepages_count;
2511 *cur_size = PAGE_SIZE * inuse_ptepages_count;
2512 *max_size = PAGE_SIZE * (inuse_ptepages_count +
2513 vm_page_inactive_count +
2514 vm_page_active_count +
2515 vm_page_free_count);
2516 *elem_size = PAGE_SIZE;
2517 *alloc_size = PAGE_SIZE;
6d2010ae 2518 *sum_size = alloc_ptepages_count * PAGE_SIZE;
b0d623f7
A
2519
2520 *collectable = 1;
2521 *exhaustable = 0;
6d2010ae 2522 *caller_acct = 1;
b0d623f7
A
2523}
2524
39236c6e
A
2525
2526void
2527pmap_flush_context_init(pmap_flush_context *pfc)
2528{
2529 pfc->pfc_cpus = 0;
2530 pfc->pfc_invalid_global = 0;
2531}
2532
39037602 2533extern uint64_t TLBTimeOut;
39236c6e
A
2534void
2535pmap_flush(
2536 pmap_flush_context *pfc)
2537{
2538 unsigned int my_cpu;
2539 unsigned int cpu;
d9a64523 2540 cpumask_t cpu_bit;
fe8ab488
A
2541 cpumask_t cpus_to_respond = 0;
2542 cpumask_t cpus_to_signal = 0;
2543 cpumask_t cpus_signaled = 0;
39236c6e
A
2544 boolean_t flush_self = FALSE;
2545 uint64_t deadline;
2546
2547 mp_disable_preemption();
2548
2549 my_cpu = cpu_number();
2550 cpus_to_signal = pfc->pfc_cpus;
2551
2552 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
5ba3f43e 2553 NULL, cpus_to_signal);
39236c6e
A
2554
2555 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {
2556
2557 if (cpus_to_signal & cpu_bit) {
2558
2559 cpus_to_signal &= ~cpu_bit;
2560
a39ff7e2 2561 if (!cpu_is_running(cpu))
39236c6e
A
2562 continue;
2563
2564 if (pfc->pfc_invalid_global & cpu_bit)
2565 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2566 else
2567 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
2568 mfence();
2569
2570 if (cpu == my_cpu) {
2571 flush_self = TRUE;
2572 continue;
2573 }
2574 if (CPU_CR3_IS_ACTIVE(cpu)) {
2575 cpus_to_respond |= cpu_bit;
2576 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2577 }
2578 }
2579 }
2580 cpus_signaled = cpus_to_respond;
2581
2582 /*
2583 * Flush local tlb if required.
2584 * Do this now to overlap with other processors responding.
2585 */
2586 if (flush_self && cpu_datap(my_cpu)->cpu_tlb_invalid != FALSE)
2587 process_pmap_updates();
2588
2589 if (cpus_to_respond) {
2590
fe8ab488
A
2591 deadline = mach_absolute_time() +
2592 (TLBTimeOut ? TLBTimeOut : LockTimeOut);
2593 boolean_t is_timeout_traced = FALSE;
5ba3f43e 2594
39236c6e
A
2595 /*
2596 * Wait for those other cpus to acknowledge
2597 */
2598 while (cpus_to_respond != 0) {
2599 long orig_acks = 0;
2600
2601 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2602 /* Consider checking local/global invalidity
2603 * as appropriate in the PCID case.
2604 */
2605 if ((cpus_to_respond & cpu_bit) != 0) {
a39ff7e2 2606 if (!cpu_is_running(cpu) ||
39236c6e
A
2607 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2608 !CPU_CR3_IS_ACTIVE(cpu)) {
2609 cpus_to_respond &= ~cpu_bit;
2610 }
2611 cpu_pause();
2612 }
2613 if (cpus_to_respond == 0)
2614 break;
2615 }
2616 if (cpus_to_respond && (mach_absolute_time() > deadline)) {
2617 if (machine_timeout_suspended())
2618 continue;
fe8ab488
A
2619 if (TLBTimeOut == 0) {
2620 if (is_timeout_traced)
2621 continue;
5ba3f43e 2622
fe8ab488 2623 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
5ba3f43e
A
2624 NULL, cpus_to_signal, cpus_to_respond);
2625
fe8ab488
A
2626 is_timeout_traced = TRUE;
2627 continue;
2628 }
39236c6e 2629 orig_acks = NMIPI_acks;
5ba3f43e 2630 NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
5c9f4661 2631 panic("Uninterruptible processor(s): CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu",
5ba3f43e 2632 cpus_to_respond, orig_acks, NMIPI_acks, deadline);
39236c6e
A
2633 }
2634 }
2635 }
5ba3f43e 2636
39236c6e 2637 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
5ba3f43e 2638 NULL, cpus_signaled, flush_self);
39236c6e
A
2639
2640 mp_enable_preemption();
2641}
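/*
 * A minimal sketch (not built) of the deferred-shootdown protocol above:
 * operations tagged PMAP_OPTIONS_NOFLUSH accumulate their target cpus in
 * the flush context instead of sending IPIs, and pmap_flush() then
 * performs one shootdown round for the whole batch. "map", "start" and
 * "end" are hypothetical.
 */
#if 0
	pmap_flush_context pfc;

	pmap_flush_context_init(&pfc);
	pmap_protect_options(map, start, end, VM_PROT_READ,
	    PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
	/* ... more deferred updates against the same context ... */
	pmap_flush(&pfc);
#endif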
2642
2643
3e170ce0
A
2644static void
2645invept(void *eptp)
2646{
2647 struct {
2648 uint64_t eptp;
2649 uint64_t reserved;
2650 } __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0};
2651
2652 __asm__ volatile("invept (%%rax), %%rcx"
2653 : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor)
2654 : "cc", "memory");
2655}
2656
b0d623f7
A
2657/*
2658 * Called with pmap locked, we:
2659 * - scan through per-cpu data to see which other cpus need to flush
2660 * - send an IPI to each non-idle cpu to be flushed
2661 * - wait for all to signal back that they are inactive or we see that
2662 * they are at a safe point (idle).
2663 * - flush the local tlb if active for this pmap
2664 * - return ... the caller will unlock the pmap
2665 */
6d2010ae 2666
b0d623f7 2667void
39236c6e 2668pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
b0d623f7
A
2669{
2670 unsigned int cpu;
d9a64523 2671 cpumask_t cpu_bit;
39037602 2672 cpumask_t cpus_to_signal = 0;
b0d623f7
A
2673 unsigned int my_cpu = cpu_number();
2674 pmap_paddr_t pmap_cr3 = pmap->pm_cr3;
2675 boolean_t flush_self = FALSE;
2676 uint64_t deadline;
6d2010ae 2677 boolean_t pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
39236c6e 2678 boolean_t need_global_flush = FALSE;
fe8ab488 2679 uint32_t event_code;
4bd07ac2 2680 vm_map_offset_t event_startv, event_endv;
3e170ce0 2681 boolean_t is_ept = is_ept_pmap(pmap);
b0d623f7
A
2682
2683 assert((processor_avail_count < 2) ||
2684 (ml_get_interrupts_enabled() && get_preemption_level() != 0));
2685
3e170ce0
A
2686 if (pmap == kernel_pmap) {
2687 event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS);
4bd07ac2
A
2688 event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv);
2689 event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv);
3e170ce0
A
2690 } else if (is_ept) {
2691 event_code = PMAP_CODE(PMAP__FLUSH_EPT);
4bd07ac2
A
2692 event_startv = startv;
2693 event_endv = endv;
3e170ce0
A
2694 } else {
2695 event_code = PMAP_CODE(PMAP__FLUSH_TLBS);
4bd07ac2
A
2696 event_startv = startv;
2697 event_endv = endv;
3e170ce0
A
2698 }
2699
fe8ab488 2700 PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START,
5ba3f43e
A
2701 VM_KERNEL_UNSLIDE_OR_PERM(pmap), options,
2702 event_startv, event_endv);
fe8ab488 2703
3e170ce0
A
2704 if (is_ept) {
2705 mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp);
2706 goto out;
2707 }
2708
b0d623f7
A
2709 /*
2710 * Scan other cpus for matching active or task CR3.
2711 * For idle cpus (with no active map) we mark them invalid but
2712 * don't signal -- they'll check as they go busy.
2713 */
6d2010ae 2714 if (pmap_pcid_ncpus) {
39236c6e
A
2715 if (pmap_is_shared)
2716 need_global_flush = TRUE;
6d2010ae 2717 pmap_pcid_invalidate_all_cpus(pmap);
39236c6e 2718 mfence();
6d2010ae 2719 }
b0d623f7 2720 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
a39ff7e2 2721 if (!cpu_is_running(cpu))
b0d623f7
A
2722 continue;
2723 uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
2724 uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);
5c9f4661 2725 /* Recall that the shadowed task cr3 is pre-composed. */
b0d623f7
A
2726 if ((pmap_cr3 == cpu_task_cr3) ||
2727 (pmap_cr3 == cpu_active_cr3) ||
6d2010ae 2728 (pmap_is_shared)) {
39236c6e
A
2729
2730 if (options & PMAP_DELAY_TLB_FLUSH) {
2731 if (need_global_flush == TRUE)
2732 pfc->pfc_invalid_global |= cpu_bit;
2733 pfc->pfc_cpus |= cpu_bit;
2734
2735 continue;
2736 }
b0d623f7
A
2737 if (cpu == my_cpu) {
2738 flush_self = TRUE;
2739 continue;
2740 }
39236c6e 2741 if (need_global_flush == TRUE)
6d2010ae
A
2742 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2743 else
2744 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
39236c6e 2745 mfence();
b0d623f7
A
2746
2747 /*
2748 * We don't need to signal processors which will flush
2749 * lazily at the idle state or kernel boundary.
2750 * For example, if we're invalidating the kernel pmap,
2751 * processors currently in userspace don't need to flush
2752 * their TLBs until the next time they enter the kernel.
2753 * Alterations to the address space of a task active
2754 * on a remote processor result in a signal, to
2755 * account for copy operations. (There may be room
2756 * for optimization in such cases).
2757 * The order of the loads below with respect
2758 * to the store to the "cpu_tlb_invalid" field above
2759 * is important--hence the barrier.
2760 */
2761 if (CPU_CR3_IS_ACTIVE(cpu) &&
2762 (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
39236c6e
A
2763 pmap->pm_shared ||
2764 (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
b0d623f7
A
2765 cpus_to_signal |= cpu_bit;
2766 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2767 }
2768 }
2769 }
39236c6e 2770 if ((options & PMAP_DELAY_TLB_FLUSH))
fe8ab488 2771 goto out;
b0d623f7 2772
b0d623f7
A
2773 /*
2774 * Flush local tlb if required.
2775 * Do this now to overlap with other processors responding.
2776 */
6d2010ae
A
2777 if (flush_self) {
2778 if (pmap_pcid_ncpus) {
2779 pmap_pcid_validate_cpu(pmap, my_cpu);
2780 if (pmap_is_shared)
2781 tlb_flush_global();
2782 else
2783 flush_tlb_raw();
2784 }
2785 else
2786 flush_tlb_raw();
2787 }
b0d623f7
A
2788
2789 if (cpus_to_signal) {
fe8ab488
A
2790 cpumask_t cpus_to_respond = cpus_to_signal;
2791
2792 deadline = mach_absolute_time() +
2793 (TLBTimeOut ? TLBTimeOut : LockTimeOut);
2794 boolean_t is_timeout_traced = FALSE;
b0d623f7 2795
b0d623f7
A
2796 /*
2797 * Wait for those other cpus to acknowledge
2798 */
2799 while (cpus_to_respond != 0) {
060df5ea 2800 long orig_acks = 0;
b0d623f7
A
2801
2802 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
6d2010ae
A
2803 /* Consider checking local/global invalidity
2804 * as appropriate in the PCID case.
2805 */
b0d623f7 2806 if ((cpus_to_respond & cpu_bit) != 0) {
a39ff7e2 2807 if (!cpu_is_running(cpu) ||
b0d623f7
A
2808 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2809 !CPU_CR3_IS_ACTIVE(cpu)) {
2810 cpus_to_respond &= ~cpu_bit;
2811 }
2812 cpu_pause();
2813 }
2814 if (cpus_to_respond == 0)
2815 break;
2816 }
6d2010ae 2817 if (cpus_to_respond && (mach_absolute_time() > deadline)) {
060df5ea
A
2818 if (machine_timeout_suspended())
2819 continue;
fe8ab488
A
2820 if (TLBTimeOut == 0) {
2821 /* cut tracepoint but don't panic */
2822 if (is_timeout_traced)
2823 continue;
5ba3f43e
A
2824
2825 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
2826 VM_KERNEL_UNSLIDE_OR_PERM(pmap),
2827 cpus_to_signal,
2828 cpus_to_respond);
2829
fe8ab488
A
2830 is_timeout_traced = TRUE;
2831 continue;
2832 }
060df5ea 2833 orig_acks = NMIPI_acks;
060df5ea 2834
5ba3f43e
A
2835 NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT);
2836 panic("TLB invalidation IPI timeout, unresponsive CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu",
2837 cpus_to_respond, orig_acks, NMIPI_acks, deadline);
060df5ea 2838 }
b0d623f7
A
2839 }
2840 }
2841
316670eb 2842 if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
39236c6e
A
2843 panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
2844 }
2845
fe8ab488
A
2846out:
2847 PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END,
5ba3f43e
A
2848 VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal,
2849 event_startv, event_endv);
316670eb 2850
b0d623f7
A
2851}
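/*
 * A sketch (not built) of how the PMAP_UPDATE_TLBS* macros used
 * throughout this file presumably expand onto pmap_flush_tlbs(); the
 * actual definitions live in the pmap headers, so this is for
 * orientation only.
 */
#if 0
#define PMAP_UPDATE_TLBS(pmap, s, e)                                    \
	pmap_flush_tlbs(pmap, s, e, 0, NULL)

#define PMAP_UPDATE_TLBS_DELAYED(pmap, s, e, pfc)                       \
	pmap_flush_tlbs(pmap, s, e, PMAP_DELAY_TLB_FLUSH, pfc)
#endif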
2852
2853void
2854process_pmap_updates(void)
2855{
6d2010ae
A
2856 int ccpu = cpu_number();
2857 pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
2858 if (pmap_pcid_ncpus) {
2859 pmap_pcid_validate_current();
5c9f4661
A
2860 cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
2861 tlb_flush_global();
2862 } else {
6d2010ae
A
2863 current_cpu_datap()->cpu_tlb_invalid = FALSE;
2864 flush_tlb_raw();
2865 }
b0d623f7 2866
39236c6e 2867 mfence();
b0d623f7
A
2868}
2869
2870void
2871pmap_update_interrupt(void)
2872{
5ba3f43e 2873 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START);
b0d623f7 2874
39236c6e
A
2875 if (current_cpu_datap()->cpu_tlb_invalid)
2876 process_pmap_updates();
b0d623f7 2877
5ba3f43e 2878 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END);
b0d623f7 2879}
316670eb
A
2880
2881#include <mach/mach_vm.h> /* mach_vm_region_recurse() */
2882/* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
2883 * and identify ranges with mismatched VM permissions and PTE permissions
2884 */
2885kern_return_t
2886pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) {
2887 vm_offset_t cv = sv;
2888 kern_return_t rv = KERN_SUCCESS;
2889 uint64_t skip4 = 0, skip2 = 0;
2890
3e170ce0
A
2891 assert(!is_ept_pmap(ipmap));
2892
316670eb
A
2893 sv &= ~PAGE_MASK_64;
2894 ev &= ~PAGE_MASK_64;
2895 while (cv < ev) {
2896 if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
2897 (cv < 0xFFFF800000000000ULL))) {
2898 cv = 0xFFFF800000000000ULL;
2899 }
2900 /* Potential inconsistencies from not holding pmap lock
2901 * but harmless for the moment.
2902 */
2903 if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
2904 if ((cv + NBPML4) > cv)
2905 cv += NBPML4;
2906 else
2907 break;
2908 skip4++;
2909 continue;
2910 }
2911 if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
2912 if ((cv + NBPD) > cv)
2913 cv += NBPD;
2914 else
2915 break;
2916 skip2++;
2917 continue;
2918 }
2919
2920 pt_entry_t *ptep = pmap_pte(ipmap, cv);
2921 if (ptep && (*ptep & INTEL_PTE_VALID)) {
2922 if (*ptep & INTEL_PTE_WRITE) {
2923 if (!(*ptep & INTEL_PTE_NX)) {
2924 kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
2925 rv = KERN_FAILURE;
2926 }
2927 }
2928 }
2929 cv += PAGE_SIZE;
2930 }
2931 kprintf("Completed pmap scan\n");
2932 cv = sv;
2933
2934 struct vm_region_submap_info_64 vbr;
2935 mach_msg_type_number_t vbrcount = 0;
2936 mach_vm_size_t vmsize;
2937 vm_prot_t prot;
2938 uint32_t nesting_depth = 0;
2939 kern_return_t kret;
2940
2941 while (cv < ev) {
2942
2943 for (;;) {
2944 vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
2945 if ((kret = mach_vm_region_recurse(ivmmap,
2946 (mach_vm_address_t *) &cv, &vmsize, &nesting_depth,
2947 (vm_region_recurse_info_t)&vbr,
2948 &vbrcount)) != KERN_SUCCESS) {
2949 break;
2950 }
2951
2952 if (vbr.is_submap) {
2953 nesting_depth++;
2954 continue;
2955 } else {
2956 break;
2957 }
2958 }
2959
2960 if (kret != KERN_SUCCESS)
2961 break;
2962
2963 prot = vbr.protection;
2964
2965 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
2966 kprintf("W+X map entry at address 0x%lx\n", cv);
2967 rv = KERN_FAILURE;
2968 }
2969
2970 if (prot) {
2971 vm_offset_t pcv;
2972 for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
2973 pt_entry_t *ptep = pmap_pte(ipmap, pcv);
2974 vm_prot_t tprot;
2975
2976 if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID))
2977 continue;
2978 tprot = VM_PROT_READ;
2979 if (*ptep & INTEL_PTE_WRITE)
2980 tprot |= VM_PROT_WRITE;
2981 if ((*ptep & INTEL_PTE_NX) == 0)
2982 tprot |= VM_PROT_EXECUTE;
2983 if (tprot != prot) {
2984 kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
2985 rv = KERN_FAILURE;
2986 }
2987 }
2988 }
2989 cv += vmsize;
2990 }
2991 return rv;
2992}
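/*
 * A minimal sketch (not built): auditing the kernel's address space for
 * W+X mappings with the scanner above. The scan bounds here are
 * hypothetical; kernel_pmap and kernel_map are the usual globals.
 */
#if 0
	if (pmap_permissions_verify(kernel_pmap, kernel_map,
	    VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) != KERN_SUCCESS)
		printf("pmap_permissions_verify: W+X or mismatched mappings found\n");
#endif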
39037602
A
2993
2994#if MACH_ASSERT
2995extern int pmap_ledgers_panic;
d9a64523
A
2996extern int pmap_ledgers_panic_leeway;
2997
39037602
A
2998static void
2999pmap_check_ledgers(
3000 pmap_t pmap)
3001{
3002 ledger_amount_t bal;
3003 int pid;
3004 char *procname;
3005 boolean_t do_panic;
3006
3007 if (pmap->pmap_pid == 0) {
3008 /*
3009 * This pmap was not or is no longer fully associated
3010 * with a task (e.g. the old pmap after a fork()/exec() or
3011 * spawn()). Its "ledger" still points at a task that is
3012 * now using a different (and active) address space, so
3013 * we can't check that all the pmap ledgers are balanced here.
3014 *
3015 * If the "pid" is set, that means that we went through
3016 * pmap_set_process() in task_terminate_internal(), so
3017 * this task's ledger should not have been re-used and
3018 * all the pmap ledgers should be back to 0.
3019 */
3020 return;
3021 }
3022
3023 do_panic = FALSE;
3024 pid = pmap->pmap_pid;
3025 procname = pmap->pmap_procname;
3026
3027 pmap_ledgers_drift.num_pmaps_checked++;
3028
d9a64523
A
3029#define LEDGER_CHECK_BALANCE(__LEDGER) \
3030MACRO_BEGIN \
3031 int panic_on_negative = TRUE; \
3032 ledger_get_balance(pmap->ledger, \
3033 task_ledgers.__LEDGER, \
3034 &bal); \
3035 ledger_get_panic_on_negative(pmap->ledger, \
3036 task_ledgers.__LEDGER, \
3037 &panic_on_negative); \
3038 if (bal != 0) { \
3039 if (panic_on_negative || \
3040 (pmap_ledgers_panic && \
3041 pmap_ledgers_panic_leeway > 0 && \
3042 (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \
3043 bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \
3044 do_panic = TRUE; \
3045 } \
3046 printf("LEDGER BALANCE proc %d (%s) " \
3047 "\"%s\" = %lld\n", \
3048 pid, procname, #__LEDGER, bal); \
3049 if (bal > 0) { \
3050 pmap_ledgers_drift.__LEDGER##_over++; \
3051 pmap_ledgers_drift.__LEDGER##_over_total += bal; \
3052 if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \
3053 pmap_ledgers_drift.__LEDGER##_over_max = bal; \
3054 } \
3055 } else if (bal < 0) { \
3056 pmap_ledgers_drift.__LEDGER##_under++; \
3057 pmap_ledgers_drift.__LEDGER##_under_total += bal; \
3058 if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \
3059 pmap_ledgers_drift.__LEDGER##_under_max = bal; \
3060 } \
3061 } \
3062 } \
3063MACRO_END
3064
3065 LEDGER_CHECK_BALANCE(phys_footprint);
3066 LEDGER_CHECK_BALANCE(internal);
3067 LEDGER_CHECK_BALANCE(internal_compressed);
3068 LEDGER_CHECK_BALANCE(iokit_mapped);
3069 LEDGER_CHECK_BALANCE(alternate_accounting);
3070 LEDGER_CHECK_BALANCE(alternate_accounting_compressed);
3071 LEDGER_CHECK_BALANCE(page_table);
3072 LEDGER_CHECK_BALANCE(purgeable_volatile);
3073 LEDGER_CHECK_BALANCE(purgeable_nonvolatile);
3074 LEDGER_CHECK_BALANCE(purgeable_volatile_compressed);
3075 LEDGER_CHECK_BALANCE(purgeable_nonvolatile_compressed);
3076 LEDGER_CHECK_BALANCE(network_volatile);
3077 LEDGER_CHECK_BALANCE(network_nonvolatile);
3078 LEDGER_CHECK_BALANCE(network_volatile_compressed);
3079 LEDGER_CHECK_BALANCE(network_nonvolatile_compressed);
39037602
A
3080
3081 if (do_panic) {
3082 if (pmap_ledgers_panic) {
3083 panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
3084 pmap, pid, procname);
3085 } else {
3086 printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n",
3087 pmap, pid, procname);
3088 }
3089 }
3090
3091 if (pmap->stats.resident_count != 0 ||
5c9f4661
A
3092#if 35156815
3093 /*
3094 * "wired_count" is unfortunately a bit inaccurate, so let's
3095 * tolerate some slight deviation to limit the amount of
3096 * somewhat-spurious assertion failures.
3097 */
3098 pmap->stats.wired_count > 10 ||
3099#else /* 35156815 */
39037602 3100 pmap->stats.wired_count != 0 ||
5c9f4661 3101#endif /* 35156815 */
39037602
A
3102 pmap->stats.device != 0 ||
3103 pmap->stats.internal != 0 ||
3104 pmap->stats.external != 0 ||
3105 pmap->stats.reusable != 0 ||
3106 pmap->stats.compressed != 0) {
d9a64523
A
3107 if (pmap_stats_assert &&
3108 pmap->pmap_stats_assert) {
39037602
A
3109 panic("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld",
3110 pmap, pid, procname,
3111 pmap->stats.resident_count,
3112 pmap->stats.wired_count,
3113 pmap->stats.device,
3114 pmap->stats.internal,
3115 pmap->stats.external,
3116 pmap->stats.reusable,
3117 pmap->stats.compressed);
3118 } else {
3119 printf("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld",
3120 pmap, pid, procname,
3121 pmap->stats.resident_count,
3122 pmap->stats.wired_count,
3123 pmap->stats.device,
3124 pmap->stats.internal,
3125 pmap->stats.external,
3126 pmap->stats.reusable,
3127 pmap->stats.compressed);
3128 }
3129 }
3130}
3131
3132void
3133pmap_set_process(
3134 pmap_t pmap,
3135 int pid,
3136 char *procname)
3137{
3138 if (pmap == NULL)
3139 return;
3140
3141 pmap->pmap_pid = pid;
3142 strlcpy(pmap->pmap_procname, procname, sizeof (pmap->pmap_procname));
d9a64523
A
3143 if (pmap_ledgers_panic_leeway) {
3144 /*
3145 * XXX FBDP
3146 * Some processes somehow trigger some issues that make
3147 * the pmap stats and ledgers go off track, causing
3148 * some assertion failures and ledger panics.
3149 * Turn off the sanity checks if we allow some ledger leeway
3150 * because of that. We'll still do a final check in
3151 * pmap_check_ledgers() for discrepancies larger than the
3152 * allowed leeway after the address space has been fully
3153 * cleaned up.
3154 */
3155 pmap->pmap_stats_assert = FALSE;
3156 ledger_disable_panic_on_negative(pmap->ledger,
3157 task_ledgers.phys_footprint);
3158 ledger_disable_panic_on_negative(pmap->ledger,
3159 task_ledgers.internal);
3160 ledger_disable_panic_on_negative(pmap->ledger,
3161 task_ledgers.internal_compressed);
3162 ledger_disable_panic_on_negative(pmap->ledger,
3163 task_ledgers.iokit_mapped);
3164 ledger_disable_panic_on_negative(pmap->ledger,
3165 task_ledgers.alternate_accounting);
3166 ledger_disable_panic_on_negative(pmap->ledger,
3167 task_ledgers.alternate_accounting_compressed);
3168 }
39037602
A
3169}
3170#endif /* MACH_ASSERT */
3171
3172
3173#if DEVELOPMENT || DEBUG
3174int pmap_pagezero_mitigation = 1;
3175#endif
3176
3177void pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound) {
3178#if DEVELOPMENT || DEBUG
3179 if (pmap_pagezero_mitigation == 0) {
3180 lpmap->pagezero_accessible = FALSE;
3181 return;
3182 }
3183#endif
3184 lpmap->pagezero_accessible = ((pmap_smap_enabled == FALSE) && (low_bound < 0x1000));
3185 if (lpmap == current_pmap()) {
3186 mp_disable_preemption();
3187 current_cpu_datap()->cpu_pagezero_mapped = lpmap->pagezero_accessible;
3188 mp_enable_preemption();
3189 }
3190}
813fb2f6
A
3191
3192void pmap_verify_noncacheable(uintptr_t vaddr) {
3193 pt_entry_t *ptep = NULL;
3194 ptep = pmap_pte(kernel_pmap, vaddr);
3195 if (ptep == NULL) {
3196 panic("pmap_verify_noncacheable: no translation for 0x%lx", vaddr);
3197 }
3198 /* Non-cacheable OK */
3199 if (*ptep & (INTEL_PTE_NCACHE))
3200 return;
3201 /* Write-combined OK */
3202 if (*ptep & (INTEL_PTE_PTA))
3203 return;
3204 panic("pmap_verify_noncacheable: IO read from a cacheable address? address: 0x%lx, PTE: %p, *PTE: 0x%llx", vaddr, ptep, *ptep);
3205}
d9a64523 3206