]> git.saurik.com Git - apple/xnu.git/blame - osfmk/x86_64/pmap.c
xnu-3248.60.10.tar.gz
[apple/xnu.git] / osfmk / x86_64 / pmap.c
CommitLineData
b0d623f7 1/*
6d2010ae 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
b0d623f7
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 * File: pmap.c
61 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * (These guys wrote the Vax version)
63 *
64 * Physical Map management code for Intel i386, i486, and i860.
65 *
66 * Manages physical address maps.
67 *
68 * In addition to hardware address maps, this
69 * module is called upon to provide software-use-only
70 * maps which may or may not be stored in the same
71 * form as hardware maps. These pseudo-maps are
72 * used to store intermediate results from copy
73 * operations to and from address spaces.
74 *
75 * Since the information managed by this module is
76 * also stored by the logical address mapping module,
77 * this module may throw away valid virtual-to-physical
78 * mappings at almost any time. However, invalidations
79 * of virtual-to-physical mappings must be done as
80 * requested.
81 *
82 * In order to cope with hardware architectures which
83 * make virtual-to-physical map invalidates expensive,
84 * this module may delay invalidate or reduced protection
85 * operations until such time as they are actually
86 * necessary. This module is given full information as
87 * to which processors are currently using which maps,
88 * and to when physical maps must be made correct.
89 */
90
91#include <string.h>
b0d623f7
A
92#include <mach_ldebug.h>
93
94#include <libkern/OSAtomic.h>
95
96#include <mach/machine/vm_types.h>
97
98#include <mach/boolean.h>
99#include <kern/thread.h>
100#include <kern/zalloc.h>
101#include <kern/queue.h>
316670eb 102#include <kern/ledger.h>
6d2010ae 103#include <kern/mach_param.h>
b0d623f7 104
b0d623f7
A
105#include <kern/kalloc.h>
106#include <kern/spl.h>
107
108#include <vm/pmap.h>
109#include <vm/vm_map.h>
110#include <vm/vm_kern.h>
111#include <mach/vm_param.h>
112#include <mach/vm_prot.h>
113#include <vm/vm_object.h>
114#include <vm/vm_page.h>
115
116#include <mach/machine/vm_param.h>
117#include <machine/thread.h>
118
119#include <kern/misc_protos.h> /* prototyping */
120#include <i386/misc_protos.h>
6d2010ae 121#include <i386/i386_lowmem.h>
b0d623f7
A
122#include <x86_64/lowglobals.h>
123
124#include <i386/cpuid.h>
125#include <i386/cpu_data.h>
126#include <i386/cpu_number.h>
127#include <i386/machine_cpu.h>
128#include <i386/seg.h>
129#include <i386/serial_io.h>
130#include <i386/cpu_capabilities.h>
131#include <i386/machine_routines.h>
132#include <i386/proc_reg.h>
133#include <i386/tsc.h>
134#include <i386/pmap_internal.h>
6d2010ae 135#include <i386/pmap_pcid.h>
3e170ce0
A
136#if CONFIG_VMX
137#include <i386/vmx/vmx_cpu.h>
138#endif
b0d623f7 139
b0d623f7
A
140#include <vm/vm_protos.h>
141
142#include <i386/mp.h>
143#include <i386/mp_desc.h>
316670eb
A
144#include <libkern/kernel_mach_header.h>
145
146#include <pexpert/i386/efi.h>
b0d623f7
A
147
148
b0d623f7
A
149#ifdef IWANTTODEBUG
150#undef DEBUG
151#define DEBUG 1
152#define POSTCODE_DELAY 1
153#include <i386/postcode.h>
154#endif /* IWANTTODEBUG */
155
6d2010ae
A
156#ifdef PMAP_DEBUG
157#define DBG(x...) kprintf("DBG: " x)
b0d623f7
A
158#else
159#define DBG(x...)
160#endif
6d2010ae
A
161/* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
162 * in the trampolines for kernel/user boundary TLB coherency.
b0d623f7 163 */
6d2010ae
A
164char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
165boolean_t pmap_trace = FALSE;
b0d623f7 166
6d2010ae 167boolean_t no_shared_cr3 = DEBUG; /* TRUE for DEBUG by default */
b0d623f7
A
168
169int nx_enabled = 1; /* enable no-execute protection */
170int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
171int allow_stack_exec = 0; /* No apps may execute from the stack by default */
172
173const boolean_t cpu_64bit = TRUE; /* Mais oui! */
174
b0d623f7
A
175uint64_t max_preemption_latency_tsc = 0;
176
b0d623f7
A
177pv_hashed_entry_t *pv_hash_table; /* hash lists */
178
fe8ab488 179uint32_t npvhashmask = 0, npvhashbuckets = 0;
b0d623f7 180
b0d623f7
A
181pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
182pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
183decl_simple_lock_data(,pv_hashed_free_list_lock)
184decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
185decl_simple_lock_data(,pv_hash_table_lock)
186
fe8ab488
A
187decl_simple_lock_data(,phys_backup_lock)
188
b0d623f7
A
189zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */
190
b0d623f7
A
191/*
192 * First and last physical addresses that we maintain any information
193 * for. Initialized to zero so that pmap operations done before
194 * pmap_init won't touch any non-existent structures.
195 */
196boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
197
198static struct vm_object kptobj_object_store;
199static struct vm_object kpml4obj_object_store;
200static struct vm_object kpdptobj_object_store;
201
202/*
6d2010ae 203 * Array of physical page attribites for managed pages.
b0d623f7
A
204 * One byte per physical page.
205 */
206char *pmap_phys_attributes;
316670eb 207ppnum_t last_managed_page = 0;
6d2010ae
A
208
209/*
210 * Amount of virtual memory mapped by one
211 * page-directory entry.
212 */
213
b0d623f7
A
214uint64_t pde_mapped_size = PDE_MAPPED_SIZE;
215
b0d623f7
A
216unsigned pmap_memory_region_count;
217unsigned pmap_memory_region_current;
218
219pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
220
221/*
222 * Other useful macros.
223 */
224#define current_pmap() (vm_map_pmap(current_thread()->map))
225
226struct pmap kernel_pmap_store;
227pmap_t kernel_pmap;
228
b0d623f7
A
229struct zone *pmap_zone; /* zone of pmap structures */
230
6d2010ae
A
231struct zone *pmap_anchor_zone;
232int pmap_debug = 0; /* flag for debugging prints */
233
b0d623f7 234unsigned int inuse_ptepages_count = 0;
6d2010ae
A
235long long alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
236unsigned int bootstrap_wired_pages = 0;
237int pt_fake_zone_index = -1;
b0d623f7 238
6d2010ae 239extern long NMIPI_acks;
b0d623f7 240
6d2010ae
A
241boolean_t kernel_text_ps_4K = TRUE;
242boolean_t wpkernel = TRUE;
b0d623f7
A
243
244extern char end;
245
246static int nkpt;
247
248pt_entry_t *DMAP1, *DMAP2;
249caddr_t DADDR1;
250caddr_t DADDR2;
b0d623f7 251
3e170ce0
A
252boolean_t pmap_disable_kheap_nx = FALSE;
253boolean_t pmap_disable_kstack_nx = FALSE;
316670eb 254extern boolean_t doconstro_override;
b0d623f7 255
316670eb 256extern long __stack_chk_guard[];
b0d623f7 257
7e41aa88 258static uint64_t pmap_eptp_flags = 0;
3e170ce0
A
259boolean_t pmap_ept_support_ad = FALSE;
260
261
b0d623f7
A
262/*
263 * Map memory at initialization. The physical addresses being
264 * mapped are not managed and are never unmapped.
265 *
266 * For now, VM is already on, we only need to map the
267 * specified memory.
268 */
269vm_offset_t
270pmap_map(
271 vm_offset_t virt,
272 vm_map_offset_t start_addr,
273 vm_map_offset_t end_addr,
274 vm_prot_t prot,
275 unsigned int flags)
276{
277 int ps;
278
279 ps = PAGE_SIZE;
280 while (start_addr < end_addr) {
281 pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
316670eb 282 (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
b0d623f7
A
283 virt += ps;
284 start_addr += ps;
285 }
286 return(virt);
287}
288
b0d623f7
A
289extern char *first_avail;
290extern vm_offset_t virtual_avail, virtual_end;
291extern pmap_paddr_t avail_start, avail_end;
292extern vm_offset_t sHIB;
293extern vm_offset_t eHIB;
294extern vm_offset_t stext;
295extern vm_offset_t etext;
316670eb
A
296extern vm_offset_t sdata, edata;
297extern vm_offset_t sconstdata, econstdata;
b0d623f7 298
6d2010ae
A
299extern void *KPTphys;
300
13f56ec4 301boolean_t pmap_smep_enabled = FALSE;
fe8ab488 302boolean_t pmap_smap_enabled = FALSE;
13f56ec4 303
b0d623f7
A
304void
305pmap_cpu_init(void)
306{
bd504ef0 307 cpu_data_t *cdp = current_cpu_datap();
b0d623f7
A
308 /*
309 * Here early in the life of a processor (from cpu_mode_init()).
6d2010ae 310 * Ensure global page feature is disabled at this point.
b0d623f7 311 */
6d2010ae 312
b0d623f7
A
313 set_cr4(get_cr4() &~ CR4_PGE);
314
315 /*
316 * Initialize the per-cpu, TLB-related fields.
317 */
bd504ef0
A
318 cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
319 cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
320 cdp->cpu_tlb_invalid = FALSE;
321 cdp->cpu_task_map = TASK_MAP_64BIT;
6d2010ae 322 pmap_pcid_configure();
13f56ec4
A
323 if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
324 boolean_t nsmep;
325 if (!PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
326 set_cr4(get_cr4() | CR4_SMEP);
327 pmap_smep_enabled = TRUE;
328 }
329 }
04b8595b
A
330 if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) {
331 boolean_t nsmap;
332 if (!PE_parse_boot_argn("-pmap_smap_disable", &nsmap, sizeof(nsmap))) {
333 set_cr4(get_cr4() | CR4_SMAP);
334 pmap_smap_enabled = TRUE;
335 }
336 }
bd504ef0
A
337
338 if (cdp->cpu_fixed_pmcs_enabled) {
339 boolean_t enable = TRUE;
340 cpu_pmc_control(&enable);
341 }
b0d623f7
A
342}
343
fe8ab488
A
344static uint32_t pmap_scale_shift(void) {
345 uint32_t scale = 0;
b0d623f7 346
fe8ab488
A
347 if (sane_size <= 8*GB) {
348 scale = (uint32_t)(sane_size / (2 * GB));
349 } else if (sane_size <= 32*GB) {
350 scale = 4 + (uint32_t)((sane_size - (8 * GB))/ (4 * GB));
351 } else {
352 scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB))/ (8 * GB)));
353 }
354 return scale;
355}
b0d623f7
A
356
357/*
358 * Bootstrap the system enough to run with virtual memory.
359 * Map the kernel's code and data, and allocate the system page table.
360 * Called with mapping OFF. Page_size must already be set.
361 */
362
363void
364pmap_bootstrap(
365 __unused vm_offset_t load_start,
366 __unused boolean_t IA32e)
367{
368#if NCOPY_WINDOWS > 0
369 vm_offset_t va;
370 int i;
371#endif
b0d623f7
A
372 assert(IA32e);
373
374 vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address
375 * known to VM */
376 /*
377 * The kernel's pmap is statically allocated so we don't
378 * have to use pmap_create, which is unlikely to work
379 * correctly at this part of the boot sequence.
380 */
381
382 kernel_pmap = &kernel_pmap_store;
383 kernel_pmap->ref_count = 1;
316670eb 384 kernel_pmap->nx_enabled = TRUE;
b0d623f7
A
385 kernel_pmap->pm_task_map = TASK_MAP_64BIT;
386 kernel_pmap->pm_obj = (vm_object_t) NULL;
387 kernel_pmap->dirbase = (pd_entry_t *)((uintptr_t)IdlePTD);
388 kernel_pmap->pm_pdpt = (pd_entry_t *) ((uintptr_t)IdlePDPT);
389 kernel_pmap->pm_pml4 = IdlePML4;
390 kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
3e170ce0 391 kernel_pmap->pm_eptp = 0;
6d2010ae 392 pmap_pcid_initialize_kernel(kernel_pmap);
b0d623f7 393
6d2010ae 394
b0d623f7
A
395
396 current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
397
398 nkpt = NKPT;
399 OSAddAtomic(NKPT, &inuse_ptepages_count);
6d2010ae
A
400 OSAddAtomic64(NKPT, &alloc_ptepages_count);
401 bootstrap_wired_pages = NKPT;
b0d623f7
A
402
403 virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
404 virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
405
406#if NCOPY_WINDOWS > 0
407 /*
408 * Reserve some special page table entries/VA space for temporary
409 * mapping of pages.
410 */
411#define SYSMAP(c, p, v, n) \
412 v = (c)va; va += ((n)*INTEL_PGBYTES);
413
414 va = virtual_avail;
415
416 for (i=0; i<PMAP_NWINDOWS; i++) {
417#if 1
418 kprintf("trying to do SYSMAP idx %d %p\n", i,
419 current_cpu_datap());
420 kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap);
421 kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow);
422 kprintf("two stuff %p %p\n",
423 (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
424 (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR));
425#endif
426 SYSMAP(caddr_t,
427 (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
428 (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
429 1);
430 current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP =
431 &(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store);
432 *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
433 }
434
435 /* DMAP user for debugger */
436 SYSMAP(caddr_t, DMAP1, DADDR1, 1);
437 SYSMAP(caddr_t, DMAP2, DADDR2, 1); /* XXX temporary - can remove */
438
439 virtual_avail = va;
440#endif
fe8ab488
A
441 if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof (npvhashmask))) {
442 npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1;
b0d623f7 443
fe8ab488
A
444 }
445
446 npvhashbuckets = npvhashmask + 1;
447
448 if (0 != ((npvhashbuckets) & npvhashmask)) {
449 panic("invalid hash %d, must be ((2^N)-1), "
450 "using default %d\n", npvhashmask, NPVHASHMASK);
b0d623f7
A
451 }
452
b0d623f7
A
453 simple_lock_init(&kernel_pmap->lock, 0);
454 simple_lock_init(&pv_hashed_free_list_lock, 0);
455 simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
456 simple_lock_init(&pv_hash_table_lock,0);
fe8ab488 457 simple_lock_init(&phys_backup_lock, 0);
b0d623f7
A
458
459 pmap_cpu_init();
460
6d2010ae
A
461 if (pmap_pcid_ncpus)
462 printf("PMAP: PCID enabled\n");
463
13f56ec4
A
464 if (pmap_smep_enabled)
465 printf("PMAP: Supervisor Mode Execute Protection enabled\n");
04b8595b
A
466 if (pmap_smap_enabled)
467 printf("PMAP: Supervisor Mode Access Protection enabled\n");
7ddcb079 468
316670eb
A
469#if DEBUG
470 printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
fe8ab488 471 printf("early_random(): 0x%qx\n", early_random());
316670eb
A
472#endif
473 boolean_t ptmp;
474 /* Check if the user has requested disabling stack or heap no-execute
475 * enforcement. These are "const" variables; that qualifier is cast away
476 * when altering them. The TEXT/DATA const sections are marked
477 * write protected later in the kernel startup sequence, so altering
478 * them is possible at this point, in pmap_bootstrap().
479 */
480 if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
481 boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
482 *pdknxp = TRUE;
483 }
484
485 if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
486 boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
487 *pdknhp = TRUE;
488 }
489
6d2010ae
A
490 boot_args *args = (boot_args *)PE_state.bootArgs;
491 if (args->efiMode == kBootArgsEfiMode32) {
492 printf("EFI32: kernel virtual space limited to 4GB\n");
493 virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
494 }
b0d623f7
A
495 kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
496 (long)KERNEL_BASE, (long)virtual_end);
497 kprintf("Available physical space from 0x%llx to 0x%llx\n",
498 avail_start, avail_end);
499
500 /*
501 * The -no_shared_cr3 boot-arg is a debugging feature (set by default
502 * in the DEBUG kernel) to force the kernel to switch to its own map
503 * (and cr3) when control is in kernelspace. The kernel's map does not
504 * include (i.e. share) userspace so wild references will cause
505 * a panic. Only copyin and copyout are exempt from this.
506 */
507 (void) PE_parse_boot_argn("-no_shared_cr3",
508 &no_shared_cr3, sizeof (no_shared_cr3));
509 if (no_shared_cr3)
510 kprintf("Kernel not sharing user map\n");
511
512#ifdef PMAP_TRACES
513 if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
514 kprintf("Kernel traces for pmap operations enabled\n");
515 }
516#endif /* PMAP_TRACES */
517}
518
519void
520pmap_virtual_space(
521 vm_offset_t *startp,
522 vm_offset_t *endp)
523{
524 *startp = virtual_avail;
525 *endp = virtual_end;
526}
527
39236c6e
A
528
529
530
531#if HIBERNATION
532
533#include <IOKit/IOHibernatePrivate.h>
534
535int32_t pmap_npages;
536int32_t pmap_teardown_last_valid_compact_indx = -1;
537
538
539void hibernate_rebuild_pmap_structs(void);
540void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
541void pmap_pack_index(uint32_t);
542int32_t pmap_unpack_index(pv_rooted_entry_t);
543
544
545int32_t
546pmap_unpack_index(pv_rooted_entry_t pv_h)
547{
548 int32_t indx = 0;
549
550 indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
551 indx = indx << 16;
552 indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);
553
554 *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
555 *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);
556
557 return (indx);
558}
559
560
561void
562pmap_pack_index(uint32_t indx)
563{
564 pv_rooted_entry_t pv_h;
565
566 pv_h = &pv_head_table[indx];
567
568 *((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
569 *((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);
570
571 *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
572 *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
573}
574
575
576void
577hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
578{
579 int32_t i;
580 int32_t compact_target_indx;
581
582 compact_target_indx = 0;
583
584 for (i = 0; i < pmap_npages; i++) {
585 if (pv_head_table[i].pmap == PMAP_NULL) {
586
587 if (pv_head_table[compact_target_indx].pmap != PMAP_NULL)
588 compact_target_indx = i;
589 } else {
590 pmap_pack_index((uint32_t)i);
591
592 if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
593 /*
594 * we've got a hole to fill, so
595 * move this pv_rooted_entry_t to it's new home
596 */
597 pv_head_table[compact_target_indx] = pv_head_table[i];
598 pv_head_table[i].pmap = PMAP_NULL;
599
600 pmap_teardown_last_valid_compact_indx = compact_target_indx;
601 compact_target_indx++;
602 } else
603 pmap_teardown_last_valid_compact_indx = i;
604 }
605 }
606 *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx+1];
607 *unneeded_end = (addr64_t)&pv_head_table[pmap_npages-1];
608
609 HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
610}
611
612
613void
614hibernate_rebuild_pmap_structs(void)
615{
616 int32_t cindx, eindx, rindx;
617 pv_rooted_entry_t pv_h;
618
619 eindx = (int32_t)pmap_npages;
620
621 for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
622
623 pv_h = &pv_head_table[cindx];
624
625 rindx = pmap_unpack_index(pv_h);
626 assert(rindx < pmap_npages);
627
628 if (rindx != cindx) {
629 /*
630 * this pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs,
631 * so move it back to its real location
632 */
633 pv_head_table[rindx] = pv_head_table[cindx];
634 }
635 if (rindx+1 != eindx) {
636 /*
637 * the 'hole' between this vm_rooted_entry_t and the previous
638 * vm_rooted_entry_t we moved needs to be initialized as
639 * a range of zero'd vm_rooted_entry_t's
640 */
641 bzero((char *)&pv_head_table[rindx+1], (eindx - rindx - 1) * sizeof (struct pv_rooted_entry));
642 }
643 eindx = rindx;
644 }
645 if (rindx)
646 bzero ((char *)&pv_head_table[0], rindx * sizeof (struct pv_rooted_entry));
647
648 HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
649}
650
651#endif
652
b0d623f7
A
653/*
654 * Initialize the pmap module.
655 * Called by vm_init, to initialize any structures that the pmap
656 * system needs to map virtual memory.
657 */
658void
659pmap_init(void)
660{
661 long npages;
662 vm_offset_t addr;
060df5ea 663 vm_size_t s, vsize;
b0d623f7
A
664 vm_map_offset_t vaddr;
665 ppnum_t ppn;
666
667
668 kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
39236c6e 669 _vm_object_allocate((vm_object_size_t)NPML4PGS * PAGE_SIZE, &kpml4obj_object_store);
b0d623f7
A
670
671 kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
39236c6e 672 _vm_object_allocate((vm_object_size_t)NPDPTPGS * PAGE_SIZE, &kpdptobj_object_store);
b0d623f7
A
673
674 kernel_pmap->pm_obj = &kptobj_object_store;
39236c6e 675 _vm_object_allocate((vm_object_size_t)NPDEPGS * PAGE_SIZE, &kptobj_object_store);
b0d623f7
A
676
677 /*
678 * Allocate memory for the pv_head_table and its lock bits,
679 * the modify bit array, and the pte_page table.
680 */
681
682 /*
683 * zero bias all these arrays now instead of off avail_start
684 * so we cover all memory
685 */
686
687 npages = i386_btop(avail_end);
39236c6e
A
688#if HIBERNATION
689 pmap_npages = (uint32_t)npages;
690#endif
b0d623f7 691 s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
fe8ab488 692 + (sizeof (struct pv_hashed_entry_t *) * (npvhashbuckets))
b0d623f7 693 + pv_lock_table_size(npages)
fe8ab488 694 + pv_hash_lock_table_size((npvhashbuckets))
b0d623f7 695 + npages);
b0d623f7
A
696 s = round_page(s);
697 if (kernel_memory_allocate(kernel_map, &addr, s, 0,
3e170ce0 698 KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP)
b0d623f7
A
699 != KERN_SUCCESS)
700 panic("pmap_init");
701
702 memset((char *)addr, 0, s);
703
060df5ea
A
704 vaddr = addr;
705 vsize = s;
706
b0d623f7 707#if PV_DEBUG
fe8ab488 708 if (0 == npvhashmask) panic("npvhashmask not initialized");
b0d623f7
A
709#endif
710
711 /*
712 * Allocate the structures first to preserve word-alignment.
713 */
714 pv_head_table = (pv_rooted_entry_t) addr;
715 addr = (vm_offset_t) (pv_head_table + npages);
716
717 pv_hash_table = (pv_hashed_entry_t *)addr;
fe8ab488 718 addr = (vm_offset_t) (pv_hash_table + (npvhashbuckets));
b0d623f7
A
719
720 pv_lock_table = (char *) addr;
721 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
722
723 pv_hash_lock_table = (char *) addr;
fe8ab488 724 addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhashbuckets)));
b0d623f7
A
725
726 pmap_phys_attributes = (char *) addr;
727
728 ppnum_t last_pn = i386_btop(avail_end);
729 unsigned int i;
730 pmap_memory_region_t *pmptr = pmap_memory_regions;
731 for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
732 if (pmptr->type != kEfiConventionalMemory)
733 continue;
316670eb 734 ppnum_t pn;
b0d623f7
A
735 for (pn = pmptr->base; pn <= pmptr->end; pn++) {
736 if (pn < last_pn) {
737 pmap_phys_attributes[pn] |= PHYS_MANAGED;
060df5ea 738
b0d623f7
A
739 if (pn > last_managed_page)
740 last_managed_page = pn;
060df5ea 741
7ddcb079 742 if (pn >= lowest_hi && pn <= highest_hi)
060df5ea 743 pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
b0d623f7
A
744 }
745 }
746 }
060df5ea
A
747 while (vsize) {
748 ppn = pmap_find_phys(kernel_pmap, vaddr);
b0d623f7 749
060df5ea
A
750 pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;
751
752 vaddr += PAGE_SIZE;
753 vsize -= PAGE_SIZE;
754 }
b0d623f7
A
755 /*
756 * Create the zone of physical maps,
757 * and of the physical-to-virtual entries.
758 */
759 s = (vm_size_t) sizeof(struct pmap);
760 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
060df5ea
A
761 zone_change(pmap_zone, Z_NOENCRYPT, TRUE);
762
6d2010ae
A
763 pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors");
764 zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE);
765
6d2010ae 766 /* The anchor is required to be page aligned. Zone debugging adds
316670eb
A
767 * padding which may violate that requirement. Tell the zone
768 * subsystem that alignment is required.
6d2010ae 769 */
316670eb
A
770
771 zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
6d2010ae 772
b0d623f7 773 s = (vm_size_t) sizeof(struct pv_hashed_entry);
6d2010ae
A
774 pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
775 4096 * 3 /* LCM x86_64*/, "pv_list");
060df5ea 776 zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
b0d623f7
A
777
778 /* create pv entries for kernel pages mapped by low level
779 startup code. these have to exist so we can pmap_remove()
780 e.g. kext pages from the middle of our addr space */
781
782 vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS;
6d2010ae 783 for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) {
b0d623f7
A
784 pv_rooted_entry_t pv_e;
785
786 pv_e = pai_to_pvh(ppn);
787 pv_e->va = vaddr;
788 vaddr += PAGE_SIZE;
789 pv_e->pmap = kernel_pmap;
790 queue_init(&pv_e->qlink);
791 }
792 pmap_initialized = TRUE;
793
b0d623f7
A
794 max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
795
796 /*
797 * Ensure the kernel's PML4 entry exists for the basement
798 * before this is shared with any user.
799 */
316670eb 800 pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);
3e170ce0
A
801
802#if CONFIG_VMX
803 pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
7e41aa88 804 pmap_eptp_flags = HV_VMX_EPTP_MEMORY_TYPE_WB | HV_VMX_EPTP_WALK_LENGTH(4) | (pmap_ept_support_ad ? HV_VMX_EPTP_ENABLE_AD_FLAGS : 0);
3e170ce0 805#endif /* CONFIG_VMX */
316670eb
A
806}
807
808static
809void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) {
810 uint64_t ev = sv + nxrosz, cv = sv;
811 pd_entry_t *pdep;
812 pt_entry_t *ptep = NULL;
813
3e170ce0
A
814 assert(!is_ept_pmap(npmap));
815
316670eb
A
816 assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);
817
818 for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
819 uint64_t pdev = (cv & ~((uint64_t)PDEMASK));
820
821 if (*pdep & INTEL_PTE_PS) {
822 if (NX)
823 *pdep |= INTEL_PTE_NX;
824 if (ro)
825 *pdep &= ~INTEL_PTE_WRITE;
826 cv += NBPD;
827 cv &= ~((uint64_t) PDEMASK);
828 pdep = pmap_pde(npmap, cv);
829 continue;
830 }
831
832 for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
833 if (NX)
834 *ptep |= INTEL_PTE_NX;
835 if (ro)
836 *ptep &= ~INTEL_PTE_WRITE;
837 cv += NBPT;
838 ptep = pmap_pte(npmap, cv);
839 }
840 }
841 DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
b0d623f7
A
842}
843
6d2010ae
A
844/*
845 * Called once VM is fully initialized so that we can release unused
846 * sections of low memory to the general pool.
847 * Also complete the set-up of identity-mapped sections of the kernel:
848 * 1) write-protect kernel text
849 * 2) map kernel text using large pages if possible
850 * 3) read and write-protect page zero (for K32)
851 * 4) map the global page at the appropriate virtual address.
852 *
853 * Use of large pages
854 * ------------------
855 * To effectively map and write-protect all kernel text pages, the text
856 * must be 2M-aligned at the base, and the data section above must also be
857 * 2M-aligned. That is, there's padding below and above. This is achieved
858 * through linker directives. Large pages are used only if this alignment
859 * exists (and not overriden by the -kernel_text_page_4K boot-arg). The
860 * memory layout is:
861 *
862 * : :
863 * | __DATA |
864 * sdata: ================== 2Meg
865 * | |
866 * | zero-padding |
867 * | |
868 * etext: ------------------
869 * | |
870 * : :
871 * | |
872 * | __TEXT |
873 * | |
874 * : :
875 * | |
876 * stext: ================== 2Meg
877 * | |
878 * | zero-padding |
879 * | |
880 * eHIB: ------------------
881 * | __HIB |
882 * : :
883 *
884 * Prior to changing the mapping from 4K to 2M, the zero-padding pages
885 * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
886 * 4K pages covering [stext,etext] are coalesced as 2M large pages.
887 * The now unused level-1 PTE pages are also freed.
888 */
316670eb 889extern ppnum_t vm_kernel_base_page;
6d2010ae
A
890void
891pmap_lowmem_finalize(void)
892{
893 spl_t spl;
894 int i;
895
6d2010ae
A
896 /*
897 * Update wired memory statistics for early boot pages
898 */
316670eb 899 PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);
6d2010ae
A
900
901 /*
316670eb 902 * Free pages in pmap regions below the base:
6d2010ae
A
903 * rdar://6332712
904 * We can't free all the pages to VM that EFI reports available.
905 * Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
906 * There's also a size miscalculation here: pend is one page less
907 * than it should be but this is not fixed to be backwards
908 * compatible.
316670eb
A
909 * This is important for KASLR because up to 256*2MB = 512MB of space
910 * needs has to be released to VM.
6d2010ae
A
911 */
912 for (i = 0;
316670eb 913 pmap_memory_regions[i].end < vm_kernel_base_page;
6d2010ae 914 i++) {
316670eb
A
915 vm_offset_t pbase = i386_ptob(pmap_memory_regions[i].base);
916 vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1);
6d2010ae 917
316670eb
A
918 DBG("pmap region %d [%p..[%p\n",
919 i, (void *) pbase, (void *) pend);
920
921 if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED)
922 continue;
923 /*
924 * rdar://6332712
925 * Adjust limits not to free pages in range 0xc0000-0xff000.
926 */
927 if (pbase >= 0xc0000 && pend <= 0x100000)
928 continue;
929 if (pbase < 0xc0000 && pend > 0x100000) {
930 /* page range entirely within region, free lower part */
931 DBG("- ml_static_mfree(%p,%p)\n",
932 (void *) ml_static_ptovirt(pbase),
933 (void *) (0xc0000-pbase));
934 ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase);
935 pbase = 0x100000;
936 }
937 if (pbase < 0xc0000)
938 pend = MIN(pend, 0xc0000);
939 if (pend > 0x100000)
940 pbase = MAX(pbase, 0x100000);
941 DBG("- ml_static_mfree(%p,%p)\n",
6d2010ae 942 (void *) ml_static_ptovirt(pbase),
316670eb 943 (void *) (pend - pbase));
6d2010ae
A
944 ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
945 }
946
316670eb
A
947 /* A final pass to get rid of all initial identity mappings to
948 * low pages.
949 */
950 DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
951
143464d5
A
952 /*
953 * Remove all mappings past the boot-cpu descriptor aliases and low globals.
954 * Non-boot-cpu GDT aliases will be remapped later as needed.
955 */
316670eb
A
956 pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
957
6d2010ae
A
958 /*
959 * If text and data are both 2MB-aligned,
960 * we can map text with large-pages,
961 * unless the -kernel_text_ps_4K boot-arg overrides.
962 */
963 if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
964 kprintf("Kernel text is 2MB aligned");
965 kernel_text_ps_4K = FALSE;
966 if (PE_parse_boot_argn("-kernel_text_ps_4K",
967 &kernel_text_ps_4K,
968 sizeof (kernel_text_ps_4K)))
969 kprintf(" but will be mapped with 4K pages\n");
970 else
971 kprintf(" and will be mapped with 2M pages\n");
972 }
973
974 (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
975 if (wpkernel)
976 kprintf("Kernel text %p-%p to be write-protected\n",
977 (void *) stext, (void *) etext);
978
979 spl = splhigh();
980
981 /*
982 * Scan over text if mappings are to be changed:
983 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
984 * - Change to large-pages if possible and not overriden.
985 */
986 if (kernel_text_ps_4K && wpkernel) {
987 vm_offset_t myva;
988 for (myva = stext; myva < etext; myva += PAGE_SIZE) {
989 pt_entry_t *ptep;
990
991 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
992 if (ptep)
316670eb 993 pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE);
6d2010ae
A
994 }
995 }
996
997 if (!kernel_text_ps_4K) {
998 vm_offset_t myva;
999
1000 /*
1001 * Release zero-filled page padding used for 2M-alignment.
1002 */
1003 DBG("ml_static_mfree(%p,%p) for padding below text\n",
1004 (void *) eHIB, (void *) (stext - eHIB));
1005 ml_static_mfree(eHIB, stext - eHIB);
1006 DBG("ml_static_mfree(%p,%p) for padding above text\n",
1007 (void *) etext, (void *) (sdata - etext));
1008 ml_static_mfree(etext, sdata - etext);
1009
1010 /*
1011 * Coalesce text pages into large pages.
1012 */
1013 for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
1014 pt_entry_t *ptep;
1015 vm_offset_t pte_phys;
1016 pt_entry_t *pdep;
1017 pt_entry_t pde;
1018
1019 pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
1020 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
1021 DBG("myva: %p pdep: %p ptep: %p\n",
1022 (void *) myva, (void *) pdep, (void *) ptep);
1023 if ((*ptep & INTEL_PTE_VALID) == 0)
1024 continue;
1025 pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
1026 pde = *pdep & PTMASK; /* page attributes from pde */
1027 pde |= INTEL_PTE_PS; /* make it a 2M entry */
1028 pde |= pte_phys; /* take page frame from pte */
1029
1030 if (wpkernel)
316670eb 1031 pde &= ~INTEL_PTE_WRITE;
6d2010ae
A
1032 DBG("pmap_store_pte(%p,0x%llx)\n",
1033 (void *)pdep, pde);
1034 pmap_store_pte(pdep, pde);
1035
1036 /*
1037 * Free the now-unused level-1 pte.
1038 * Note: ptep is a virtual address to the pte in the
1039 * recursive map. We can't use this address to free
1040 * the page. Instead we need to compute its address
1041 * in the Idle PTEs in "low memory".
1042 */
1043 vm_offset_t vm_ptep = (vm_offset_t) KPTphys
1044 + (pte_phys >> PTPGSHIFT);
1045 DBG("ml_static_mfree(%p,0x%x) for pte\n",
1046 (void *) vm_ptep, PAGE_SIZE);
1047 ml_static_mfree(vm_ptep, PAGE_SIZE);
1048 }
1049
1050 /* Change variable read by sysctl machdep.pmap */
1051 pmap_kernel_text_ps = I386_LPGBYTES;
1052 }
1053
316670eb
A
1054 boolean_t doconstro = TRUE;
1055
1056 (void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
1057
1058 if ((sconstdata | econstdata) & PAGE_MASK) {
1059 kprintf("Const DATA misaligned 0x%lx 0x%lx\n", sconstdata, econstdata);
1060 if ((sconstdata & PAGE_MASK) || (doconstro_override == FALSE))
1061 doconstro = FALSE;
1062 }
1063
1064 if ((sconstdata > edata) || (sconstdata < sdata) || ((econstdata - sconstdata) >= (edata - sdata))) {
1065 kprintf("Const DATA incorrect size 0x%lx 0x%lx 0x%lx 0x%lx\n", sconstdata, econstdata, sdata, edata);
1066 doconstro = FALSE;
1067 }
1068
1069 if (doconstro)
1070 kprintf("Marking const DATA read-only\n");
1071
1072 vm_offset_t dva;
1073
1074 for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
1075 assert(((sdata | edata) & PAGE_MASK) == 0);
1076 if ( (sdata | edata) & PAGE_MASK) {
1077 kprintf("DATA misaligned, 0x%lx, 0x%lx\n", sdata, edata);
1078 break;
1079 }
1080
1081 pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
1082
1083 dpte = *dptep;
1084
1085 assert((dpte & INTEL_PTE_VALID));
1086 if ((dpte & INTEL_PTE_VALID) == 0) {
1087 kprintf("Missing data mapping 0x%lx 0x%lx 0x%lx\n", dva, sdata, edata);
1088 continue;
1089 }
1090
1091 dpte |= INTEL_PTE_NX;
1092 if (doconstro && (dva >= sconstdata) && (dva < econstdata)) {
1093 dpte &= ~INTEL_PTE_WRITE;
1094 }
1095 pmap_store_pte(dptep, dpte);
1096 }
1097 kernel_segment_command_t * seg;
1098 kernel_section_t * sec;
1099
1100 for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
1101 if (!strcmp(seg->segname, "__TEXT") ||
1102 !strcmp(seg->segname, "__DATA")) {
1103 continue;
1104 }
1105 //XXX
1106 if (!strcmp(seg->segname, "__KLD")) {
1107 continue;
1108 }
1109 if (!strcmp(seg->segname, "__HIB")) {
1110 for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
1111 if (sec->addr & PAGE_MASK)
1112 panic("__HIB segment's sections misaligned");
1113 if (!strcmp(sec->sectname, "__text")) {
1114 pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
1115 } else {
1116 pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
1117 }
1118 }
1119 } else {
1120 pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
1121 }
1122 }
1123
1124 /*
1125 * If we're debugging, map the low global vector page at the fixed
1126 * virtual address. Otherwise, remove the mapping for this.
1127 */
1128 if (debug_boot_arg) {
1129 pt_entry_t *pte = NULL;
1130 if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS)))
1131 panic("lowmem pte");
1132 /* make sure it is defined on page boundary */
1133 assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
1134 pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
1135 | INTEL_PTE_REF
1136 | INTEL_PTE_MOD
1137 | INTEL_PTE_WIRED
1138 | INTEL_PTE_VALID
1139 | INTEL_PTE_WRITE
1140 | INTEL_PTE_NX);
1141 } else {
1142 pmap_remove(kernel_pmap,
1143 LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
1144 }
1145
6d2010ae
A
1146 splx(spl);
1147 if (pmap_pcid_ncpus)
1148 tlb_flush_global();
1149 else
1150 flush_tlb_raw();
1151}
b0d623f7
A
1152
1153/*
1154 * this function is only used for debugging fron the vm layer
1155 */
1156boolean_t
1157pmap_verify_free(
1158 ppnum_t pn)
1159{
1160 pv_rooted_entry_t pv_h;
1161 int pai;
1162 boolean_t result;
1163
1164 assert(pn != vm_page_fictitious_addr);
1165
1166 if (!pmap_initialized)
1167 return(TRUE);
1168
1169 if (pn == vm_page_guard_addr)
1170 return TRUE;
1171
1172 pai = ppn_to_pai(pn);
1173 if (!IS_MANAGED_PAGE(pai))
1174 return(FALSE);
1175 pv_h = pai_to_pvh(pn);
1176 result = (pv_h->pmap == PMAP_NULL);
1177 return(result);
1178}
1179
1180boolean_t
1181pmap_is_empty(
1182 pmap_t pmap,
1183 vm_map_offset_t va_start,
1184 vm_map_offset_t va_end)
1185{
1186 vm_map_offset_t offset;
1187 ppnum_t phys_page;
1188
1189 if (pmap == PMAP_NULL) {
1190 return TRUE;
1191 }
1192
1193 /*
1194 * Check the resident page count
1195 * - if it's zero, the pmap is completely empty.
1196 * This short-circuit test prevents a virtual address scan which is
1197 * painfully slow for 64-bit spaces.
1198 * This assumes the count is correct
1199 * .. the debug kernel ought to be checking perhaps by page table walk.
1200 */
1201 if (pmap->stats.resident_count == 0)
1202 return TRUE;
1203
1204 for (offset = va_start;
1205 offset < va_end;
1206 offset += PAGE_SIZE_64) {
1207 phys_page = pmap_find_phys(pmap, offset);
1208 if (phys_page) {
1209 kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
1210 "page %d at 0x%llx\n",
1211 pmap, va_start, va_end, phys_page, offset);
1212 return FALSE;
1213 }
1214 }
1215
1216 return TRUE;
1217}
1218
3e170ce0
A
1219void
1220hv_ept_pmap_create(void **ept_pmap, void **eptp)
1221{
1222 pmap_t p;
1223
1224 if ((ept_pmap == NULL) || (eptp == NULL)) {
1225 return;
1226 }
1227
1228 p = pmap_create_options(get_task_ledger(current_task()), 0, (PMAP_CREATE_64BIT | PMAP_CREATE_EPT));
1229 if (p == PMAP_NULL) {
1230 *ept_pmap = NULL;
1231 *eptp = NULL;
1232 return;
1233 }
1234
1235 assert(is_ept_pmap(p));
1236
1237 *ept_pmap = (void*)p;
1238 *eptp = (void*)(p->pm_eptp);
1239 return;
1240}
b0d623f7
A
1241
1242/*
1243 * Create and return a physical map.
1244 *
1245 * If the size specified for the map
1246 * is zero, the map is an actual physical
1247 * map, and may be referenced by the
1248 * hardware.
1249 *
1250 * If the size specified is non-zero,
1251 * the map will be used in software only, and
1252 * is bounded by that size.
1253 */
1254pmap_t
3e170ce0
A
1255pmap_create_options(
1256 ledger_t ledger,
1257 vm_map_size_t sz,
1258 int flags)
b0d623f7
A
1259{
1260 pmap_t p;
1261 vm_size_t size;
1262 pml4_entry_t *pml4;
1263 pml4_entry_t *kpml4;
1264
1265 PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
3e170ce0 1266 (uint32_t) (sz>>32), (uint32_t) sz, flags, 0, 0);
b0d623f7
A
1267
1268 size = (vm_size_t) sz;
1269
1270 /*
1271 * A software use-only map doesn't even need a map.
1272 */
1273
1274 if (size != 0) {
1275 return(PMAP_NULL);
1276 }
1277
3e170ce0
A
1278 /*
1279 * Return error when unrecognized flags are passed.
1280 */
1281 if ((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0) {
1282 return(PMAP_NULL);
1283 }
1284
b0d623f7
A
1285 p = (pmap_t) zalloc(pmap_zone);
1286 if (PMAP_NULL == p)
1287 panic("pmap_create zalloc");
6d2010ae
A
1288 /* Zero all fields */
1289 bzero(p, sizeof(*p));
b0d623f7
A
1290 /* init counts now since we'll be bumping some */
1291 simple_lock_init(&p->lock, 0);
39236c6e 1292#if 00
b0d623f7
A
1293 p->stats.resident_count = 0;
1294 p->stats.resident_max = 0;
1295 p->stats.wired_count = 0;
39236c6e
A
1296#else
1297 bzero(&p->stats, sizeof (p->stats));
1298#endif
b0d623f7
A
1299 p->ref_count = 1;
1300 p->nx_enabled = 1;
1301 p->pm_shared = FALSE;
316670eb
A
1302 ledger_reference(ledger);
1303 p->ledger = ledger;
b0d623f7 1304
3e170ce0 1305 p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? TASK_MAP_64BIT : TASK_MAP_32BIT);
6d2010ae
A
1306 if (pmap_pcid_ncpus)
1307 pmap_pcid_initialize(p);
316670eb 1308
6d2010ae 1309 p->pm_pml4 = zalloc(pmap_anchor_zone);
b0d623f7 1310
6d2010ae 1311 pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
b0d623f7 1312
6d2010ae 1313 memset((char *)p->pm_pml4, 0, PAGE_SIZE);
b0d623f7 1314
3e170ce0 1315 if (flags & PMAP_CREATE_EPT) {
7e41aa88 1316 p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4) | pmap_eptp_flags;
3e170ce0
A
1317 p->pm_cr3 = 0;
1318 } else {
1319 p->pm_eptp = 0;
1320 p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
1321 }
b0d623f7
A
1322
1323 /* allocate the vm_objs to hold the pdpt, pde and pte pages */
1324
39236c6e 1325 p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) * PAGE_SIZE);
b0d623f7
A
1326 if (NULL == p->pm_obj_pml4)
1327 panic("pmap_create pdpt obj");
1328
39236c6e 1329 p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) * PAGE_SIZE);
b0d623f7
A
1330 if (NULL == p->pm_obj_pdpt)
1331 panic("pmap_create pdpt obj");
1332
39236c6e 1333 p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) * PAGE_SIZE);
b0d623f7
A
1334 if (NULL == p->pm_obj)
1335 panic("pmap_create pte obj");
1336
490019cf
A
1337 if (!(flags & PMAP_CREATE_EPT)) {
1338 /* All host pmaps share the kernel's pml4 */
1339 pml4 = pmap64_pml4(p, 0ULL);
1340 kpml4 = kernel_pmap->pm_pml4;
1341 pml4[KERNEL_PML4_INDEX] = kpml4[KERNEL_PML4_INDEX];
1342 pml4[KERNEL_KEXTS_INDEX] = kpml4[KERNEL_KEXTS_INDEX];
1343 pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
1344 }
b0d623f7
A
1345
1346 PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
3e170ce0 1347 p, flags, 0, 0, 0);
b0d623f7
A
1348
1349 return(p);
1350}
1351
3e170ce0
A
1352pmap_t
1353pmap_create(
1354 ledger_t ledger,
1355 vm_map_size_t sz,
1356 boolean_t is_64bit)
1357{
1358 return pmap_create_options(ledger, sz, ((is_64bit) ? PMAP_CREATE_64BIT : 0));
1359}
1360
b0d623f7
A
1361/*
1362 * Retire the given physical map from service.
1363 * Should only be called if the map contains
1364 * no valid mappings.
1365 */
3e170ce0 1366extern int vm_wired_objects_page_count;
b0d623f7
A
1367
1368void
6d2010ae 1369pmap_destroy(pmap_t p)
b0d623f7 1370{
6d2010ae 1371 int c;
b0d623f7
A
1372
1373 if (p == PMAP_NULL)
1374 return;
1375
1376 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
1377 p, 0, 0, 0, 0);
1378
1379 PMAP_LOCK(p);
1380
1381 c = --p->ref_count;
1382
6d2010ae
A
1383 pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);
1384
b0d623f7
A
1385 if (c == 0) {
1386 /*
1387 * If some cpu is not using the physical pmap pointer that it
1388 * is supposed to be (see set_dirbase), we might be using the
1389 * pmap that is being destroyed! Make sure we are
1390 * physically on the right pmap:
1391 */
1392 PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
ebb1b9f4
A
1393 if (pmap_pcid_ncpus)
1394 pmap_destroy_pcid_sync(p);
b0d623f7 1395 }
ebb1b9f4 1396
b0d623f7
A
1397 PMAP_UNLOCK(p);
1398
1399 if (c != 0) {
1400 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
1401 p, 1, 0, 0, 0);
6d2010ae 1402 pmap_assert(p == kernel_pmap);
b0d623f7
A
1403 return; /* still in use */
1404 }
1405
1406 /*
1407 * Free the memory maps, then the
1408 * pmap structure.
1409 */
1410 int inuse_ptepages = 0;
1411
6d2010ae 1412 zfree(pmap_anchor_zone, p->pm_pml4);
b0d623f7
A
1413
1414 inuse_ptepages += p->pm_obj_pml4->resident_page_count;
1415 vm_object_deallocate(p->pm_obj_pml4);
1416
1417 inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
1418 vm_object_deallocate(p->pm_obj_pdpt);
1419
1420 inuse_ptepages += p->pm_obj->resident_page_count;
1421 vm_object_deallocate(p->pm_obj);
1422
1423 OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count);
316670eb
A
1424 PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
1425 ledger_dereference(p->ledger);
b0d623f7
A
1426 zfree(pmap_zone, p);
1427
1428 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
1429 0, 0, 0, 0, 0);
1430}
1431
1432/*
1433 * Add a reference to the specified pmap.
1434 */
1435
1436void
1437pmap_reference(pmap_t p)
1438{
1439 if (p != PMAP_NULL) {
1440 PMAP_LOCK(p);
1441 p->ref_count++;
1442 PMAP_UNLOCK(p);;
1443 }
1444}
1445
b0d623f7
A
1446/*
1447 * Remove phys addr if mapped in specified map
1448 *
1449 */
1450void
1451pmap_remove_some_phys(
1452 __unused pmap_t map,
1453 __unused ppnum_t pn)
1454{
1455
1456/* Implement to support working set code */
1457
1458}
1459
39236c6e
A
1460
1461void
1462pmap_protect(
1463 pmap_t map,
1464 vm_map_offset_t sva,
1465 vm_map_offset_t eva,
1466 vm_prot_t prot)
1467{
1468 pmap_protect_options(map, sva, eva, prot, 0, NULL);
1469}
1470
1471
b0d623f7
A
1472/*
1473 * Set the physical protection on the
1474 * specified range of this map as requested.
1475 * Will not increase permissions.
1476 */
1477void
39236c6e 1478pmap_protect_options(
b0d623f7
A
1479 pmap_t map,
1480 vm_map_offset_t sva,
1481 vm_map_offset_t eva,
39236c6e
A
1482 vm_prot_t prot,
1483 unsigned int options,
1484 void *arg)
b0d623f7
A
1485{
1486 pt_entry_t *pde;
1487 pt_entry_t *spte, *epte;
1488 vm_map_offset_t lva;
1489 vm_map_offset_t orig_sva;
1490 boolean_t set_NX;
1491 int num_found = 0;
3e170ce0 1492 boolean_t is_ept;
b0d623f7
A
1493
1494 pmap_intr_assert();
1495
1496 if (map == PMAP_NULL)
1497 return;
1498
1499 if (prot == VM_PROT_NONE) {
39236c6e 1500 pmap_remove_options(map, sva, eva, options);
b0d623f7
A
1501 return;
1502 }
1503 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
1504 map,
1505 (uint32_t) (sva >> 32), (uint32_t) sva,
1506 (uint32_t) (eva >> 32), (uint32_t) eva);
1507
1508 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled)
1509 set_NX = FALSE;
1510 else
1511 set_NX = TRUE;
1512
3e170ce0
A
1513 is_ept = is_ept_pmap(map);
1514
1515
b0d623f7
A
1516 PMAP_LOCK(map);
1517
1518 orig_sva = sva;
1519 while (sva < eva) {
1520 lva = (sva + pde_mapped_size) & ~(pde_mapped_size - 1);
1521 if (lva > eva)
1522 lva = eva;
1523 pde = pmap_pde(map, sva);
3e170ce0
A
1524 if (pde && (*pde & PTE_VALID_MASK(is_ept))) {
1525 if (*pde & PTE_PS) {
b0d623f7
A
1526 /* superpage */
1527 spte = pde;
1528 epte = spte+1; /* excluded */
1529 } else {
1530 spte = pmap_pte(map, (sva & ~(pde_mapped_size - 1)));
1531 spte = &spte[ptenum(sva)];
1532 epte = &spte[intel_btop(lva - sva)];
1533 }
1534
1535 for (; spte < epte; spte++) {
3e170ce0 1536 if (!(*spte & PTE_VALID_MASK(is_ept)))
b0d623f7
A
1537 continue;
1538
3e170ce0
A
1539 if (is_ept) {
1540 if (prot & VM_PROT_READ)
1541 pmap_update_pte(spte, 0, PTE_READ(is_ept));
1542 else
1543 pmap_update_pte(spte, PTE_READ(is_ept), 0);
1544 }
b0d623f7 1545 if (prot & VM_PROT_WRITE)
3e170ce0 1546 pmap_update_pte(spte, 0, PTE_WRITE(is_ept));
b0d623f7 1547 else
3e170ce0 1548 pmap_update_pte(spte, PTE_WRITE(is_ept), 0);
b0d623f7 1549
3e170ce0
A
1550 if (set_NX) {
1551 if (!is_ept)
1552 pmap_update_pte(spte, 0, INTEL_PTE_NX);
1553 else
1554 pmap_update_pte(spte, INTEL_EPT_EX, 0);
1555 } else {
1556 if (!is_ept)
1557 pmap_update_pte(spte, INTEL_PTE_NX, 0);
1558 else
1559 pmap_update_pte(spte, 0, INTEL_EPT_EX);
1560 }
b0d623f7
A
1561 num_found++;
1562 }
1563 }
1564 sva = lva;
1565 }
39236c6e
A
1566 if (num_found) {
1567 if (options & PMAP_OPTIONS_NOFLUSH)
1568 PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg);
1569 else
1570 PMAP_UPDATE_TLBS(map, orig_sva, eva);
1571 }
b0d623f7
A
1572 PMAP_UNLOCK(map);
1573
1574 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
1575 0, 0, 0, 0, 0);
1576
1577}
1578
1579/* Map a (possibly) autogenned block */
1580void
1581pmap_map_block(
1582 pmap_t pmap,
1583 addr64_t va,
1584 ppnum_t pa,
1585 uint32_t size,
1586 vm_prot_t prot,
1587 int attr,
1588 __unused unsigned int flags)
1589{
1590 uint32_t page;
1591 int cur_page_size;
1592
1593 if (attr & VM_MEM_SUPERPAGE)
1594 cur_page_size = SUPERPAGE_SIZE;
1595 else
1596 cur_page_size = PAGE_SIZE;
1597
1598 for (page = 0; page < size; page+=cur_page_size/PAGE_SIZE) {
316670eb 1599 pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
b0d623f7
A
1600 va += cur_page_size;
1601 pa+=cur_page_size/PAGE_SIZE;
1602 }
1603}
1604
316670eb 1605kern_return_t
b0d623f7
A
1606pmap_expand_pml4(
1607 pmap_t map,
316670eb
A
1608 vm_map_offset_t vaddr,
1609 unsigned int options)
b0d623f7
A
1610{
1611 vm_page_t m;
1612 pmap_paddr_t pa;
1613 uint64_t i;
1614 ppnum_t pn;
1615 pml4_entry_t *pml4p;
3e170ce0 1616 boolean_t is_ept = is_ept_pmap(map);
b0d623f7
A
1617
1618 DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
1619
1620 /*
1621 * Allocate a VM page for the pml4 page
1622 */
316670eb
A
1623 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1624 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1625 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1626 VM_PAGE_WAIT();
316670eb 1627 }
b0d623f7
A
1628 /*
1629 * put the page into the pmap's obj list so it
1630 * can be found later.
1631 */
1632 pn = m->phys_page;
1633 pa = i386_ptob(pn);
1634 i = pml4idx(map, vaddr);
1635
1636 /*
1637 * Zero the page.
1638 */
1639 pmap_zero_page(pn);
1640
1641 vm_page_lockspin_queues();
3e170ce0 1642 vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
b0d623f7
A
1643 vm_page_unlock_queues();
1644
1645 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1646 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1647 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
b0d623f7
A
1648
1649 /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */
1650 vm_object_lock(map->pm_obj_pml4);
1651
1652 PMAP_LOCK(map);
1653 /*
1654 * See if someone else expanded us first
1655 */
1656 if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
1657 PMAP_UNLOCK(map);
1658 vm_object_unlock(map->pm_obj_pml4);
1659
1660 VM_PAGE_FREE(m);
1661
1662 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb
A
1663 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1664 return KERN_SUCCESS;
b0d623f7
A
1665 }
1666
1667#if 0 /* DEBUG */
39236c6e 1668 if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) {
b0d623f7
A
1669 panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1670 map, map->pm_obj_pml4, vaddr, i);
1671 }
1672#endif
3e170ce0 1673 vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
b0d623f7
A
1674 vm_object_unlock(map->pm_obj_pml4);
1675
1676 /*
1677 * Set the page directory entry for this page table.
1678 */
1679 pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
1680
1681 pmap_store_pte(pml4p, pa_to_pte(pa)
3e170ce0
A
1682 | PTE_READ(is_ept)
1683 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1684 | PTE_WRITE(is_ept));
b0d623f7
A
1685
1686 PMAP_UNLOCK(map);
1687
316670eb 1688 return KERN_SUCCESS;
b0d623f7
A
1689}
1690
316670eb
A
1691kern_return_t
1692pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
b0d623f7
A
1693{
1694 vm_page_t m;
1695 pmap_paddr_t pa;
1696 uint64_t i;
1697 ppnum_t pn;
1698 pdpt_entry_t *pdptp;
3e170ce0 1699 boolean_t is_ept = is_ept_pmap(map);
b0d623f7
A
1700
1701 DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
1702
1703 while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
316670eb
A
1704 kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
1705 if (pep4kr != KERN_SUCCESS)
1706 return pep4kr;
b0d623f7
A
1707 }
1708
1709 /*
1710 * Allocate a VM page for the pdpt page
1711 */
316670eb
A
1712 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1713 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1714 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1715 VM_PAGE_WAIT();
316670eb 1716 }
b0d623f7
A
1717
1718 /*
1719 * put the page into the pmap's obj list so it
1720 * can be found later.
1721 */
1722 pn = m->phys_page;
1723 pa = i386_ptob(pn);
1724 i = pdptidx(map, vaddr);
1725
1726 /*
1727 * Zero the page.
1728 */
1729 pmap_zero_page(pn);
1730
1731 vm_page_lockspin_queues();
3e170ce0 1732 vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
b0d623f7
A
1733 vm_page_unlock_queues();
1734
1735 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1736 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1737 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
b0d623f7
A
1738
1739 /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */
1740 vm_object_lock(map->pm_obj_pdpt);
1741
1742 PMAP_LOCK(map);
1743 /*
1744 * See if someone else expanded us first
1745 */
1746 if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
1747 PMAP_UNLOCK(map);
1748 vm_object_unlock(map->pm_obj_pdpt);
1749
1750 VM_PAGE_FREE(m);
1751
1752 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb
A
1753 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1754 return KERN_SUCCESS;
b0d623f7
A
1755 }
1756
1757#if 0 /* DEBUG */
39236c6e 1758 if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) {
b0d623f7
A
1759 panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1760 map, map->pm_obj_pdpt, vaddr, i);
1761 }
1762#endif
3e170ce0 1763 vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
b0d623f7
A
1764 vm_object_unlock(map->pm_obj_pdpt);
1765
1766 /*
1767 * Set the page directory entry for this page table.
1768 */
1769 pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
1770
1771 pmap_store_pte(pdptp, pa_to_pte(pa)
3e170ce0
A
1772 | PTE_READ(is_ept)
1773 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1774 | PTE_WRITE(is_ept));
b0d623f7
A
1775
1776 PMAP_UNLOCK(map);
1777
316670eb 1778 return KERN_SUCCESS;
b0d623f7
A
1779
1780}
1781
1782
1783
1784/*
1785 * Routine: pmap_expand
1786 *
1787 * Expands a pmap to be able to map the specified virtual address.
1788 *
1789 * Allocates new virtual memory for the P0 or P1 portion of the
1790 * pmap, then re-maps the physical pages that were in the old
1791 * pmap to be in the new pmap.
1792 *
1793 * Must be called with the pmap system and the pmap unlocked,
1794 * since these must be unlocked to use vm_allocate or vm_deallocate.
1795 * Thus it must be called in a loop that checks whether the map
1796 * has been expanded enough.
1797 * (We won't loop forever, since page tables aren't shrunk.)
1798 */
316670eb 1799kern_return_t
b0d623f7
A
1800pmap_expand(
1801 pmap_t map,
316670eb
A
1802 vm_map_offset_t vaddr,
1803 unsigned int options)
b0d623f7
A
1804{
1805 pt_entry_t *pdp;
1806 register vm_page_t m;
1807 register pmap_paddr_t pa;
1808 uint64_t i;
1809 ppnum_t pn;
3e170ce0 1810 boolean_t is_ept = is_ept_pmap(map);
b0d623f7
A
1811
1812
1813 /*
1814 * For the kernel, the virtual address must be in or above the basement
1815 * which is for kexts and is in the 512GB immediately below the kernel..
1816 * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
1817 */
1818 if (map == kernel_pmap &&
1819 !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))
1820 panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
1821
1822
1823 while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
316670eb
A
1824 kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
1825 if (pepkr != KERN_SUCCESS)
1826 return pepkr;
b0d623f7
A
1827 }
1828
1829 /*
1830 * Allocate a VM page for the new page table that this PDE will point to.
1831 */
316670eb
A
1832 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1833 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1834 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1835 VM_PAGE_WAIT();
316670eb 1836 }
b0d623f7
A
1837
1838 /*
1839 * put the page into the pmap's obj list so it
1840 * can be found later.
1841 */
1842 pn = m->phys_page;
1843 pa = i386_ptob(pn);
1844 i = pdeidx(map, vaddr);
1845
1846 /*
1847 * Zero the page.
1848 */
1849 pmap_zero_page(pn);
1850
1851 vm_page_lockspin_queues();
3e170ce0 1852 vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
b0d623f7
A
1853 vm_page_unlock_queues();
1854
1855 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1856 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1857 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
b0d623f7
A
1858
1859 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1860 vm_object_lock(map->pm_obj);
1861
1862 PMAP_LOCK(map);
1863
1864 /*
1865 * See if someone else expanded us first
1866 */
1867 if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
1868 PMAP_UNLOCK(map);
1869 vm_object_unlock(map->pm_obj);
1870
1871 VM_PAGE_FREE(m);
1872
1873 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb
A
1874 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1875 return KERN_SUCCESS;
b0d623f7
A
1876 }
1877
1878#if 0 /* DEBUG */
39236c6e 1879 if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) {
b0d623f7
A
1880 panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
1881 map, map->pm_obj, vaddr, i);
1882 }
1883#endif
3e170ce0 1884 vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE);
b0d623f7
A
1885 vm_object_unlock(map->pm_obj);
1886
1887 /*
1888 * Set the page directory entry for this page table.
1889 */
1890 pdp = pmap_pde(map, vaddr);
1891 pmap_store_pte(pdp, pa_to_pte(pa)
3e170ce0
A
1892 | PTE_READ(is_ept)
1893 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1894 | PTE_WRITE(is_ept));
b0d623f7
A
1895
1896 PMAP_UNLOCK(map);
1897
316670eb 1898 return KERN_SUCCESS;
b0d623f7
A
1899}
1900
1901/* On K64 machines with more than 32GB of memory, pmap_steal_memory
1902 * will allocate past the 1GB of pre-expanded virtual kernel area. This
1903 * function allocates all the page tables using memory from the same pool
1904 * that pmap_steal_memory uses, rather than calling vm_page_grab (which
1905 * isn't available yet). */
1906void
6d2010ae
A
1907pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
1908{
b0d623f7
A
1909 ppnum_t pn;
1910 pt_entry_t *pte;
3e170ce0 1911 boolean_t is_ept = is_ept_pmap(pmap);
b0d623f7
A
1912
1913 PMAP_LOCK(pmap);
1914
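	/*
	 * Walk top-down: for each missing level (PDPT, page directory,
	 * page table) steal a physical page from the same boot-time pool
	 * pmap_steal_memory uses, zero it, and point the next-higher
	 * level's entry at it.
	 */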
1915 if(pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
0b4c1975 1916 if (!pmap_next_page_hi(&pn))
b0d623f7
A
1917 panic("pmap_pre_expand");
1918
1919 pmap_zero_page(pn);
1920
1921 pte = pmap64_pml4(pmap, vaddr);
1922
1923 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
3e170ce0
A
1924 | PTE_READ(is_ept)
1925 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1926 | PTE_WRITE(is_ept));
b0d623f7
A
1927 }
1928
1929 if(pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
0b4c1975 1930 if (!pmap_next_page_hi(&pn))
b0d623f7
A
1931 panic("pmap_pre_expand");
1932
1933 pmap_zero_page(pn);
1934
1935 pte = pmap64_pdpt(pmap, vaddr);
1936
1937 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
3e170ce0
A
1938 | PTE_READ(is_ept)
1939 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1940 | PTE_WRITE(is_ept));
b0d623f7
A
1941 }
1942
1943 if(pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
0b4c1975 1944 if (!pmap_next_page_hi(&pn))
b0d623f7
A
1945 panic("pmap_pre_expand");
1946
1947 pmap_zero_page(pn);
1948
1949 pte = pmap64_pde(pmap, vaddr);
1950
1951 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
3e170ce0
A
1952 | PTE_READ(is_ept)
1953 | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER)
1954 | PTE_WRITE(is_ept));
b0d623f7
A
1955 }
1956
1957 PMAP_UNLOCK(pmap);
1958}
1959
1960/*
1961 * pmap_sync_page_data_phys(ppnum_t pa)
1962 *
1963 * Invalidates all of the instruction cache on a physical page and
1964 * pushes any dirty data from the data cache for the same physical page.
1965 * Not required on i386 or x86_64.
1966 */
1967void
1968pmap_sync_page_data_phys(__unused ppnum_t pa)
1969{
1970 return;
1971}
1972
1973/*
1974 * pmap_sync_page_attributes_phys(ppnum_t pa)
1975 *
1976 * Write back and invalidate all cachelines on a physical page.
1977 */
1978void
1979pmap_sync_page_attributes_phys(ppnum_t pa)
1980{
1981 cache_flush_page_phys(pa);
1982}
1983
1984
1985
1986#ifdef CURRENTLY_UNUSED_AND_UNTESTED
1987
1988int collect_ref;
1989int collect_unref;
1990
1991/*
1992 * Routine: pmap_collect
1993 * Function:
1994 * Garbage collects the physical map system for
1995 * pages which are no longer used.
1996 * Success need not be guaranteed -- that is, some
1997 * pages that are no longer referenced may be left
1998 * uncollected while others are reclaimed.
1999 * Usage:
2000 * Called by the pageout daemon when pages are scarce.
2001 */
2002void
2003pmap_collect(
2004 pmap_t p)
2005{
2006 register pt_entry_t *pdp, *ptp;
2007 pt_entry_t *eptp;
2008 int wired;
3e170ce0 2009 boolean_t is_ept;
b0d623f7
A
2010
2011 if (p == PMAP_NULL)
2012 return;
2013
2014 if (p == kernel_pmap)
2015 return;
2016
3e170ce0
A
2017 is_ept = is_ept_pmap(p);
2018
b0d623f7
A
2019 /*
2020 * Garbage collect map.
2021 */
2022 PMAP_LOCK(p);
2023
2024 for (pdp = (pt_entry_t *)p->dirbase;
2025 pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
2026 pdp++)
2027 {
3e170ce0
A
2028 if (*pdp & PTE_VALID_MASK(is_ept)) {
2029 if (*pdp & PTE_REF(is_ept)) {
2030 pmap_store_pte(pdp, *pdp & ~PTE_REF(is_ept));
2031 collect_ref++;
2032 } else {
2033 collect_unref++;
2034 ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
2035 eptp = ptp + NPTEPG;
b0d623f7 2036
3e170ce0
A
2037 /*
2038 * If the pte page has any wired mappings, we cannot
2039 * free it.
2040 */
2041 wired = 0;
2042 {
2043 register pt_entry_t *ptep;
2044 for (ptep = ptp; ptep < eptp; ptep++) {
2045 if (iswired(*ptep)) {
2046 wired = 1;
2047 break;
2048 }
2049 }
2050 }
2051 if (!wired) {
2052 /*
2053 * Remove the virtual addresses mapped by this pte page.
2054 */
2055 pmap_remove_range(p,
2056 pdetova(pdp - (pt_entry_t *)p->dirbase),
2057 ptp,
2058 eptp);
2059
2060 /*
2061 * Invalidate the page directory pointer.
2062 */
2063 pmap_store_pte(pdp, 0x0);
2064
2065 PMAP_UNLOCK(p);
2066
2067 /*
2068 * And free the pte page itself.
2069 */
2070 {
2071 register vm_page_t m;
2072
2073 vm_object_lock(p->pm_obj);
2074
2075 m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
2076 if (m == VM_PAGE_NULL)
2077 panic("pmap_collect: pte page not in object");
2078
2079 vm_object_unlock(p->pm_obj);
2080
2081 VM_PAGE_FREE(m);
2082
2083 OSAddAtomic(-1, &inuse_ptepages_count);
2084 PMAP_ZINFO_PFREE(p, PAGE_SIZE);
2085 }
2086
2087 PMAP_LOCK(p);
2088 }
b0d623f7 2089 }
b0d623f7 2090 }
b0d623f7
A
2091 }
2092
2093 PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
2094 PMAP_UNLOCK(p);
2095 return;
b0d623f7
A
2096}
2097#endif
2098
2099
2100void
2101pmap_copy_page(ppnum_t src, ppnum_t dst)
2102{
2103 bcopy_phys((addr64_t)i386_ptob(src),
2104 (addr64_t)i386_ptob(dst),
2105 PAGE_SIZE);
2106}
2107
2108
2109/*
2110 * Routine: pmap_pageable
2111 * Function:
2112 * Make the specified pages (by pmap, offset)
2113 * pageable (or not) as requested.
2114 *
2115 * A page which is not pageable may not take
2116 * a fault; therefore, its page table entry
2117 * must remain valid for the duration.
2118 *
2119 * This routine is merely advisory; pmap_enter
2120 * will specify that these pages are to be wired
2121 * down (or not) as appropriate.
2122 */
2123void
2124pmap_pageable(
2125 __unused pmap_t pmap,
2126 __unused vm_map_offset_t start_addr,
2127 __unused vm_map_offset_t end_addr,
2128 __unused boolean_t pageable)
2129{
2130#ifdef lint
2131 pmap++; start_addr++; end_addr++; pageable++;
2132#endif /* lint */
2133}
2134
b0d623f7
A
2135void
2136invalidate_icache(__unused vm_offset_t addr,
2137 __unused unsigned cnt,
2138 __unused int phys)
2139{
2140 return;
2141}
2142
2143void
2144flush_dcache(__unused vm_offset_t addr,
2145 __unused unsigned count,
2146 __unused int phys)
2147{
2148 return;
2149}
2150
2151#if CONFIG_DTRACE
2152/*
2153 * Constrain DTrace copyin/copyout actions
2154 */
2155extern kern_return_t dtrace_copyio_preflight(addr64_t);
2156extern kern_return_t dtrace_copyio_postflight(addr64_t);
2157
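/*
 * Descriptive note: fail the preflight if we are on the kernel map, or if
 * the current CR3 base does not match the target thread's pmap (or, when
 * no_shared_cr3 is set, the kernel pmap's CR3), since in that case the
 * user mappings the probe would touch are not in the active address space.
 */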
2158kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
2159{
2160 thread_t thread = current_thread();
6d2010ae 2161 uint64_t ccr3;
b0d623f7
A
2162 if (current_map() == kernel_map)
2163 return KERN_FAILURE;
6d2010ae
A
2164 else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE))
2165 return KERN_FAILURE;
2166 else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3))
b0d623f7 2167 return KERN_FAILURE;
b0d623f7
A
2168 else
2169 return KERN_SUCCESS;
2170}
2171
2172kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
2173{
2174 return KERN_SUCCESS;
2175}
2176#endif /* CONFIG_DTRACE */
2177
2178#include <mach_vm_debug.h>
2179#if MACH_VM_DEBUG
2180#include <vm/vm_debug.h>
2181
2182int
2183pmap_list_resident_pages(
2184 __unused pmap_t pmap,
2185 __unused vm_offset_t *listp,
2186 __unused int space)
2187{
2188 return 0;
2189}
2190#endif /* MACH_VM_DEBUG */
2191
2192
2193
2194/* temporary workaround */
2195boolean_t
2196coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
2197{
2198#if 0
2199 pt_entry_t *ptep;
2200
2201 ptep = pmap_pte(map->pmap, va);
2202 if (0 == ptep)
2203 return FALSE;
2204 return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
2205#else
2206 return TRUE;
2207#endif
2208}
2209
2210
2211boolean_t
2212phys_page_exists(ppnum_t pn)
2213{
2214 assert(pn != vm_page_fictitious_addr);
2215
2216 if (!pmap_initialized)
2217 return TRUE;
2218
2219 if (pn == vm_page_guard_addr)
2220 return FALSE;
2221
2222 if (!IS_MANAGED_PAGE(ppn_to_pai(pn)))
2223 return FALSE;
2224
2225 return TRUE;
2226}
2227
6d2010ae
A
2228
2229
b0d623f7
A
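/*
 * Switch the calling CPU onto tpmap's address space.  set_dirbase()
 * reloads the hardware translation base (CR3) for the new pmap; interrupts
 * are disabled around it so the switch cannot be interrupted midway.
 */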
2230void
2231pmap_switch(pmap_t tpmap)
2232{
2233 spl_t s;
2234
2235 s = splhigh(); /* Make sure interruptions are disabled */
fe8ab488 2236 set_dirbase(tpmap, current_thread(), cpu_number());
b0d623f7
A
2237 splx(s);
2238}
2239
2240
2241/*
2242 * disable no-execute capability on
2243 * the specified pmap
2244 */
2245void
2246pmap_disable_NX(pmap_t pmap)
2247{
2248 pmap->nx_enabled = 0;
2249}
2250
6d2010ae
A
2251void
2252pt_fake_zone_init(int zone_index)
2253{
2254 pt_fake_zone_index = zone_index;
2255}
2256
b0d623f7
A
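/*
 * Report page-table page consumption as a synthetic ("fake") zone so that
 * zone statistics reflect the pages tracked by inuse_ptepages_count and
 * alloc_ptepages_count.
 */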
2257void
2258pt_fake_zone_info(
2259 int *count,
2260 vm_size_t *cur_size,
2261 vm_size_t *max_size,
2262 vm_size_t *elem_size,
2263 vm_size_t *alloc_size,
6d2010ae 2264 uint64_t *sum_size,
b0d623f7 2265 int *collectable,
6d2010ae
A
2266 int *exhaustable,
2267 int *caller_acct)
b0d623f7
A
2268{
2269 *count = inuse_ptepages_count;
2270 *cur_size = PAGE_SIZE * inuse_ptepages_count;
2271 *max_size = PAGE_SIZE * (inuse_ptepages_count +
2272 vm_page_inactive_count +
2273 vm_page_active_count +
2274 vm_page_free_count);
2275 *elem_size = PAGE_SIZE;
2276 *alloc_size = PAGE_SIZE;
6d2010ae 2277 *sum_size = alloc_ptepages_count * PAGE_SIZE;
b0d623f7
A
2278
2279 *collectable = 1;
2280 *exhaustable = 0;
6d2010ae 2281 *caller_acct = 1;
b0d623f7
A
2282}
2283
39236c6e
A
2284
2285void
2286pmap_flush_context_init(pmap_flush_context *pfc)
2287{
2288 pfc->pfc_cpus = 0;
2289 pfc->pfc_invalid_global = 0;
2290}
2291
fe8ab488 2292extern unsigned TLBTimeOut;
39236c6e
A
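/*
 * Illustrative usage sketch (not a verbatim excerpt of any caller): a
 * caller batching several pmap updates can defer the TLB-shootdown IPIs by
 * passing PMAP_DELAY_TLB_FLUSH to pmap_flush_tlbs(), which accumulates the
 * target CPUs in the flush context, and then issue a single pmap_flush()
 * once all updates are staged:
 *
 *	pmap_flush_context pfc;
 *	pmap_flush_context_init(&pfc);
 *	...update several mappings...
 *	pmap_flush_tlbs(map, start, end, PMAP_DELAY_TLB_FLUSH, &pfc);
 *	...
 *	pmap_flush(&pfc);
 */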
2293void
2294pmap_flush(
2295 pmap_flush_context *pfc)
2296{
2297 unsigned int my_cpu;
2298 unsigned int cpu;
2299 unsigned int cpu_bit;
fe8ab488
A
2300 cpumask_t cpus_to_respond = 0;
2301 cpumask_t cpus_to_signal = 0;
2302 cpumask_t cpus_signaled = 0;
39236c6e
A
2303 boolean_t flush_self = FALSE;
2304 uint64_t deadline;
2305
2306 mp_disable_preemption();
2307
2308 my_cpu = cpu_number();
2309 cpus_to_signal = pfc->pfc_cpus;
2310
2311 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
2312 NULL, cpus_to_signal, 0, 0, 0);
2313
2314 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {
2315
2316 if (cpus_to_signal & cpu_bit) {
2317
2318 cpus_to_signal &= ~cpu_bit;
2319
2320 if (!cpu_datap(cpu)->cpu_running)
2321 continue;
2322
2323 if (pfc->pfc_invalid_global & cpu_bit)
2324 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2325 else
2326 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
2327 mfence();
2328
2329 if (cpu == my_cpu) {
2330 flush_self = TRUE;
2331 continue;
2332 }
2333 if (CPU_CR3_IS_ACTIVE(cpu)) {
2334 cpus_to_respond |= cpu_bit;
2335 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2336 }
2337 }
2338 }
2339 cpus_signaled = cpus_to_respond;
2340
2341 /*
2342 * Flush local tlb if required.
2343 * Do this now to overlap with other processors responding.
2344 */
2345 if (flush_self && cpu_datap(my_cpu)->cpu_tlb_invalid != FALSE)
2346 process_pmap_updates();
2347
2348 if (cpus_to_respond) {
2349
fe8ab488
A
2350 deadline = mach_absolute_time() +
2351 (TLBTimeOut ? TLBTimeOut : LockTimeOut);
2352 boolean_t is_timeout_traced = FALSE;
2353
39236c6e
A
2354 /*
2355 * Wait for those other cpus to acknowledge
2356 */
2357 while (cpus_to_respond != 0) {
2358 long orig_acks = 0;
2359
2360 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2361 /* Consider checking local/global invalidity
2362 * as appropriate in the PCID case.
2363 */
2364 if ((cpus_to_respond & cpu_bit) != 0) {
2365 if (!cpu_datap(cpu)->cpu_running ||
2366 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2367 !CPU_CR3_IS_ACTIVE(cpu)) {
2368 cpus_to_respond &= ~cpu_bit;
2369 }
2370 cpu_pause();
2371 }
2372 if (cpus_to_respond == 0)
2373 break;
2374 }
2375 if (cpus_to_respond && (mach_absolute_time() > deadline)) {
2376 if (machine_timeout_suspended())
2377 continue;
fe8ab488
A
2378 if (TLBTimeOut == 0) {
2379 if (is_timeout_traced)
2380 continue;
2381 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO),
2382 NULL, cpus_to_signal, cpus_to_respond, 0, 0);
2383 is_timeout_traced = TRUE;
2384 continue;
2385 }
39236c6e
A
2386 pmap_tlb_flush_timeout = TRUE;
2387 orig_acks = NMIPI_acks;
fe8ab488 2388 mp_cpus_NMIPI(cpus_to_respond);
39236c6e
A
2389
2390 panic("TLB invalidation IPI timeout: "
3e170ce0 2391 "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
39236c6e
A
2392 cpus_to_respond, orig_acks, NMIPI_acks);
2393 }
2394 }
2395 }
2396 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
2397 NULL, cpus_signaled, flush_self, 0, 0);
2398
2399 mp_enable_preemption();
2400}
2401
2402
3e170ce0
A
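/*
 * invept(): issue a single-context INVEPT to flush guest-physical
 * translations derived from the given EPT pointer.  The instruction takes
 * the invalidation type in a register and a 128-bit, 16-byte-aligned
 * descriptor in memory: the EPTP in the low quadword and a reserved (zero)
 * high quadword.
 */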
2403static void
2404invept(void *eptp)
2405{
2406 struct {
2407 uint64_t eptp;
2408 uint64_t reserved;
2409 } __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0};
2410
2411 __asm__ volatile("invept (%%rax), %%rcx"
2412 : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor)
2413 : "cc", "memory");
2414}
2415
b0d623f7
A
2416/*
2417 * Called with pmap locked, we:
2418 * - scan through per-cpu data to see which other cpus need to flush
2419 * - send an IPI to each non-idle cpu to be flushed
2420 * - wait for all to signal back that they are inactive or we see that
2421 * they are at a safe point (idle).
2422 * - flush the local tlb if active for this pmap
2423 * - return ... the caller will unlock the pmap
2424 */
6d2010ae 2425
b0d623f7 2426void
39236c6e 2427pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
b0d623f7
A
2428{
2429 unsigned int cpu;
2430 unsigned int cpu_bit;
fe8ab488 2431 cpumask_t cpus_to_signal;
b0d623f7
A
2432 unsigned int my_cpu = cpu_number();
2433 pmap_paddr_t pmap_cr3 = pmap->pm_cr3;
2434 boolean_t flush_self = FALSE;
2435 uint64_t deadline;
6d2010ae 2436 boolean_t pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
39236c6e 2437 boolean_t need_global_flush = FALSE;
fe8ab488 2438 uint32_t event_code;
4bd07ac2 2439 vm_map_offset_t event_startv, event_endv;
3e170ce0 2440 boolean_t is_ept = is_ept_pmap(pmap);
b0d623f7
A
2441
2442 assert((processor_avail_count < 2) ||
2443 (ml_get_interrupts_enabled() && get_preemption_level() != 0));
2444
3e170ce0
A
2445 if (pmap == kernel_pmap) {
2446 event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS);
4bd07ac2
A
2447 event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv);
2448 event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv);
3e170ce0
A
2449 } else if (is_ept) {
2450 event_code = PMAP_CODE(PMAP__FLUSH_EPT);
4bd07ac2
A
2451 event_startv = startv;
2452 event_endv = endv;
3e170ce0
A
2453 } else {
2454 event_code = PMAP_CODE(PMAP__FLUSH_TLBS);
4bd07ac2
A
2455 event_startv = startv;
2456 event_endv = endv;
3e170ce0
A
2457 }
2458
fe8ab488 2459 PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START,
4bd07ac2 2460 VM_KERNEL_UNSLIDE_OR_PERM(pmap), options, event_startv, event_endv, 0);
fe8ab488 2461
3e170ce0
A
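	/*
	 * EPT pmaps have no CR3 to compare against, so broadcast a
	 * single-context INVEPT to every CPU and skip the CR3-based
	 * scan below.
	 */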
2462 if (is_ept) {
2463 mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp);
2464 goto out;
2465 }
2466
b0d623f7
A
2467 /*
2468 * Scan other cpus for matching active or task CR3.
2469 * For idle cpus (with no active map) we mark them invalid but
2470 * don't signal -- they'll check as they go busy.
2471 */
2472 cpus_to_signal = 0;
6d2010ae
A
2473
2474 if (pmap_pcid_ncpus) {
39236c6e
A
2475 if (pmap_is_shared)
2476 need_global_flush = TRUE;
6d2010ae 2477 pmap_pcid_invalidate_all_cpus(pmap);
39236c6e 2478 mfence();
6d2010ae 2479 }
b0d623f7
A
2480 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2481 if (!cpu_datap(cpu)->cpu_running)
2482 continue;
2483 uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
2484 uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);
2485
2486 if ((pmap_cr3 == cpu_task_cr3) ||
2487 (pmap_cr3 == cpu_active_cr3) ||
6d2010ae 2488 (pmap_is_shared)) {
39236c6e
A
2489
2490 if (options & PMAP_DELAY_TLB_FLUSH) {
2491 if (need_global_flush == TRUE)
2492 pfc->pfc_invalid_global |= cpu_bit;
2493 pfc->pfc_cpus |= cpu_bit;
2494
2495 continue;
2496 }
b0d623f7
A
2497 if (cpu == my_cpu) {
2498 flush_self = TRUE;
2499 continue;
2500 }
39236c6e 2501 if (need_global_flush == TRUE)
6d2010ae
A
2502 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2503 else
2504 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
39236c6e 2505 mfence();
b0d623f7
A
2506
2507 /*
2508 * We don't need to signal processors which will flush
2509 * lazily at the idle state or kernel boundary.
2510 * For example, if we're invalidating the kernel pmap,
2511 * processors currently in userspace don't need to flush
2512 * their TLBs until the next time they enter the kernel.
2513 * Alterations to the address space of a task active
2514 * on a remote processor result in a signal, to
2515 * account for copy operations. (There may be room
2516 * for optimization in such cases).
2517 * The order of the loads below with respect
2518 * to the store to the "cpu_tlb_invalid" field above
2519 * is important--hence the barrier.
2520 */
2521 if (CPU_CR3_IS_ACTIVE(cpu) &&
2522 (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
39236c6e
A
2523 pmap->pm_shared ||
2524 (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
b0d623f7
A
2525 cpus_to_signal |= cpu_bit;
2526 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2527 }
2528 }
2529 }
39236c6e 2530 if ((options & PMAP_DELAY_TLB_FLUSH))
fe8ab488 2531 goto out;
b0d623f7 2532
b0d623f7
A
2533 /*
2534 * Flush local tlb if required.
2535 * Do this now to overlap with other processors responding.
2536 */
6d2010ae
A
2537 if (flush_self) {
2538 if (pmap_pcid_ncpus) {
2539 pmap_pcid_validate_cpu(pmap, my_cpu);
2540 if (pmap_is_shared)
2541 tlb_flush_global();
2542 else
2543 flush_tlb_raw();
2544 }
2545 else
2546 flush_tlb_raw();
2547 }
b0d623f7
A
2548
2549 if (cpus_to_signal) {
fe8ab488
A
2550 cpumask_t cpus_to_respond = cpus_to_signal;
2551
2552 deadline = mach_absolute_time() +
2553 (TLBTimeOut ? TLBTimeOut : LockTimeOut);
2554 boolean_t is_timeout_traced = FALSE;
b0d623f7 2555
b0d623f7
A
2556 /*
2557 * Wait for those other cpus to acknowledge
2558 */
2559 while (cpus_to_respond != 0) {
060df5ea 2560 long orig_acks = 0;
b0d623f7
A
2561
2562 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
6d2010ae
A
2563 /* Consider checking local/global invalidity
2564 * as appropriate in the PCID case.
2565 */
b0d623f7
A
2566 if ((cpus_to_respond & cpu_bit) != 0) {
2567 if (!cpu_datap(cpu)->cpu_running ||
2568 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2569 !CPU_CR3_IS_ACTIVE(cpu)) {
2570 cpus_to_respond &= ~cpu_bit;
2571 }
2572 cpu_pause();
2573 }
2574 if (cpus_to_respond == 0)
2575 break;
2576 }
6d2010ae 2577 if (cpus_to_respond && (mach_absolute_time() > deadline)) {
060df5ea
A
2578 if (machine_timeout_suspended())
2579 continue;
fe8ab488
A
2580 if (TLBTimeOut == 0) {
2581 /* cut tracepoint but don't panic */
2582 if (is_timeout_traced)
2583 continue;
2584 PMAP_TRACE_CONSTANT(
2585 PMAP_CODE(PMAP__FLUSH_TLBS_TO),
4bd07ac2 2586 VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, cpus_to_respond, 0, 0);
fe8ab488
A
2587 is_timeout_traced = TRUE;
2588 continue;
2589 }
060df5ea
A
2590 pmap_tlb_flush_timeout = TRUE;
2591 orig_acks = NMIPI_acks;
fe8ab488 2592 mp_cpus_NMIPI(cpus_to_respond);
060df5ea
A
2593
2594 panic("TLB invalidation IPI timeout: "
3e170ce0 2595 "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%llx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
060df5ea
A
2596 cpus_to_respond, orig_acks, NMIPI_acks);
2597 }
b0d623f7
A
2598 }
2599 }
2600
316670eb 2601 if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
39236c6e
A
2602 panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
2603 }
2604
fe8ab488
A
2605out:
2606 PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END,
4bd07ac2 2607 VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, event_startv, event_endv, 0);
316670eb 2608
b0d623f7
A
2609}
2610
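/*
 * Handle a deferred TLB invalidation on the local CPU: revalidate the PCID
 * state when PCIDs are in use, then perform a global or local TLB flush
 * depending on which kind of invalidation was requested.
 */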
2611void
2612process_pmap_updates(void)
2613{
6d2010ae
A
2614 int ccpu = cpu_number();
2615 pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
2616 if (pmap_pcid_ncpus) {
2617 pmap_pcid_validate_current();
2618 if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
2619 cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
2620 tlb_flush_global();
2621 }
2622 else {
2623 cpu_datap(ccpu)->cpu_tlb_invalid_local = FALSE;
2624 flush_tlb_raw();
2625 }
2626 }
2627 else {
2628 current_cpu_datap()->cpu_tlb_invalid = FALSE;
2629 flush_tlb_raw();
2630 }
b0d623f7 2631
39236c6e 2632 mfence();
b0d623f7
A
2633}
2634
2635void
2636pmap_update_interrupt(void)
2637{
2638 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
2639 0, 0, 0, 0, 0);
2640
39236c6e
A
2641 if (current_cpu_datap()->cpu_tlb_invalid)
2642 process_pmap_updates();
b0d623f7
A
2643
2644 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
2645 0, 0, 0, 0, 0);
2646}
316670eb
A
2647
2648#include <mach/mach_vm.h> /* mach_vm_region_recurse() */
2649/* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
2650 * and identify ranges with mismatched VM permissions and PTE permissions
2651 */
2652kern_return_t
2653pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) {
2654 vm_offset_t cv = sv;
2655 kern_return_t rv = KERN_SUCCESS;
2656 uint64_t skip4 = 0, skip2 = 0;
2657
3e170ce0
A
2658 assert(!is_ept_pmap(ipmap));
2659
316670eb
A
2660 sv &= ~PAGE_MASK_64;
2661 ev &= ~PAGE_MASK_64;
2662 while (cv < ev) {
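		/* Skip the non-canonical hole between the user and kernel halves of the address space. */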
2663 if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
2664 (cv < 0xFFFF800000000000ULL))) {
2665 cv = 0xFFFF800000000000ULL;
2666 }
2667 /* Potential inconsistencies from not holding pmap lock
2668 * but harmless for the moment.
2669 */
2670 if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
2671 if ((cv + NBPML4) > cv)
2672 cv += NBPML4;
2673 else
2674 break;
2675 skip4++;
2676 continue;
2677 }
2678 if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
2679 if ((cv + NBPD) > cv)
2680 cv += NBPD;
2681 else
2682 break;
2683 skip2++;
2684 continue;
2685 }
2686
2687 pt_entry_t *ptep = pmap_pte(ipmap, cv);
2688 if (ptep && (*ptep & INTEL_PTE_VALID)) {
2689 if (*ptep & INTEL_PTE_WRITE) {
2690 if (!(*ptep & INTEL_PTE_NX)) {
2691 kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
2692 rv = KERN_FAILURE;
2693 }
2694 }
2695 }
2696 cv += PAGE_SIZE;
2697 }
2698 kprintf("Completed pmap scan\n");
2699 cv = sv;
2700
2701 struct vm_region_submap_info_64 vbr;
2702 mach_msg_type_number_t vbrcount = 0;
2703 mach_vm_size_t vmsize;
2704 vm_prot_t prot;
2705 uint32_t nesting_depth = 0;
2706 kern_return_t kret;
2707
2708 while (cv < ev) {
2709
2710 for (;;) {
2711 vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
2712 if((kret = mach_vm_region_recurse(ivmmap,
2713 (mach_vm_address_t *) &cv, &vmsize, &nesting_depth,
2714 (vm_region_recurse_info_t)&vbr,
2715 &vbrcount)) != KERN_SUCCESS) {
2716 break;
2717 }
2718
2719 if(vbr.is_submap) {
2720 nesting_depth++;
2721 continue;
2722 } else {
2723 break;
2724 }
2725 }
2726
2727 if(kret != KERN_SUCCESS)
2728 break;
2729
2730 prot = vbr.protection;
2731
2732 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
2733 kprintf("W+X map entry at address 0x%lx\n", cv);
2734 rv = KERN_FAILURE;
2735 }
2736
2737 if (prot) {
2738 vm_offset_t pcv;
2739 for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
2740 pt_entry_t *ptep = pmap_pte(ipmap, pcv);
2741 vm_prot_t tprot;
2742
2743 if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID))
2744 continue;
2745 tprot = VM_PROT_READ;
2746 if (*ptep & INTEL_PTE_WRITE)
2747 tprot |= VM_PROT_WRITE;
2748 if ((*ptep & INTEL_PTE_NX) == 0)
2749 tprot |= VM_PROT_EXECUTE;
2750 if (tprot != prot) {
2751 kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
2752 rv = KERN_FAILURE;
2753 }
2754 }
2755 }
2756 cv += vmsize;
2757 }
2758 return rv;
2759}