[apple/xnu.git] / osfmk / x86_64 / pmap.c  (blame view of release xnu-2050.24.15)
b0d623f7 1/*
6d2010ae 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 * File: pmap.c
61 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * (These guys wrote the Vax version)
63 *
64 * Physical Map management code for Intel i386, i486, and i860.
65 *
66 * Manages physical address maps.
67 *
68 * In addition to hardware address maps, this
69 * module is called upon to provide software-use-only
70 * maps which may or may not be stored in the same
71 * form as hardware maps. These pseudo-maps are
72 * used to store intermediate results from copy
73 * operations to and from address spaces.
74 *
75 * Since the information managed by this module is
76 * also stored by the logical address mapping module,
77 * this module may throw away valid virtual-to-physical
78 * mappings at almost any time. However, invalidations
79 * of virtual-to-physical mappings must be done as
80 * requested.
81 *
82 * In order to cope with hardware architectures which
83 * make virtual-to-physical map invalidates expensive,
84 * this module may delay invalidate or reduced protection
85 * operations until such time as they are actually
86 * necessary. This module is given full information as
87 * to which processors are currently using which maps,
88 * and to when physical maps must be made correct.
89 */
90
91#include <string.h>
92#include <mach_ldebug.h>
93
94#include <libkern/OSAtomic.h>
95
96#include <mach/machine/vm_types.h>
97
98#include <mach/boolean.h>
99#include <kern/thread.h>
100#include <kern/zalloc.h>
101#include <kern/queue.h>
316670eb 102#include <kern/ledger.h>
6d2010ae 103#include <kern/mach_param.h>
104
105#include <kern/lock.h>
106#include <kern/kalloc.h>
107#include <kern/spl.h>
108
109#include <vm/pmap.h>
110#include <vm/vm_map.h>
111#include <vm/vm_kern.h>
112#include <mach/vm_param.h>
113#include <mach/vm_prot.h>
114#include <vm/vm_object.h>
115#include <vm/vm_page.h>
116
117#include <mach/machine/vm_param.h>
118#include <machine/thread.h>
119
120#include <kern/misc_protos.h> /* prototyping */
121#include <i386/misc_protos.h>
6d2010ae 122#include <i386/i386_lowmem.h>
123#include <x86_64/lowglobals.h>
124
125#include <i386/cpuid.h>
126#include <i386/cpu_data.h>
127#include <i386/cpu_number.h>
128#include <i386/machine_cpu.h>
129#include <i386/seg.h>
130#include <i386/serial_io.h>
131#include <i386/cpu_capabilities.h>
132#include <i386/machine_routines.h>
133#include <i386/proc_reg.h>
134#include <i386/tsc.h>
135#include <i386/pmap_internal.h>
6d2010ae 136#include <i386/pmap_pcid.h>
b0d623f7 137
138#include <vm/vm_protos.h>
139
140#include <i386/mp.h>
141#include <i386/mp_desc.h>
142#include <libkern/kernel_mach_header.h>
143
144#include <pexpert/i386/efi.h>
145
146
147#ifdef IWANTTODEBUG
148#undef DEBUG
149#define DEBUG 1
150#define POSTCODE_DELAY 1
151#include <i386/postcode.h>
152#endif /* IWANTTODEBUG */
153
154#ifdef PMAP_DEBUG
155#define DBG(x...) kprintf("DBG: " x)
156#else
157#define DBG(x...)
158#endif
159/* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
160 * in the trampolines for kernel/user boundary TLB coherency.
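 * (The declaration below has size 1 when the condition holds and size -1,
 *  which is a compile-time error, when it does not.)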
b0d623f7 161 */
162char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
163boolean_t pmap_trace = FALSE;
b0d623f7 164
6d2010ae 165boolean_t no_shared_cr3 = DEBUG; /* TRUE for DEBUG by default */
166
167int nx_enabled = 1; /* enable no-execute protection */
168int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
169int allow_stack_exec = 0; /* No apps may execute from the stack by default */
170
171const boolean_t cpu_64bit = TRUE; /* Mais oui! */
172
173uint64_t max_preemption_latency_tsc = 0;
174
175pv_hashed_entry_t *pv_hash_table; /* hash lists */
176
177uint32_t npvhash = 0;
178
179pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
180pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
181decl_simple_lock_data(,pv_hashed_free_list_lock)
182decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
183decl_simple_lock_data(,pv_hash_table_lock)
184
185zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */
186
187/*
188 * First and last physical addresses that we maintain any information
189 * for. Initialized to zero so that pmap operations done before
190 * pmap_init won't touch any non-existent structures.
191 */
192boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
193
194static struct vm_object kptobj_object_store;
195static struct vm_object kpml4obj_object_store;
196static struct vm_object kpdptobj_object_store;
197
198/*
6d2010ae 199 * Array of physical page attributes for managed pages.
200 * One byte per physical page.
201 */
202char *pmap_phys_attributes;
316670eb 203ppnum_t last_managed_page = 0;
204
205/*
206 * Amount of virtual memory mapped by one
207 * page-directory entry.
208 */
209
210uint64_t pde_mapped_size = PDE_MAPPED_SIZE;
211
212unsigned pmap_memory_region_count;
213unsigned pmap_memory_region_current;
214
215pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
216
217/*
218 * Other useful macros.
219 */
220#define current_pmap() (vm_map_pmap(current_thread()->map))
221
222struct pmap kernel_pmap_store;
223pmap_t kernel_pmap;
224
225struct zone *pmap_zone; /* zone of pmap structures */
226
227struct zone *pmap_anchor_zone;
228int pmap_debug = 0; /* flag for debugging prints */
229
b0d623f7 230unsigned int inuse_ptepages_count = 0;
231long long alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
232unsigned int bootstrap_wired_pages = 0;
233int pt_fake_zone_index = -1;
b0d623f7 234
6d2010ae 235extern long NMIPI_acks;
b0d623f7 236
237boolean_t kernel_text_ps_4K = TRUE;
238boolean_t wpkernel = TRUE;
239
240extern char end;
241
242static int nkpt;
243
244pt_entry_t *DMAP1, *DMAP2;
245caddr_t DADDR1;
246caddr_t DADDR2;
b0d623f7 247
248const boolean_t pmap_disable_kheap_nx = FALSE;
249const boolean_t pmap_disable_kstack_nx = FALSE;
250extern boolean_t doconstro_override;
b0d623f7 251
316670eb 252extern long __stack_chk_guard[];
253
254/*
255 * Map memory at initialization. The physical addresses being
256 * mapped are not managed and are never unmapped.
257 *
258 * For now, VM is already on; we only need to map the
259 * specified memory.
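 * Returns the first virtual address beyond the mapped range.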
260 */
261vm_offset_t
262pmap_map(
263 vm_offset_t virt,
264 vm_map_offset_t start_addr,
265 vm_map_offset_t end_addr,
266 vm_prot_t prot,
267 unsigned int flags)
268{
269 int ps;
270
271 ps = PAGE_SIZE;
272 while (start_addr < end_addr) {
273 pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
316670eb 274 (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
275 virt += ps;
276 start_addr += ps;
277 }
278 return(virt);
279}
280
281extern char *first_avail;
282extern vm_offset_t virtual_avail, virtual_end;
283extern pmap_paddr_t avail_start, avail_end;
284extern vm_offset_t sHIB;
285extern vm_offset_t eHIB;
286extern vm_offset_t stext;
287extern vm_offset_t etext;
288extern vm_offset_t sdata, edata;
289extern vm_offset_t sconstdata, econstdata;
b0d623f7 290
291extern void *KPTphys;
292
293boolean_t pmap_smep_enabled = FALSE;
294
295void
296pmap_cpu_init(void)
297{
298 /*
299 * Here early in the life of a processor (from cpu_mode_init()).
6d2010ae 300 * Ensure global page feature is disabled at this point.
b0d623f7 301 */
6d2010ae 302
303 set_cr4(get_cr4() &~ CR4_PGE);
304
305 /*
306 * Initialize the per-cpu, TLB-related fields.
307 */
308 current_cpu_datap()->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
309 current_cpu_datap()->cpu_active_cr3 = kernel_pmap->pm_cr3;
310 current_cpu_datap()->cpu_tlb_invalid = FALSE;
311 current_cpu_datap()->cpu_task_map = TASK_MAP_64BIT;
312 pmap_pcid_configure();
313 if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
314 boolean_t nsmep;
315 if (!PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
316 set_cr4(get_cr4() | CR4_SMEP);
317 pmap_smep_enabled = TRUE;
318 }
319 }
320}
321
322
323
324/*
325 * Bootstrap the system enough to run with virtual memory.
326 * Map the kernel's code and data, and allocate the system page table.
327 * Called with mapping OFF. Page_size must already be set.
328 */
329
330void
331pmap_bootstrap(
332 __unused vm_offset_t load_start,
333 __unused boolean_t IA32e)
334{
335#if NCOPY_WINDOWS > 0
336 vm_offset_t va;
337 int i;
338#endif
339 assert(IA32e);
340
341 vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address
342 * known to VM */
343 /*
344 * The kernel's pmap is statically allocated so we don't
345 * have to use pmap_create, which is unlikely to work
346 * correctly at this part of the boot sequence.
347 */
348
349 kernel_pmap = &kernel_pmap_store;
350 kernel_pmap->ref_count = 1;
316670eb 351 kernel_pmap->nx_enabled = TRUE;
352 kernel_pmap->pm_task_map = TASK_MAP_64BIT;
353 kernel_pmap->pm_obj = (vm_object_t) NULL;
354 kernel_pmap->dirbase = (pd_entry_t *)((uintptr_t)IdlePTD);
355 kernel_pmap->pm_pdpt = (pd_entry_t *) ((uintptr_t)IdlePDPT);
356 kernel_pmap->pm_pml4 = IdlePML4;
357 kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
6d2010ae 358 pmap_pcid_initialize_kernel(kernel_pmap);
b0d623f7 359
6d2010ae 360
361
362 current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
363
364 nkpt = NKPT;
365 OSAddAtomic(NKPT, &inuse_ptepages_count);
366 OSAddAtomic64(NKPT, &alloc_ptepages_count);
367 bootstrap_wired_pages = NKPT;
368
369 virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
370 virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
371
372#if NCOPY_WINDOWS > 0
373 /*
374 * Reserve some special page table entries/VA space for temporary
375 * mapping of pages.
376 */
377#define SYSMAP(c, p, v, n) \
378 v = (c)va; va += ((n)*INTEL_PGBYTES);
379
380 va = virtual_avail;
381
382 for (i=0; i<PMAP_NWINDOWS; i++) {
383#if 1
384 kprintf("trying to do SYSMAP idx %d %p\n", i,
385 current_cpu_datap());
386 kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap);
387 kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow);
388 kprintf("two stuff %p %p\n",
389 (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
390 (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR));
391#endif
392 SYSMAP(caddr_t,
393 (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
394 (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
395 1);
396 current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP =
397 &(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store);
398 *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
399 }
400
401 /* DMAP used for debugger */
402 SYSMAP(caddr_t, DMAP1, DADDR1, 1);
403 SYSMAP(caddr_t, DMAP2, DADDR2, 1); /* XXX temporary - can remove */
404
405 virtual_avail = va;
406#endif
407
408 if (PE_parse_boot_argn("npvhash", &npvhash, sizeof (npvhash))) {
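		/* npvhash is used as a hash mask, so it must have the form (2^N)-1;
		 * for exactly those values, (npvhash + 1) & npvhash is zero. */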
409 if (0 != ((npvhash + 1) & npvhash)) {
410 kprintf("invalid hash %d, must be ((2^N)-1), "
411 "using default %d\n", npvhash, NPVHASH);
412 npvhash = NPVHASH;
413 }
414 } else {
415 npvhash = NPVHASH;
416 }
417
418 simple_lock_init(&kernel_pmap->lock, 0);
419 simple_lock_init(&pv_hashed_free_list_lock, 0);
420 simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
421 simple_lock_init(&pv_hash_table_lock,0);
422
423 pmap_cpu_init();
424
425 if (pmap_pcid_ncpus)
426 printf("PMAP: PCID enabled\n");
427
428 if (pmap_smep_enabled)
429 printf("PMAP: Supervisor Mode Execute Protection enabled\n");
7ddcb079 430
431#if DEBUG
432 printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
433 printf("ml_early_random(): 0x%qx\n", ml_early_random());
434#endif
435 boolean_t ptmp;
436 /* Check if the user has requested disabling stack or heap no-execute
437 * enforcement. These are "const" variables; that qualifier is cast away
438 * when altering them. The TEXT/DATA const sections are marked
439 * write protected later in the kernel startup sequence, so altering
440 * them is possible at this point, in pmap_bootstrap().
441 */
442 if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
443 boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
444 *pdknxp = TRUE;
445 }
446
447 if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
448 boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
449 *pdknhp = TRUE;
450 }
451
452 boot_args *args = (boot_args *)PE_state.bootArgs;
453 if (args->efiMode == kBootArgsEfiMode32) {
454 printf("EFI32: kernel virtual space limited to 4GB\n");
455 virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
456 }
457 kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
458 (long)KERNEL_BASE, (long)virtual_end);
459 kprintf("Available physical space from 0x%llx to 0x%llx\n",
460 avail_start, avail_end);
461
462 /*
463 * The -no_shared_cr3 boot-arg is a debugging feature (set by default
464 * in the DEBUG kernel) to force the kernel to switch to its own map
465 * (and cr3) when control is in kernelspace. The kernel's map does not
466 * include (i.e. share) userspace so wild references will cause
467 * a panic. Only copyin and copyout are exempt from this.
468 */
469 (void) PE_parse_boot_argn("-no_shared_cr3",
470 &no_shared_cr3, sizeof (no_shared_cr3));
471 if (no_shared_cr3)
472 kprintf("Kernel not sharing user map\n");
473
474#ifdef PMAP_TRACES
475 if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
476 kprintf("Kernel traces for pmap operations enabled\n");
477 }
478#endif /* PMAP_TRACES */
479}
480
481void
482pmap_virtual_space(
483 vm_offset_t *startp,
484 vm_offset_t *endp)
485{
486 *startp = virtual_avail;
487 *endp = virtual_end;
488}
489
490/*
491 * Initialize the pmap module.
492 * Called by vm_init, to initialize any structures that the pmap
493 * system needs to map virtual memory.
494 */
495void
496pmap_init(void)
497{
498 long npages;
499 vm_offset_t addr;
060df5ea 500 vm_size_t s, vsize;
501 vm_map_offset_t vaddr;
502 ppnum_t ppn;
503
504
505 kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
506 _vm_object_allocate((vm_object_size_t)NPML4PGS, &kpml4obj_object_store);
507
508 kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
509 _vm_object_allocate((vm_object_size_t)NPDPTPGS, &kpdptobj_object_store);
510
511 kernel_pmap->pm_obj = &kptobj_object_store;
512 _vm_object_allocate((vm_object_size_t)NPDEPGS, &kptobj_object_store);
513
514 /*
515 * Allocate memory for the pv_head_table and its lock bits,
516 * the modify bit array, and the pte_page table.
517 */
518
519 /*
520 * zero bias all these arrays now instead of off avail_start
521 * so we cover all memory
522 */
523
524 npages = i386_btop(avail_end);
525 s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
526 + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
527 + pv_lock_table_size(npages)
528 + pv_hash_lock_table_size((npvhash+1))
529 + npages);
530
531 s = round_page(s);
532 if (kernel_memory_allocate(kernel_map, &addr, s, 0,
533 KMA_KOBJECT | KMA_PERMANENT)
534 != KERN_SUCCESS)
535 panic("pmap_init");
536
537 memset((char *)addr, 0, s);
538
539 vaddr = addr;
540 vsize = s;
541
542#if PV_DEBUG
543 if (0 == npvhash) panic("npvhash not initialized");
544#endif
545
546 /*
547 * Allocate the structures first to preserve word-alignment.
548 */
549 pv_head_table = (pv_rooted_entry_t) addr;
550 addr = (vm_offset_t) (pv_head_table + npages);
551
552 pv_hash_table = (pv_hashed_entry_t *)addr;
553 addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
554
555 pv_lock_table = (char *) addr;
556 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
557
558 pv_hash_lock_table = (char *) addr;
559 addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
560
561 pmap_phys_attributes = (char *) addr;
562
563 ppnum_t last_pn = i386_btop(avail_end);
564 unsigned int i;
565 pmap_memory_region_t *pmptr = pmap_memory_regions;
566 for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
567 if (pmptr->type != kEfiConventionalMemory)
568 continue;
316670eb 569 ppnum_t pn;
570 for (pn = pmptr->base; pn <= pmptr->end; pn++) {
571 if (pn < last_pn) {
572 pmap_phys_attributes[pn] |= PHYS_MANAGED;
060df5ea 573
574 if (pn > last_managed_page)
575 last_managed_page = pn;
060df5ea 576
7ddcb079 577 if (pn >= lowest_hi && pn <= highest_hi)
060df5ea 578 pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
579 }
580 }
581 }
582 while (vsize) {
583 ppn = pmap_find_phys(kernel_pmap, vaddr);
b0d623f7 584
585 pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;
586
587 vaddr += PAGE_SIZE;
588 vsize -= PAGE_SIZE;
589 }
590 /*
591 * Create the zone of physical maps,
592 * and of the physical-to-virtual entries.
593 */
594 s = (vm_size_t) sizeof(struct pmap);
595 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
596 zone_change(pmap_zone, Z_NOENCRYPT, TRUE);
597
598 pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors");
599 zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE);
600
6d2010ae 601 /* The anchor is required to be page aligned. Zone debugging adds
602 * padding which may violate that requirement. Tell the zone
603 * subsystem that alignment is required.
6d2010ae 604 */
605
606 zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
6d2010ae 607
b0d623f7 608 s = (vm_size_t) sizeof(struct pv_hashed_entry);
609 pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
610 4096 * 3 /* LCM x86_64*/, "pv_list");
060df5ea 611 zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
612
613 /* create pv entries for kernel pages mapped by low level
614 startup code. these have to exist so we can pmap_remove()
615 e.g. kext pages from the middle of our addr space */
616
617 vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS;
6d2010ae 618 for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) {
619 pv_rooted_entry_t pv_e;
620
621 pv_e = pai_to_pvh(ppn);
622 pv_e->va = vaddr;
623 vaddr += PAGE_SIZE;
624 pv_e->pmap = kernel_pmap;
625 queue_init(&pv_e->qlink);
626 }
627 pmap_initialized = TRUE;
628
629 max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
630
631 /*
632 * Ensure the kernel's PML4 entry exists for the basement
633 * before this is shared with any user.
634 */
635 pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);
636}
637
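/*
 * pmap_mark_range: walk the page-aligned range [sv, sv + nxrosz) in 'npmap'
 * and, for each valid 2M PDE or 4K PTE, optionally set the no-execute bit
 * (NX) and/or clear the write bit (ro). Used by pmap_lowmem_finalize() below
 * to apply final protections to kernel segments.
 */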
638static
639void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) {
640 uint64_t ev = sv + nxrosz, cv = sv;
641 pd_entry_t *pdep;
642 pt_entry_t *ptep = NULL;
643
644 assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);
645
646 for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
647 uint64_t pdev = (cv & ~((uint64_t)PDEMASK));
648
649 if (*pdep & INTEL_PTE_PS) {
650 if (NX)
651 *pdep |= INTEL_PTE_NX;
652 if (ro)
653 *pdep &= ~INTEL_PTE_WRITE;
654 cv += NBPD;
655 cv &= ~((uint64_t) PDEMASK);
656 pdep = pmap_pde(npmap, cv);
657 continue;
658 }
659
660 for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
661 if (NX)
662 *ptep |= INTEL_PTE_NX;
663 if (ro)
664 *ptep &= ~INTEL_PTE_WRITE;
665 cv += NBPT;
666 ptep = pmap_pte(npmap, cv);
667 }
668 }
669 DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
670}
671
672/*
673 * Called once VM is fully initialized so that we can release unused
674 * sections of low memory to the general pool.
675 * Also complete the set-up of identity-mapped sections of the kernel:
676 * 1) write-protect kernel text
677 * 2) map kernel text using large pages if possible
678 * 3) read and write-protect page zero (for K32)
679 * 4) map the global page at the appropriate virtual address.
680 *
681 * Use of large pages
682 * ------------------
683 * To effectively map and write-protect all kernel text pages, the text
684 * must be 2M-aligned at the base, and the data section above must also be
685 * 2M-aligned. That is, there's padding below and above. This is achieved
686 * through linker directives. Large pages are used only if this alignment
687 * exists (and is not overridden by the -kernel_text_ps_4K boot-arg). The
688 * memory layout is:
689 *
690 * : :
691 * | __DATA |
692 * sdata: ================== 2Meg
693 * | |
694 * | zero-padding |
695 * | |
696 * etext: ------------------
697 * | |
698 * : :
699 * | |
700 * | __TEXT |
701 * | |
702 * : :
703 * | |
704 * stext: ================== 2Meg
705 * | |
706 * | zero-padding |
707 * | |
708 * eHIB: ------------------
709 * | __HIB |
710 * : :
711 *
712 * Prior to changing the mapping from 4K to 2M, the zero-padding pages
713 * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
714 * 4K pages covering [stext,etext] are coalesced as 2M large pages.
715 * The now unused level-1 PTE pages are also freed.
716 */
316670eb 717extern ppnum_t vm_kernel_base_page;
718void
719pmap_lowmem_finalize(void)
720{
721 spl_t spl;
722 int i;
723
724 /*
725 * Update wired memory statistics for early boot pages
726 */
316670eb 727 PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);
728
729 /*
316670eb 730 * Free pages in pmap regions below the base:
731 * rdar://6332712
732 * We can't free all the pages to VM that EFI reports available.
733 * Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
734 * There's also a size miscalculation here: pend is one page less
735 * than it should be but this is not fixed to be backwards
736 * compatible.
737 * This is important for KASLR because up to 256*2MB = 512MB of space
738 * has to be released to VM.
739 */
740 for (i = 0;
316670eb 741 pmap_memory_regions[i].end < vm_kernel_base_page;
6d2010ae 742 i++) {
743 vm_offset_t pbase = i386_ptob(pmap_memory_regions[i].base);
744 vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1);
6d2010ae 745
746 DBG("pmap region %d [%p..[%p\n",
747 i, (void *) pbase, (void *) pend);
748
749 if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED)
750 continue;
751 /*
752 * rdar://6332712
753 * Adjust limits not to free pages in range 0xc0000-0xff000.
754 */
755 if (pbase >= 0xc0000 && pend <= 0x100000)
756 continue;
757 if (pbase < 0xc0000 && pend > 0x100000) {
758 /* page range entirely within region, free lower part */
759 DBG("- ml_static_mfree(%p,%p)\n",
760 (void *) ml_static_ptovirt(pbase),
761 (void *) (0xc0000-pbase));
762 ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase);
763 pbase = 0x100000;
764 }
765 if (pbase < 0xc0000)
766 pend = MIN(pend, 0xc0000);
767 if (pend > 0x100000)
768 pbase = MAX(pbase, 0x100000);
769 DBG("- ml_static_mfree(%p,%p)\n",
6d2010ae 770 (void *) ml_static_ptovirt(pbase),
316670eb 771 (void *) (pend - pbase));
772 ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
773 }
774
775 /* A final pass to get rid of all initial identity mappings to
776 * low pages.
777 */
778 DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
779
780 /* Remove all mappings past the descriptor aliases and low globals */
781 pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
782
783 /*
784 * If text and data are both 2MB-aligned,
785 * we can map text with large-pages,
786 * unless the -kernel_text_ps_4K boot-arg overrides.
787 */
788 if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
789 kprintf("Kernel text is 2MB aligned");
790 kernel_text_ps_4K = FALSE;
791 if (PE_parse_boot_argn("-kernel_text_ps_4K",
792 &kernel_text_ps_4K,
793 sizeof (kernel_text_ps_4K)))
794 kprintf(" but will be mapped with 4K pages\n");
795 else
796 kprintf(" and will be mapped with 2M pages\n");
797 }
798
799 (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
800 if (wpkernel)
801 kprintf("Kernel text %p-%p to be write-protected\n",
802 (void *) stext, (void *) etext);
803
804 spl = splhigh();
805
806 /*
807 * Scan over text if mappings are to be changed:
808 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
809 * - Change to large-pages if possible and not overridden.
810 */
811 if (kernel_text_ps_4K && wpkernel) {
812 vm_offset_t myva;
813 for (myva = stext; myva < etext; myva += PAGE_SIZE) {
814 pt_entry_t *ptep;
815
816 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
817 if (ptep)
316670eb 818 pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE);
819 }
820 }
821
822 if (!kernel_text_ps_4K) {
823 vm_offset_t myva;
824
825 /*
826 * Release zero-filled page padding used for 2M-alignment.
827 */
828 DBG("ml_static_mfree(%p,%p) for padding below text\n",
829 (void *) eHIB, (void *) (stext - eHIB));
830 ml_static_mfree(eHIB, stext - eHIB);
831 DBG("ml_static_mfree(%p,%p) for padding above text\n",
832 (void *) etext, (void *) (sdata - etext));
833 ml_static_mfree(etext, sdata - etext);
834
835 /*
836 * Coalesce text pages into large pages.
837 */
838 for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
839 pt_entry_t *ptep;
840 vm_offset_t pte_phys;
841 pt_entry_t *pdep;
842 pt_entry_t pde;
843
844 pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
845 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
846 DBG("myva: %p pdep: %p ptep: %p\n",
847 (void *) myva, (void *) pdep, (void *) ptep);
848 if ((*ptep & INTEL_PTE_VALID) == 0)
849 continue;
850 pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
851 pde = *pdep & PTMASK; /* page attributes from pde */
852 pde |= INTEL_PTE_PS; /* make it a 2M entry */
853 pde |= pte_phys; /* take page frame from pte */
854
855 if (wpkernel)
316670eb 856 pde &= ~INTEL_PTE_WRITE;
857 DBG("pmap_store_pte(%p,0x%llx)\n",
858 (void *)pdep, pde);
859 pmap_store_pte(pdep, pde);
860
861 /*
862 * Free the now-unused level-1 pte.
863 * Note: ptep is a virtual address to the pte in the
864 * recursive map. We can't use this address to free
865 * the page. Instead we need to compute its address
866 * in the Idle PTEs in "low memory".
867 */
868 vm_offset_t vm_ptep = (vm_offset_t) KPTphys
869 + (pte_phys >> PTPGSHIFT);
870 DBG("ml_static_mfree(%p,0x%x) for pte\n",
871 (void *) vm_ptep, PAGE_SIZE);
872 ml_static_mfree(vm_ptep, PAGE_SIZE);
873 }
874
875 /* Change variable read by sysctl machdep.pmap */
876 pmap_kernel_text_ps = I386_LPGBYTES;
877 }
878
879 boolean_t doconstro = TRUE;
880
881 (void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
882
883 if ((sconstdata | econstdata) & PAGE_MASK) {
884 kprintf("Const DATA misaligned 0x%lx 0x%lx\n", sconstdata, econstdata);
885 if ((sconstdata & PAGE_MASK) || (doconstro_override == FALSE))
886 doconstro = FALSE;
887 }
888
889 if ((sconstdata > edata) || (sconstdata < sdata) || ((econstdata - sconstdata) >= (edata - sdata))) {
890 kprintf("Const DATA incorrect size 0x%lx 0x%lx 0x%lx 0x%lx\n", sconstdata, econstdata, sdata, edata);
891 doconstro = FALSE;
892 }
893
894 if (doconstro)
895 kprintf("Marking const DATA read-only\n");
896
897 vm_offset_t dva;
898
899 for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
900 assert(((sdata | edata) & PAGE_MASK) == 0);
901 if ( (sdata | edata) & PAGE_MASK) {
902 kprintf("DATA misaligned, 0x%lx, 0x%lx\n", sdata, edata);
903 break;
904 }
905
906 pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
907
908 dpte = *dptep;
909
910 assert((dpte & INTEL_PTE_VALID));
911 if ((dpte & INTEL_PTE_VALID) == 0) {
912 kprintf("Missing data mapping 0x%lx 0x%lx 0x%lx\n", dva, sdata, edata);
913 continue;
914 }
915
916 dpte |= INTEL_PTE_NX;
917 if (doconstro && (dva >= sconstdata) && (dva < econstdata)) {
918 dpte &= ~INTEL_PTE_WRITE;
919 }
920 pmap_store_pte(dptep, dpte);
921 }
922 kernel_segment_command_t * seg;
923 kernel_section_t * sec;
924
925 for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
926 if (!strcmp(seg->segname, "__TEXT") ||
927 !strcmp(seg->segname, "__DATA")) {
928 continue;
929 }
930 //XXX
931 if (!strcmp(seg->segname, "__KLD")) {
932 continue;
933 }
934 if (!strcmp(seg->segname, "__HIB")) {
935 for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
936 if (sec->addr & PAGE_MASK)
937 panic("__HIB segment's sections misaligned");
938 if (!strcmp(sec->sectname, "__text")) {
939 pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
940 } else {
941 pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
942 }
943 }
944 } else {
945 pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
946 }
947 }
948
949 /*
950 * If we're debugging, map the low global vector page at the fixed
951 * virtual address. Otherwise, remove the mapping for this.
952 */
953 if (debug_boot_arg) {
954 pt_entry_t *pte = NULL;
955 if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS)))
956 panic("lowmem pte");
957 /* make sure it is defined on page boundary */
958 assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
959 pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
960 | INTEL_PTE_REF
961 | INTEL_PTE_MOD
962 | INTEL_PTE_WIRED
963 | INTEL_PTE_VALID
964 | INTEL_PTE_WRITE
965 | INTEL_PTE_NX);
966 } else {
967 pmap_remove(kernel_pmap,
968 LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
969 }
970
971 splx(spl);
972 if (pmap_pcid_ncpus)
973 tlb_flush_global();
974 else
975 flush_tlb_raw();
976}
977
978/*
979 * This function is only used for debugging from the VM layer.
980 */
981boolean_t
982pmap_verify_free(
983 ppnum_t pn)
984{
985 pv_rooted_entry_t pv_h;
986 int pai;
987 boolean_t result;
988
989 assert(pn != vm_page_fictitious_addr);
990
991 if (!pmap_initialized)
992 return(TRUE);
993
994 if (pn == vm_page_guard_addr)
995 return TRUE;
996
997 pai = ppn_to_pai(pn);
998 if (!IS_MANAGED_PAGE(pai))
999 return(FALSE);
1000 pv_h = pai_to_pvh(pn);
1001 result = (pv_h->pmap == PMAP_NULL);
1002 return(result);
1003}
1004
1005boolean_t
1006pmap_is_empty(
1007 pmap_t pmap,
1008 vm_map_offset_t va_start,
1009 vm_map_offset_t va_end)
1010{
1011 vm_map_offset_t offset;
1012 ppnum_t phys_page;
1013
1014 if (pmap == PMAP_NULL) {
1015 return TRUE;
1016 }
1017
1018 /*
1019 * Check the resident page count
1020 * - if it's zero, the pmap is completely empty.
1021 * This short-circuit test prevents a virtual address scan which is
1022 * painfully slow for 64-bit spaces.
1023 * This assumes the count is correct
1024 * ... the debug kernel ought to check this, perhaps by walking the page tables.
1025 */
1026 if (pmap->stats.resident_count == 0)
1027 return TRUE;
1028
1029 for (offset = va_start;
1030 offset < va_end;
1031 offset += PAGE_SIZE_64) {
1032 phys_page = pmap_find_phys(pmap, offset);
1033 if (phys_page) {
1034 kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
1035 "page %d at 0x%llx\n",
1036 pmap, va_start, va_end, phys_page, offset);
1037 return FALSE;
1038 }
1039 }
1040
1041 return TRUE;
1042}
1043
1044
1045/*
1046 * Create and return a physical map.
1047 *
1048 * If the size specified for the map
1049 * is zero, the map is an actual physical
1050 * map, and may be referenced by the
1051 * hardware.
1052 *
1053 * If the size specified is non-zero,
1054 * the map will be used in software only, and
1055 * is bounded by that size.
1056 */
1057pmap_t
1058pmap_create(
316670eb 1059 ledger_t ledger,
1060 vm_map_size_t sz,
1061 boolean_t is_64bit)
1062{
1063 pmap_t p;
1064 vm_size_t size;
1065 pml4_entry_t *pml4;
1066 pml4_entry_t *kpml4;
1067
1068 PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
1069 (uint32_t) (sz>>32), (uint32_t) sz, is_64bit, 0, 0);
1070
1071 size = (vm_size_t) sz;
1072
1073 /*
1074 * A software use-only map doesn't even need a map.
1075 */
1076
1077 if (size != 0) {
1078 return(PMAP_NULL);
1079 }
1080
1081 p = (pmap_t) zalloc(pmap_zone);
1082 if (PMAP_NULL == p)
1083 panic("pmap_create zalloc");
1084 /* Zero all fields */
1085 bzero(p, sizeof(*p));
1086 /* init counts now since we'll be bumping some */
1087 simple_lock_init(&p->lock, 0);
1088 p->stats.resident_count = 0;
1089 p->stats.resident_max = 0;
1090 p->stats.wired_count = 0;
1091 p->ref_count = 1;
1092 p->nx_enabled = 1;
1093 p->pm_shared = FALSE;
1094 ledger_reference(ledger);
1095 p->ledger = ledger;
1096
1097 p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
1098 if (pmap_pcid_ncpus)
1099 pmap_pcid_initialize(p);
316670eb 1100
6d2010ae 1101 p->pm_pml4 = zalloc(pmap_anchor_zone);
b0d623f7 1102
6d2010ae 1103 pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
b0d623f7 1104
6d2010ae 1105 memset((char *)p->pm_pml4, 0, PAGE_SIZE);
b0d623f7 1106
6d2010ae 1107 p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
1108
1109 /* allocate the vm_objs to hold the pdpt, pde and pte pages */
1110
1111 p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS));
1112 if (NULL == p->pm_obj_pml4)
1113 panic("pmap_create pdpt obj");
1114
1115 p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS));
1116 if (NULL == p->pm_obj_pdpt)
1117 panic("pmap_create pdpt obj");
1118
1119 p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS));
1120 if (NULL == p->pm_obj)
1121 panic("pmap_create pte obj");
1122
6d2010ae 1123 /* All pmaps share the kernel's pml4 */
1124 pml4 = pmap64_pml4(p, 0ULL);
1125 kpml4 = kernel_pmap->pm_pml4;
1126 pml4[KERNEL_PML4_INDEX] = kpml4[KERNEL_PML4_INDEX];
1127 pml4[KERNEL_KEXTS_INDEX] = kpml4[KERNEL_KEXTS_INDEX];
316670eb 1128 pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
1129
1130 PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END,
1131 p, is_64bit, 0, 0, 0);
1132
1133 return(p);
1134}
1135
1136/*
1137 * Retire the given physical map from service.
1138 * Should only be called if the map contains
1139 * no valid mappings.
1140 */
1141
1142void
6d2010ae 1143pmap_destroy(pmap_t p)
b0d623f7 1144{
6d2010ae 1145 int c;
1146
1147 if (p == PMAP_NULL)
1148 return;
1149
1150 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
1151 p, 0, 0, 0, 0);
1152
1153 PMAP_LOCK(p);
1154
1155 c = --p->ref_count;
1156
1157 pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);
1158
1159 if (c == 0) {
1160 /*
1161 * If some cpu is not using the physical pmap pointer that it
1162 * is supposed to be (see set_dirbase), we might be using the
1163 * pmap that is being destroyed! Make sure we are
1164 * physically on the right pmap:
1165 */
1166 PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
1167 if (pmap_pcid_ncpus)
1168 pmap_destroy_pcid_sync(p);
b0d623f7 1169 }
ebb1b9f4 1170
1171 PMAP_UNLOCK(p);
1172
1173 if (c != 0) {
1174 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
1175 p, 1, 0, 0, 0);
6d2010ae 1176 pmap_assert(p == kernel_pmap);
1177 return; /* still in use */
1178 }
1179
1180 /*
1181 * Free the memory maps, then the
1182 * pmap structure.
1183 */
1184 int inuse_ptepages = 0;
1185
6d2010ae 1186 zfree(pmap_anchor_zone, p->pm_pml4);
1187
1188 inuse_ptepages += p->pm_obj_pml4->resident_page_count;
1189 vm_object_deallocate(p->pm_obj_pml4);
1190
1191 inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
1192 vm_object_deallocate(p->pm_obj_pdpt);
1193
1194 inuse_ptepages += p->pm_obj->resident_page_count;
1195 vm_object_deallocate(p->pm_obj);
1196
1197 OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count);
1198 PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
1199 ledger_dereference(p->ledger);
1200 zfree(pmap_zone, p);
1201
1202 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
1203 0, 0, 0, 0, 0);
1204}
1205
1206/*
1207 * Add a reference to the specified pmap.
1208 */
1209
1210void
1211pmap_reference(pmap_t p)
1212{
1213 if (p != PMAP_NULL) {
1214 PMAP_LOCK(p);
1215 p->ref_count++;
1216 PMAP_UNLOCK(p);
1217 }
1218}
1219
1220/*
1221 * Remove phys addr if mapped in specified map
1222 *
1223 */
1224void
1225pmap_remove_some_phys(
1226 __unused pmap_t map,
1227 __unused ppnum_t pn)
1228{
1229
1230/* Implement to support working set code */
1231
1232}
1233
1234/*
1235 * Set the physical protection on the
1236 * specified range of this map as requested.
1237 * Will not increase permissions.
1238 */
1239void
1240pmap_protect(
1241 pmap_t map,
1242 vm_map_offset_t sva,
1243 vm_map_offset_t eva,
1244 vm_prot_t prot)
1245{
1246 pt_entry_t *pde;
1247 pt_entry_t *spte, *epte;
1248 vm_map_offset_t lva;
1249 vm_map_offset_t orig_sva;
1250 boolean_t set_NX;
1251 int num_found = 0;
1252
1253 pmap_intr_assert();
1254
1255 if (map == PMAP_NULL)
1256 return;
1257
1258 if (prot == VM_PROT_NONE) {
1259 pmap_remove(map, sva, eva);
1260 return;
1261 }
1262 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
1263 map,
1264 (uint32_t) (sva >> 32), (uint32_t) sva,
1265 (uint32_t) (eva >> 32), (uint32_t) eva);
1266
1267 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled)
1268 set_NX = FALSE;
1269 else
1270 set_NX = TRUE;
1271
1272 PMAP_LOCK(map);
1273
1274 orig_sva = sva;
1275 while (sva < eva) {
1276 lva = (sva + pde_mapped_size) & ~(pde_mapped_size - 1);
1277 if (lva > eva)
1278 lva = eva;
1279 pde = pmap_pde(map, sva);
1280 if (pde && (*pde & INTEL_PTE_VALID)) {
1281 if (*pde & INTEL_PTE_PS) {
1282 /* superpage */
1283 spte = pde;
1284 epte = spte+1; /* excluded */
1285 } else {
1286 spte = pmap_pte(map, (sva & ~(pde_mapped_size - 1)));
1287 spte = &spte[ptenum(sva)];
1288 epte = &spte[intel_btop(lva - sva)];
1289 }
1290
1291 for (; spte < epte; spte++) {
1292 if (!(*spte & INTEL_PTE_VALID))
1293 continue;
1294
1295 if (prot & VM_PROT_WRITE)
316670eb 1296 pmap_update_pte(spte, 0, INTEL_PTE_WRITE);
b0d623f7 1297 else
316670eb 1298 pmap_update_pte(spte, INTEL_PTE_WRITE, 0);
1299
1300 if (set_NX)
316670eb 1301 pmap_update_pte(spte, 0, INTEL_PTE_NX);
b0d623f7 1302 else
316670eb 1303 pmap_update_pte(spte, INTEL_PTE_NX, 0);
1304 num_found++;
1305 }
1306 }
1307 sva = lva;
1308 }
1309 if (num_found)
1310 PMAP_UPDATE_TLBS(map, orig_sva, eva);
1311
1312 PMAP_UNLOCK(map);
1313
1314 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
1315 0, 0, 0, 0, 0);
1316
1317}
1318
1319/* Map a (possibly) autogenned block */
1320void
1321pmap_map_block(
1322 pmap_t pmap,
1323 addr64_t va,
1324 ppnum_t pa,
1325 uint32_t size,
1326 vm_prot_t prot,
1327 int attr,
1328 __unused unsigned int flags)
1329{
1330 uint32_t page;
1331 int cur_page_size;
1332
1333 if (attr & VM_MEM_SUPERPAGE)
1334 cur_page_size = SUPERPAGE_SIZE;
1335 else
1336 cur_page_size = PAGE_SIZE;
1337
1338 for (page = 0; page < size; page+=cur_page_size/PAGE_SIZE) {
316670eb 1339 pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
1340 va += cur_page_size;
1341 pa+=cur_page_size/PAGE_SIZE;
1342 }
1343}
1344
316670eb 1345kern_return_t
1346pmap_expand_pml4(
1347 pmap_t map,
1348 vm_map_offset_t vaddr,
1349 unsigned int options)
1350{
1351 vm_page_t m;
1352 pmap_paddr_t pa;
1353 uint64_t i;
1354 ppnum_t pn;
1355 pml4_entry_t *pml4p;
1356
1357 DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
1358
1359 /*
1360 * Allocate a VM page for the pml4 page
1361 */
1362 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1363 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1364 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1365 VM_PAGE_WAIT();
316670eb 1366 }
1367 /*
1368 * put the page into the pmap's obj list so it
1369 * can be found later.
1370 */
1371 pn = m->phys_page;
1372 pa = i386_ptob(pn);
1373 i = pml4idx(map, vaddr);
1374
1375 /*
1376 * Zero the page.
1377 */
1378 pmap_zero_page(pn);
1379
1380 vm_page_lockspin_queues();
1381 vm_page_wire(m);
1382 vm_page_unlock_queues();
1383
1384 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1385 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1386 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
1387
1388 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1389 vm_object_lock(map->pm_obj_pml4);
1390
1391 PMAP_LOCK(map);
1392 /*
1393 * See if someone else expanded us first
1394 */
1395 if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
1396 PMAP_UNLOCK(map);
1397 vm_object_unlock(map->pm_obj_pml4);
1398
1399 VM_PAGE_FREE(m);
1400
1401 OSAddAtomic(-1, &inuse_ptepages_count);
1402 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1403 return KERN_SUCCESS;
1404 }
1405
1406#if 0 /* DEBUG */
1407 if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i)) {
1408 panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1409 map, map->pm_obj_pml4, vaddr, i);
1410 }
1411#endif
1412 vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i);
1413 vm_object_unlock(map->pm_obj_pml4);
1414
1415 /*
1416 * Set the page directory entry for this page table.
1417 */
1418 pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
1419
1420 pmap_store_pte(pml4p, pa_to_pte(pa)
1421 | INTEL_PTE_VALID
1422 | INTEL_PTE_USER
1423 | INTEL_PTE_WRITE);
1424
1425 PMAP_UNLOCK(map);
1426
316670eb 1427 return KERN_SUCCESS;
1428}
1429
1430kern_return_t
1431pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
1432{
1433 vm_page_t m;
1434 pmap_paddr_t pa;
1435 uint64_t i;
1436 ppnum_t pn;
1437 pdpt_entry_t *pdptp;
1438
1439 DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
1440
1441 while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
1442 kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
1443 if (pep4kr != KERN_SUCCESS)
1444 return pep4kr;
1445 }
1446
1447 /*
1448 * Allocate a VM page for the pdpt page
1449 */
1450 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1451 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1452 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1453 VM_PAGE_WAIT();
316670eb 1454 }
1455
1456 /*
1457 * put the page into the pmap's obj list so it
1458 * can be found later.
1459 */
1460 pn = m->phys_page;
1461 pa = i386_ptob(pn);
1462 i = pdptidx(map, vaddr);
1463
1464 /*
1465 * Zero the page.
1466 */
1467 pmap_zero_page(pn);
1468
1469 vm_page_lockspin_queues();
1470 vm_page_wire(m);
1471 vm_page_unlock_queues();
1472
1473 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1474 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1475 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
1476
1477 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1478 vm_object_lock(map->pm_obj_pdpt);
1479
1480 PMAP_LOCK(map);
1481 /*
1482 * See if someone else expanded us first
1483 */
1484 if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
1485 PMAP_UNLOCK(map);
1486 vm_object_unlock(map->pm_obj_pdpt);
1487
1488 VM_PAGE_FREE(m);
1489
1490 OSAddAtomic(-1, &inuse_ptepages_count);
1491 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1492 return KERN_SUCCESS;
1493 }
1494
1495#if 0 /* DEBUG */
1496 if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i)) {
1497 panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1498 map, map->pm_obj_pdpt, vaddr, i);
1499 }
1500#endif
1501 vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i);
1502 vm_object_unlock(map->pm_obj_pdpt);
1503
1504 /*
1505 * Set the page directory entry for this page table.
1506 */
1507 pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
1508
1509 pmap_store_pte(pdptp, pa_to_pte(pa)
1510 | INTEL_PTE_VALID
1511 | INTEL_PTE_USER
1512 | INTEL_PTE_WRITE);
1513
1514 PMAP_UNLOCK(map);
1515
316670eb 1516 return KERN_SUCCESS;
1517
1518}
1519
1520
1521
1522/*
1523 * Routine: pmap_expand
1524 *
1525 * Expands a pmap to be able to map the specified virtual address.
1526 *
1527 * Allocates new virtual memory for the P0 or P1 portion of the
1528 * pmap, then re-maps the physical pages that were in the old
1529 * pmap to be in the new pmap.
1530 *
1531 * Must be called with the pmap system and the pmap unlocked,
1532 * since these must be unlocked to use vm_allocate or vm_deallocate.
1533 * Thus it must be called in a loop that checks whether the map
1534 * has been expanded enough.
1535 * (We won't loop forever, since page tables aren't shrunk.)
1536 */
316670eb 1537kern_return_t
1538pmap_expand(
1539 pmap_t map,
1540 vm_map_offset_t vaddr,
1541 unsigned int options)
1542{
1543 pt_entry_t *pdp;
1544 register vm_page_t m;
1545 register pmap_paddr_t pa;
1546 uint64_t i;
1547 ppnum_t pn;
1548
1549
1550 /*
1551 * For the kernel, the virtual address must be in or above the basement
1552 * which is for kexts and is in the 512GB immediately below the kernel.
1553 * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
1554 */
1555 if (map == kernel_pmap &&
1556 !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))
1557 panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
1558
1559
1560 while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
1561 kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
1562 if (pepkr != KERN_SUCCESS)
1563 return pepkr;
1564 }
1565
1566 /*
1567 * Allocate a VM page for the pde entries.
1568 */
1569 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1570 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1571 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1572 VM_PAGE_WAIT();
316670eb 1573 }
1574
1575 /*
1576 * put the page into the pmap's obj list so it
1577 * can be found later.
1578 */
1579 pn = m->phys_page;
1580 pa = i386_ptob(pn);
1581 i = pdeidx(map, vaddr);
1582
1583 /*
1584 * Zero the page.
1585 */
1586 pmap_zero_page(pn);
1587
1588 vm_page_lockspin_queues();
1589 vm_page_wire(m);
1590 vm_page_unlock_queues();
1591
1592 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1593 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1594 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
1595
1596 /* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1597 vm_object_lock(map->pm_obj);
1598
1599 PMAP_LOCK(map);
1600
1601 /*
1602 * See if someone else expanded us first
1603 */
1604 if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
1605 PMAP_UNLOCK(map);
1606 vm_object_unlock(map->pm_obj);
1607
1608 VM_PAGE_FREE(m);
1609
1610 OSAddAtomic(-1, &inuse_ptepages_count);
1611 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1612 return KERN_SUCCESS;
1613 }
1614
1615#if 0 /* DEBUG */
1616 if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i)) {
1617 panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
1618 map, map->pm_obj, vaddr, i);
1619 }
1620#endif
1621 vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i);
1622 vm_object_unlock(map->pm_obj);
1623
1624 /*
1625 * Set the page directory entry for this page table.
1626 */
1627 pdp = pmap_pde(map, vaddr);
1628 pmap_store_pte(pdp, pa_to_pte(pa)
1629 | INTEL_PTE_VALID
1630 | INTEL_PTE_USER
1631 | INTEL_PTE_WRITE);
1632
1633 PMAP_UNLOCK(map);
1634
316670eb 1635 return KERN_SUCCESS;
1636}
1637
1638/* On K64 machines with more than 32GB of memory, pmap_steal_memory
1639 * will allocate past the 1GB of pre-expanded virtual kernel area. This
1640 * function allocates all the page tables using memory from the same pool
1641 * that pmap_steal_memory uses, rather than calling vm_page_grab (which
1642 * isn't available yet). */
1643void
1644pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
1645{
1646 ppnum_t pn;
1647 pt_entry_t *pte;
1648
1649 PMAP_LOCK(pmap);
1650
1651 if(pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
0b4c1975 1652 if (!pmap_next_page_hi(&pn))
1653 panic("pmap_pre_expand");
1654
1655 pmap_zero_page(pn);
1656
1657 pte = pmap64_pml4(pmap, vaddr);
1658
1659 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
1660 | INTEL_PTE_VALID
1661 | INTEL_PTE_USER
1662 | INTEL_PTE_WRITE);
1663 }
1664
1665 if(pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
0b4c1975 1666 if (!pmap_next_page_hi(&pn))
1667 panic("pmap_pre_expand");
1668
1669 pmap_zero_page(pn);
1670
1671 pte = pmap64_pdpt(pmap, vaddr);
1672
1673 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
1674 | INTEL_PTE_VALID
1675 | INTEL_PTE_USER
1676 | INTEL_PTE_WRITE);
1677 }
1678
1679 if(pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
0b4c1975 1680 if (!pmap_next_page_hi(&pn))
1681 panic("pmap_pre_expand");
1682
1683 pmap_zero_page(pn);
1684
1685 pte = pmap64_pde(pmap, vaddr);
1686
1687 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
1688 | INTEL_PTE_VALID
1689 | INTEL_PTE_USER
1690 | INTEL_PTE_WRITE);
1691 }
1692
1693 PMAP_UNLOCK(pmap);
1694}
1695
1696/*
1697 * pmap_sync_page_data_phys(ppnum_t pa)
1698 *
1699 * Invalidates all of the instruction cache on a physical page and
1700 * pushes any dirty data from the data cache for the same physical page
1701 * Not required in i386.
1702 */
1703void
1704pmap_sync_page_data_phys(__unused ppnum_t pa)
1705{
1706 return;
1707}
1708
1709/*
1710 * pmap_sync_page_attributes_phys(ppnum_t pa)
1711 *
1712 * Write back and invalidate all cachelines on a physical page.
1713 */
1714void
1715pmap_sync_page_attributes_phys(ppnum_t pa)
1716{
1717 cache_flush_page_phys(pa);
1718}
1719
1720
1721
1722#ifdef CURRENTLY_UNUSED_AND_UNTESTED
1723
1724int collect_ref;
1725int collect_unref;
1726
1727/*
1728 * Routine: pmap_collect
1729 * Function:
1730 * Garbage collects the physical map system for
1731 * pages which are no longer used.
1732 * Success need not be guaranteed -- that is, there
1733 * may well be pages which are not referenced, but
1734 * others may be collected.
1735 * Usage:
1736 * Called by the pageout daemon when pages are scarce.
1737 */
1738void
1739pmap_collect(
1740 pmap_t p)
1741{
1742 register pt_entry_t *pdp, *ptp;
1743 pt_entry_t *eptp;
1744 int wired;
1745
1746 if (p == PMAP_NULL)
1747 return;
1748
1749 if (p == kernel_pmap)
1750 return;
1751
1752 /*
1753 * Garbage collect map.
1754 */
1755 PMAP_LOCK(p);
1756
1757 for (pdp = (pt_entry_t *)p->dirbase;
1758 pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
1759 pdp++)
1760 {
1761 if (*pdp & INTEL_PTE_VALID) {
1762 if(*pdp & INTEL_PTE_REF) {
1763 pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
1764 collect_ref++;
1765 } else {
1766 collect_unref++;
1767 ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
1768 eptp = ptp + NPTEPG;
1769
1770 /*
1771 * If the pte page has any wired mappings, we cannot
1772 * free it.
1773 */
1774 wired = 0;
1775 {
1776 register pt_entry_t *ptep;
1777 for (ptep = ptp; ptep < eptp; ptep++) {
1778 if (iswired(*ptep)) {
1779 wired = 1;
1780 break;
1781 }
1782 }
1783 }
1784 if (!wired) {
1785 /*
1786 * Remove the virtual addresses mapped by this pte page.
1787 */
1788 pmap_remove_range(p,
1789 pdetova(pdp - (pt_entry_t *)p->dirbase),
1790 ptp,
1791 eptp);
1792
1793 /*
1794 * Invalidate the page directory pointer.
1795 */
1796 pmap_store_pte(pdp, 0x0);
1797
1798 PMAP_UNLOCK(p);
1799
1800 /*
1801 * And free the pte page itself.
1802 */
1803 {
1804 register vm_page_t m;
1805
1806 vm_object_lock(p->pm_obj);
1807
1808 m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]));
1809 if (m == VM_PAGE_NULL)
1810 panic("pmap_collect: pte page not in object");
1811
1812 vm_object_unlock(p->pm_obj);
1813
1814 VM_PAGE_FREE(m);
1815
1816 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb 1817 PMAP_ZINFO_PFREE(p, PAGE_SIZE);
1818 }
1819
1820 PMAP_LOCK(p);
1821 }
1822 }
1823 }
1824 }
1825
1826 PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
1827 PMAP_UNLOCK(p);
1828 return;
1829
1830}
1831#endif
1832
1833
1834void
1835pmap_copy_page(ppnum_t src, ppnum_t dst)
1836{
1837 bcopy_phys((addr64_t)i386_ptob(src),
1838 (addr64_t)i386_ptob(dst),
1839 PAGE_SIZE);
1840}
1841
1842
1843/*
1844 * Routine: pmap_pageable
1845 * Function:
1846 * Make the specified pages (by pmap, offset)
1847 * pageable (or not) as requested.
1848 *
1849 * A page which is not pageable may not take
1850 * a fault; therefore, its page table entry
1851 * must remain valid for the duration.
1852 *
1853 * This routine is merely advisory; pmap_enter
1854 * will specify that these pages are to be wired
1855 * down (or not) as appropriate.
1856 */
1857void
1858pmap_pageable(
1859 __unused pmap_t pmap,
1860 __unused vm_map_offset_t start_addr,
1861 __unused vm_map_offset_t end_addr,
1862 __unused boolean_t pageable)
1863{
1864#ifdef lint
1865 pmap++; start_addr++; end_addr++; pageable++;
1866#endif /* lint */
1867}
1868
1869void
1870invalidate_icache(__unused vm_offset_t addr,
1871 __unused unsigned cnt,
1872 __unused int phys)
1873{
1874 return;
1875}
1876
1877void
1878flush_dcache(__unused vm_offset_t addr,
1879 __unused unsigned count,
1880 __unused int phys)
1881{
1882 return;
1883}
1884
1885#if CONFIG_DTRACE
1886/*
1887 * Constrain DTrace copyin/copyout actions
1888 */
1889extern kern_return_t dtrace_copyio_preflight(addr64_t);
1890extern kern_return_t dtrace_copyio_postflight(addr64_t);
1891
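/*
 * Fail the preflight if a probe-time copyio could not be performed safely:
 * when the current map is the kernel map, when %cr3 does not reference the
 * current thread's pmap (or, under -no_shared_cr3, the kernel pmap), or when
 * a copyio window is already active on this thread.
 */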
1892kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
1893{
1894 thread_t thread = current_thread();
6d2010ae 1895 uint64_t ccr3;
1896
1897 if (current_map() == kernel_map)
1898 return KERN_FAILURE;
1899 else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE))
1900 return KERN_FAILURE;
1901 else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3))
1902 return KERN_FAILURE;
1903 else if (thread->machine.specFlags & CopyIOActive)
1904 return KERN_FAILURE;
1905 else
1906 return KERN_SUCCESS;
1907}
1908
1909kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
1910{
1911 return KERN_SUCCESS;
1912}
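/*
 * Editorial sketch, not part of the original source: a probe-context
 * copy would be expected to bracket its access with the pre/postflight
 * hooks above, roughly as below.  do_probe_context_copy() is a
 * hypothetical placeholder for the actual copy routine.
 */
#if 0	/* illustrative only */
	if (dtrace_copyio_preflight(uaddr) == KERN_SUCCESS) {
		/* user cr3 is resident and no copy window is active */
		do_probe_context_copy(uaddr, kaddr, size);
		(void) dtrace_copyio_postflight(uaddr);
	}
#endif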
1913#endif /* CONFIG_DTRACE */
1914
1915#include <mach_vm_debug.h>
1916#if MACH_VM_DEBUG
1917#include <vm/vm_debug.h>
1918
1919int
1920pmap_list_resident_pages(
1921 __unused pmap_t pmap,
1922 __unused vm_offset_t *listp,
1923 __unused int space)
1924{
1925 return 0;
1926}
1927#endif /* MACH_VM_DEBUG */
1928
1929
1930
1931/* temporary workaround */
1932boolean_t
1933coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
1934{
1935#if 0
1936 pt_entry_t *ptep;
1937
1938 ptep = pmap_pte(map->pmap, va);
1939 if (0 == ptep)
1940 return FALSE;
1941 return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
1942#else
1943 return TRUE;
1944#endif
1945}
1946
1947
1948boolean_t
1949phys_page_exists(ppnum_t pn)
1950{
1951 assert(pn != vm_page_fictitious_addr);
1952
1953 if (!pmap_initialized)
1954 return TRUE;
1955
1956 if (pn == vm_page_guard_addr)
1957 return FALSE;
1958
1959 if (!IS_MANAGED_PAGE(ppn_to_pai(pn)))
1960 return FALSE;
1961
1962 return TRUE;
1963}
1964
1965
1966
1967void
1968pmap_switch(pmap_t tpmap)
1969{
1970 spl_t s;
1971
1972 s = splhigh(); /* Make sure interruptions are disabled */
1973 set_dirbase(tpmap, current_thread());
1974 splx(s);
1975}
1976
1977
1978/*
1979 * disable no-execute capability on
1980 * the specified pmap
1981 */
1982void
1983pmap_disable_NX(pmap_t pmap)
1984{
1985 pmap->nx_enabled = 0;
1986}
1987
1988void
1989pt_fake_zone_init(int zone_index)
1990{
1991 pt_fake_zone_index = zone_index;
1992}
1993
1994void
1995pt_fake_zone_info(
1996 int *count,
1997 vm_size_t *cur_size,
1998 vm_size_t *max_size,
1999 vm_size_t *elem_size,
2000 vm_size_t *alloc_size,
2001	uint64_t	*sum_size,
2002	int		*collectable,
2003 int *exhaustable,
2004 int *caller_acct)
2005{
2006 *count = inuse_ptepages_count;
2007 *cur_size = PAGE_SIZE * inuse_ptepages_count;
2008 *max_size = PAGE_SIZE * (inuse_ptepages_count +
2009 vm_page_inactive_count +
2010 vm_page_active_count +
2011 vm_page_free_count);
2012 *elem_size = PAGE_SIZE;
2013 *alloc_size = PAGE_SIZE;
2014	*sum_size = alloc_ptepages_count * PAGE_SIZE;
2015
2016 *collectable = 1;
2017 *exhaustable = 0;
2018	*caller_acct = 1;
2019}
2020
2021static inline void
2022pmap_cpuset_NMIPI(cpu_set cpu_mask) {
2023 unsigned int cpu, cpu_bit;
2024 uint64_t deadline;
2025
2026 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2027 if (cpu_mask & cpu_bit)
2028 cpu_NMI_interrupt(cpu);
2029 }
2030 deadline = mach_absolute_time() + (LockTimeOut);
2031 while (mach_absolute_time() < deadline)
2032 cpu_pause();
2033}
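/*
 * Editorial sketch, not part of the original source: cpu_set is a plain
 * bitmask indexed by cpu number (bit n selects cpu n, matching the
 * cpu_bit <<= 1 walk above), so a caller builds the mask directly:
 */
#if 0	/* illustrative only */
	cpu_set mask = 0;

	mask |= (1 << 2);		/* target cpu 2 */
	mask |= (1 << 5);		/* target cpu 5 */
	pmap_cpuset_NMIPI(mask);	/* NMI both, then spin up to LockTimeOut */
#endif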
2034
2035/*
2036 * Called with pmap locked, we:
2037 * - scan through per-cpu data to see which other cpus need to flush
2038 * - send an IPI to each non-idle cpu to be flushed
2039 * - wait for all to signal back that they are inactive or we see that
2040 * they are at a safe point (idle).
2041 * - flush the local tlb if active for this pmap
2042 * - return ... the caller will unlock the pmap
2043 */
2044
2045	void
2046	pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv)
2047{
2048 unsigned int cpu;
2049 unsigned int cpu_bit;
2050 cpu_set cpus_to_signal;
2051 unsigned int my_cpu = cpu_number();
2052 pmap_paddr_t pmap_cr3 = pmap->pm_cr3;
2053 boolean_t flush_self = FALSE;
2054 uint64_t deadline;
2055	boolean_t pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
2056
2057 assert((processor_avail_count < 2) ||
2058 (ml_get_interrupts_enabled() && get_preemption_level() != 0));
2059
2060 /*
2061 * Scan other cpus for matching active or task CR3.
2062 * For idle cpus (with no active map) we mark them invalid but
2063 * don't signal -- they'll check as they go busy.
2064 */
2065 cpus_to_signal = 0;
2066
2067 if (pmap_pcid_ncpus) {
2068 pmap_pcid_invalidate_all_cpus(pmap);
2069 __asm__ volatile("mfence":::"memory");
2070 }
2071
2072 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2073 if (!cpu_datap(cpu)->cpu_running)
2074 continue;
2075 uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
2076 uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);
2077
2078 if ((pmap_cr3 == cpu_task_cr3) ||
2079 (pmap_cr3 == cpu_active_cr3) ||
2080	    (pmap_is_shared)) {
2081 if (cpu == my_cpu) {
2082 flush_self = TRUE;
2083 continue;
2084 }
2085 if (pmap_pcid_ncpus && pmap_is_shared)
2086 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2087 else
2088 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
2089 __asm__ volatile("mfence":::"memory");
2090
2091 /*
2092 * We don't need to signal processors which will flush
2093 * lazily at the idle state or kernel boundary.
2094 * For example, if we're invalidating the kernel pmap,
2095 * processors currently in userspace don't need to flush
2096 * their TLBs until the next time they enter the kernel.
2097 * Alterations to the address space of a task active
2098 * on a remote processor result in a signal, to
2099 * account for copy operations. (There may be room
2100 * for optimization in such cases).
2101 * The order of the loads below with respect
2102 * to the store to the "cpu_tlb_invalid" field above
2103 * is important--hence the barrier.
2104 */
2105 if (CPU_CR3_IS_ACTIVE(cpu) &&
2106 (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
2107 pmap->pm_shared ||
2108 (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
2109 cpus_to_signal |= cpu_bit;
2110 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2111 }
2112 }
2113 }
2114
2115 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
2116 pmap, cpus_to_signal, flush_self, startv, endv);
2117
2118 /*
2119 * Flush local tlb if required.
2120 * Do this now to overlap with other processors responding.
2121 */
2122 if (flush_self) {
2123 if (pmap_pcid_ncpus) {
2124 pmap_pcid_validate_cpu(pmap, my_cpu);
2125 if (pmap_is_shared)
2126 tlb_flush_global();
2127 else
2128 flush_tlb_raw();
2129 }
2130 else
2131 flush_tlb_raw();
2132 }
2133
2134 if (cpus_to_signal) {
2135 cpu_set cpus_to_respond = cpus_to_signal;
2136
2137 deadline = mach_absolute_time() + LockTimeOut;
2138 /*
2139 * Wait for those other cpus to acknowledge
2140 */
2141 while (cpus_to_respond != 0) {
2142	long orig_acks = 0;
2143
2144 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2145 /* Consider checking local/global invalidity
2146 * as appropriate in the PCID case.
2147 */
2148 if ((cpus_to_respond & cpu_bit) != 0) {
2149 if (!cpu_datap(cpu)->cpu_running ||
2150 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2151 !CPU_CR3_IS_ACTIVE(cpu)) {
2152 cpus_to_respond &= ~cpu_bit;
2153 }
2154 cpu_pause();
2155 }
2156 if (cpus_to_respond == 0)
2157 break;
2158 }
2159	if (cpus_to_respond && (mach_absolute_time() > deadline)) {
2160 if (machine_timeout_suspended())
2161 continue;
2162 pmap_tlb_flush_timeout = TRUE;
2163 orig_acks = NMIPI_acks;
2164 pmap_cpuset_NMIPI(cpus_to_respond);
2165
2166 panic("TLB invalidation IPI timeout: "
2167 "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
2168 cpus_to_respond, orig_acks, NMIPI_acks);
2169 }
2170 }
2171 }
2172
2173 if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
2174 panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
2175 }
2176
2177 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
2178 pmap, cpus_to_signal, startv, endv, 0);
2179}
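/*
 * Editorial sketch, not part of the original source: callers edit the
 * page tables first and then start the shootdown while still holding
 * the pmap lock, as the PMAP_UPDATE_TLBS() use in pmap_collect() above
 * does.  PMAP_UPDATE_TLBS() is assumed here to be a thin wrapper around
 * pmap_flush_tlbs().
 */
#if 0	/* illustrative only */
	PMAP_LOCK(p);
	pmap_store_pte(pte, 0);			/* edit the mapping(s) */
	PMAP_UPDATE_TLBS(p, startv, endv);	/* shoot down [startv, endv) */
	PMAP_UNLOCK(p);
#endif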
2180
2181void
2182process_pmap_updates(void)
2183{
2184 int ccpu = cpu_number();
2185 pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
2186 if (pmap_pcid_ncpus) {
2187 pmap_pcid_validate_current();
2188 if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
2189 cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
2190 tlb_flush_global();
2191 }
2192 else {
2193 cpu_datap(ccpu)->cpu_tlb_invalid_local = FALSE;
2194 flush_tlb_raw();
2195 }
2196 }
2197 else {
2198 current_cpu_datap()->cpu_tlb_invalid = FALSE;
2199 flush_tlb_raw();
2200 }
2201
2202 __asm__ volatile("mfence");
2203}
2204
2205void
2206pmap_update_interrupt(void)
2207{
2208 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
2209 0, 0, 0, 0, 0);
2210
2211 process_pmap_updates();
2212
2213 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
2214 0, 0, 0, 0, 0);
2215}
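/*
 * Editorial note, not part of the original source: this is the receive
 * side of the shootdown in pmap_flush_tlbs() above.  The initiator sets
 * cpu_tlb_invalid[_global] for each target and sends an MP_TLB_FLUSH
 * IPI; the target takes the interrupt here, revalidates its PCID state
 * if PCIDs are in use, flushes its TLB (globally or locally), and
 * clears the flag -- which is the condition the initiator's
 * cpus_to_respond polling loop is waiting to observe.
 */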
2216
2217#include <mach/mach_vm.h> /* mach_vm_region_recurse() */
2218/* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
2219 * and identify ranges with mismatched VM permissions and PTE permissions
2220 */
2221kern_return_t
2222pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) {
2223 vm_offset_t cv = sv;
2224 kern_return_t rv = KERN_SUCCESS;
2225 uint64_t skip4 = 0, skip2 = 0;
2226
2227 sv &= ~PAGE_MASK_64;
2228 ev &= ~PAGE_MASK_64;
2229 while (cv < ev) {
2230 if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
2231 (cv < 0xFFFF800000000000ULL))) {
2232 cv = 0xFFFF800000000000ULL;
2233 }
2234 /* Potential inconsistencies from not holding pmap lock
2235 * but harmless for the moment.
2236 */
2237 if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
2238 if ((cv + NBPML4) > cv)
2239 cv += NBPML4;
2240 else
2241 break;
2242 skip4++;
2243 continue;
2244 }
2245 if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
2246 if ((cv + NBPD) > cv)
2247 cv += NBPD;
2248 else
2249 break;
2250 skip2++;
2251 continue;
2252 }
2253
2254 pt_entry_t *ptep = pmap_pte(ipmap, cv);
2255 if (ptep && (*ptep & INTEL_PTE_VALID)) {
2256 if (*ptep & INTEL_PTE_WRITE) {
2257 if (!(*ptep & INTEL_PTE_NX)) {
2258 kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
2259 rv = KERN_FAILURE;
2260 }
2261 }
2262 }
2263 cv += PAGE_SIZE;
2264 }
2265 kprintf("Completed pmap scan\n");
2266 cv = sv;
2267
2268 struct vm_region_submap_info_64 vbr;
2269 mach_msg_type_number_t vbrcount = 0;
2270 mach_vm_size_t vmsize;
2271 vm_prot_t prot;
2272 uint32_t nesting_depth = 0;
2273 kern_return_t kret;
2274
2275 while (cv < ev) {
2276
2277 for (;;) {
2278 vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
2279 if((kret = mach_vm_region_recurse(ivmmap,
2280 (mach_vm_address_t *) &cv, &vmsize, &nesting_depth,
2281 (vm_region_recurse_info_t)&vbr,
2282 &vbrcount)) != KERN_SUCCESS) {
2283 break;
2284 }
2285
2286 if(vbr.is_submap) {
2287 nesting_depth++;
2288 continue;
2289 } else {
2290 break;
2291 }
2292 }
2293
2294 if(kret != KERN_SUCCESS)
2295 break;
2296
2297 prot = vbr.protection;
2298
2299 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
2300 kprintf("W+X map entry at address 0x%lx\n", cv);
2301 rv = KERN_FAILURE;
2302 }
2303
2304 if (prot) {
2305 vm_offset_t pcv;
2306 for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
2307 pt_entry_t *ptep = pmap_pte(ipmap, pcv);
2308 vm_prot_t tprot;
2309
2310 if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID))
2311 continue;
2312 tprot = VM_PROT_READ;
2313 if (*ptep & INTEL_PTE_WRITE)
2314 tprot |= VM_PROT_WRITE;
2315 if ((*ptep & INTEL_PTE_NX) == 0)
2316 tprot |= VM_PROT_EXECUTE;
2317 if (tprot != prot) {
2318 kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
2319 rv = KERN_FAILURE;
2320 }
2321 }
2322 }
2323 cv += vmsize;
2324 }
2325 return rv;
2326}
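/*
 * Editorial sketch, not part of the original source: a debug-only caller
 * could audit the kernel's own mappings roughly as below; the address
 * bounds shown are hypothetical placeholders.
 */
#if 0	/* illustrative only */
	kern_return_t kr;

	kr = pmap_permissions_verify(kernel_pmap, kernel_map,
	    VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
	if (kr != KERN_SUCCESS)
		kprintf("W+X or PTE/map permission mismatch detected\n");
#endif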