b0d623f7 1/*
6d2010ae 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 * File: pmap.c
61 * Author: Avadis Tevanian, Jr., Michael Wayne Young
62 * (These guys wrote the Vax version)
63 *
64 * Physical Map management code for Intel i386, i486, and i860.
65 *
66 * Manages physical address maps.
67 *
68 * In addition to hardware address maps, this
69 * module is called upon to provide software-use-only
70 * maps which may or may not be stored in the same
71 * form as hardware maps. These pseudo-maps are
72 * used to store intermediate results from copy
73 * operations to and from address spaces.
74 *
75 * Since the information managed by this module is
76 * also stored by the logical address mapping module,
77 * this module may throw away valid virtual-to-physical
78 * mappings at almost any time. However, invalidations
79 * of virtual-to-physical mappings must be done as
80 * requested.
81 *
82 * In order to cope with hardware architectures which
83 * make virtual-to-physical map invalidates expensive,
 84 * this module may delay invalidation or protection-reduction
85 * operations until such time as they are actually
86 * necessary. This module is given full information as
87 * to which processors are currently using which maps,
88 * and to when physical maps must be made correct.
89 */
90
91#include <string.h>
92#include <mach_ldebug.h>
93
94#include <libkern/OSAtomic.h>
95
96#include <mach/machine/vm_types.h>
97
98#include <mach/boolean.h>
99#include <kern/thread.h>
100#include <kern/zalloc.h>
101#include <kern/queue.h>
316670eb 102#include <kern/ledger.h>
6d2010ae 103#include <kern/mach_param.h>
104
105#include <kern/lock.h>
106#include <kern/kalloc.h>
107#include <kern/spl.h>
108
109#include <vm/pmap.h>
110#include <vm/vm_map.h>
111#include <vm/vm_kern.h>
112#include <mach/vm_param.h>
113#include <mach/vm_prot.h>
114#include <vm/vm_object.h>
115#include <vm/vm_page.h>
116
117#include <mach/machine/vm_param.h>
118#include <machine/thread.h>
119
120#include <kern/misc_protos.h> /* prototyping */
121#include <i386/misc_protos.h>
6d2010ae 122#include <i386/i386_lowmem.h>
123#include <x86_64/lowglobals.h>
124
125#include <i386/cpuid.h>
126#include <i386/cpu_data.h>
127#include <i386/cpu_number.h>
128#include <i386/machine_cpu.h>
129#include <i386/seg.h>
130#include <i386/serial_io.h>
131#include <i386/cpu_capabilities.h>
132#include <i386/machine_routines.h>
133#include <i386/proc_reg.h>
134#include <i386/tsc.h>
135#include <i386/pmap_internal.h>
6d2010ae 136#include <i386/pmap_pcid.h>
b0d623f7 137
138#include <vm/vm_protos.h>
139
140#include <i386/mp.h>
141#include <i386/mp_desc.h>
142#include <libkern/kernel_mach_header.h>
143
144#include <pexpert/i386/efi.h>
145
146
147#ifdef IWANTTODEBUG
148#undef DEBUG
149#define DEBUG 1
150#define POSTCODE_DELAY 1
151#include <i386/postcode.h>
152#endif /* IWANTTODEBUG */
153
154#ifdef PMAP_DEBUG
155#define DBG(x...) kprintf("DBG: " x)
156#else
157#define DBG(x...)
158#endif
159/* Compile time assert to ensure adjacency/alignment of per-CPU data fields used
160 * in the trampolines for kernel/user boundary TLB coherency.
b0d623f7 161 */
162char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1];
163boolean_t pmap_trace = FALSE;
b0d623f7 164
6d2010ae 165boolean_t no_shared_cr3 = DEBUG; /* TRUE for DEBUG by default */
166
167int nx_enabled = 1; /* enable no-execute protection */
168int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */
169int allow_stack_exec = 0; /* No apps may execute from the stack by default */
170
171const boolean_t cpu_64bit = TRUE; /* Mais oui! */
172
173uint64_t max_preemption_latency_tsc = 0;
174
175pv_hashed_entry_t *pv_hash_table; /* hash lists */
176
177uint32_t npvhash = 0;
178
179pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL;
180pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL;
181decl_simple_lock_data(,pv_hashed_free_list_lock)
182decl_simple_lock_data(,pv_hashed_kern_free_list_lock)
183decl_simple_lock_data(,pv_hash_table_lock)
184
185zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */
186
187/*
188 * First and last physical addresses that we maintain any information
189 * for. Initialized to zero so that pmap operations done before
190 * pmap_init won't touch any non-existent structures.
191 */
192boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
193
194static struct vm_object kptobj_object_store;
195static struct vm_object kpml4obj_object_store;
196static struct vm_object kpdptobj_object_store;
197
198/*
6d2010ae 199 * Array of physical page attributes for managed pages.
200 * One byte per physical page.
201 */
202char *pmap_phys_attributes;
316670eb 203ppnum_t last_managed_page = 0;
204
205/*
206 * Amount of virtual memory mapped by one
207 * page-directory entry.
208 */
209
210uint64_t pde_mapped_size = PDE_MAPPED_SIZE;
211
212unsigned pmap_memory_region_count;
213unsigned pmap_memory_region_current;
214
215pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE];
216
217/*
218 * Other useful macros.
219 */
220#define current_pmap() (vm_map_pmap(current_thread()->map))
221
222struct pmap kernel_pmap_store;
223pmap_t kernel_pmap;
224
225struct zone *pmap_zone; /* zone of pmap structures */
226
227struct zone *pmap_anchor_zone;
228int pmap_debug = 0; /* flag for debugging prints */
229
b0d623f7 230unsigned int inuse_ptepages_count = 0;
231long long alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */
232unsigned int bootstrap_wired_pages = 0;
233int pt_fake_zone_index = -1;
b0d623f7 234
6d2010ae 235extern long NMIPI_acks;
b0d623f7 236
237boolean_t kernel_text_ps_4K = TRUE;
238boolean_t wpkernel = TRUE;
239
240extern char end;
241
242static int nkpt;
243
244pt_entry_t *DMAP1, *DMAP2;
245caddr_t DADDR1;
246caddr_t DADDR2;
b0d623f7 247
248const boolean_t pmap_disable_kheap_nx = FALSE;
249const boolean_t pmap_disable_kstack_nx = FALSE;
250extern boolean_t doconstro_override;
b0d623f7 251
316670eb 252extern long __stack_chk_guard[];
253
254/*
255 * Map memory at initialization. The physical addresses being
256 * mapped are not managed and are never unmapped.
257 *
258 * For now, VM is already on, we only need to map the
259 * specified memory.
260 */
261vm_offset_t
262pmap_map(
263 vm_offset_t virt,
264 vm_map_offset_t start_addr,
265 vm_map_offset_t end_addr,
266 vm_prot_t prot,
267 unsigned int flags)
268{
269 int ps;
270
271 ps = PAGE_SIZE;
272 while (start_addr < end_addr) {
273 pmap_enter(kernel_pmap, (vm_map_offset_t)virt,
316670eb 274 (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE);
275 virt += ps;
276 start_addr += ps;
277 }
278 return(virt);
279}
280
281extern char *first_avail;
282extern vm_offset_t virtual_avail, virtual_end;
283extern pmap_paddr_t avail_start, avail_end;
284extern vm_offset_t sHIB;
285extern vm_offset_t eHIB;
286extern vm_offset_t stext;
287extern vm_offset_t etext;
288extern vm_offset_t sdata, edata;
289extern vm_offset_t sconstdata, econstdata;
b0d623f7 290
291extern void *KPTphys;
292
293boolean_t pmap_smep_enabled = FALSE;
294
295void
296pmap_cpu_init(void)
297{
bd504ef0 298 cpu_data_t *cdp = current_cpu_datap();
299 /*
 300 * Called early in the life of a processor (from cpu_mode_init()).
6d2010ae 301 * Ensure global page feature is disabled at this point.
b0d623f7 302 */
6d2010ae 303
304 set_cr4(get_cr4() &~ CR4_PGE);
305
306 /*
307 * Initialize the per-cpu, TLB-related fields.
308 */
309 cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3;
310 cdp->cpu_active_cr3 = kernel_pmap->pm_cr3;
311 cdp->cpu_tlb_invalid = FALSE;
312 cdp->cpu_task_map = TASK_MAP_64BIT;
6d2010ae 313 pmap_pcid_configure();
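	/*
	 * Enable Supervisor Mode Execute Protection (SMEP) if the processor
	 * advertises it in CPUID leaf 7 and the "-pmap_smep_disable" boot-arg
	 * has not been supplied.  With CR4.SMEP set, instruction fetches
	 * performed in supervisor mode from user-accessible pages fault.
	 */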
314 if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) {
315 boolean_t nsmep;
316 if (!PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) {
317 set_cr4(get_cr4() | CR4_SMEP);
318 pmap_smep_enabled = TRUE;
319 }
320 }
321
322 if (cdp->cpu_fixed_pmcs_enabled) {
323 boolean_t enable = TRUE;
324 cpu_pmc_control(&enable);
325 }
326}
327
328
329
330/*
331 * Bootstrap the system enough to run with virtual memory.
332 * Map the kernel's code and data, and allocate the system page table.
333 * Called with mapping OFF. Page_size must already be set.
334 */
335
336void
337pmap_bootstrap(
338 __unused vm_offset_t load_start,
339 __unused boolean_t IA32e)
340{
341#if NCOPY_WINDOWS > 0
342 vm_offset_t va;
343 int i;
344#endif
345 assert(IA32e);
346
347 vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address
348 * known to VM */
349 /*
350 * The kernel's pmap is statically allocated so we don't
351 * have to use pmap_create, which is unlikely to work
352 * correctly at this part of the boot sequence.
353 */
354
355 kernel_pmap = &kernel_pmap_store;
356 kernel_pmap->ref_count = 1;
316670eb 357 kernel_pmap->nx_enabled = TRUE;
358 kernel_pmap->pm_task_map = TASK_MAP_64BIT;
359 kernel_pmap->pm_obj = (vm_object_t) NULL;
360 kernel_pmap->dirbase = (pd_entry_t *)((uintptr_t)IdlePTD);
361 kernel_pmap->pm_pdpt = (pd_entry_t *) ((uintptr_t)IdlePDPT);
362 kernel_pmap->pm_pml4 = IdlePML4;
363 kernel_pmap->pm_cr3 = (uintptr_t)ID_MAP_VTOP(IdlePML4);
6d2010ae 364 pmap_pcid_initialize_kernel(kernel_pmap);
b0d623f7 365
6d2010ae 366
367
368 current_cpu_datap()->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3;
369
370 nkpt = NKPT;
371 OSAddAtomic(NKPT, &inuse_ptepages_count);
372 OSAddAtomic64(NKPT, &alloc_ptepages_count);
373 bootstrap_wired_pages = NKPT;
374
375 virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail;
376 virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS);
377
378#if NCOPY_WINDOWS > 0
379 /*
380 * Reserve some special page table entries/VA space for temporary
381 * mapping of pages.
382 */
383#define SYSMAP(c, p, v, n) \
384 v = (c)va; va += ((n)*INTEL_PGBYTES);
385
386 va = virtual_avail;
387
388 for (i=0; i<PMAP_NWINDOWS; i++) {
389#if 1
390 kprintf("trying to do SYSMAP idx %d %p\n", i,
391 current_cpu_datap());
392 kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap);
393 kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow);
394 kprintf("two stuff %p %p\n",
395 (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
396 (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR));
397#endif
398 SYSMAP(caddr_t,
399 (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP),
400 (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR),
401 1);
402 current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP =
403 &(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store);
404 *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
405 }
406
 407 /* DMAP used for debugger */
408 SYSMAP(caddr_t, DMAP1, DADDR1, 1);
409 SYSMAP(caddr_t, DMAP2, DADDR2, 1); /* XXX temporary - can remove */
410
411 virtual_avail = va;
412#endif
413
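	/*
	 * npvhash is used as a hash mask, so any value accepted from the
	 * "npvhash" boot-arg must be of the form (2^N)-1; otherwise fall
	 * back to the default NPVHASH.
	 */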
414 if (PE_parse_boot_argn("npvhash", &npvhash, sizeof (npvhash))) {
415 if (0 != ((npvhash + 1) & npvhash)) {
416 kprintf("invalid hash %d, must be ((2^N)-1), "
417 "using default %d\n", npvhash, NPVHASH);
418 npvhash = NPVHASH;
419 }
420 } else {
421 npvhash = NPVHASH;
422 }
423
424 simple_lock_init(&kernel_pmap->lock, 0);
425 simple_lock_init(&pv_hashed_free_list_lock, 0);
426 simple_lock_init(&pv_hashed_kern_free_list_lock, 0);
427 simple_lock_init(&pv_hash_table_lock,0);
428
429 pmap_cpu_init();
430
431 if (pmap_pcid_ncpus)
432 printf("PMAP: PCID enabled\n");
433
434 if (pmap_smep_enabled)
435 printf("PMAP: Supervisor Mode Execute Protection enabled\n");
7ddcb079 436
437#if DEBUG
438 printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]);
439 printf("ml_early_random(): 0x%qx\n", ml_early_random());
440#endif
441 boolean_t ptmp;
442 /* Check if the user has requested disabling stack or heap no-execute
443 * enforcement. These are "const" variables; that qualifier is cast away
444 * when altering them. The TEXT/DATA const sections are marked
445 * write protected later in the kernel startup sequence, so altering
446 * them is possible at this point, in pmap_bootstrap().
447 */
448 if (PE_parse_boot_argn("-pmap_disable_kheap_nx", &ptmp, sizeof(ptmp))) {
449 boolean_t *pdknxp = (boolean_t *) &pmap_disable_kheap_nx;
450 *pdknxp = TRUE;
451 }
452
453 if (PE_parse_boot_argn("-pmap_disable_kstack_nx", &ptmp, sizeof(ptmp))) {
454 boolean_t *pdknhp = (boolean_t *) &pmap_disable_kstack_nx;
455 *pdknhp = TRUE;
456 }
457
458 boot_args *args = (boot_args *)PE_state.bootArgs;
459 if (args->efiMode == kBootArgsEfiMode32) {
460 printf("EFI32: kernel virtual space limited to 4GB\n");
461 virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32;
462 }
463 kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n",
464 (long)KERNEL_BASE, (long)virtual_end);
465 kprintf("Available physical space from 0x%llx to 0x%llx\n",
466 avail_start, avail_end);
467
468 /*
469 * The -no_shared_cr3 boot-arg is a debugging feature (set by default
470 * in the DEBUG kernel) to force the kernel to switch to its own map
471 * (and cr3) when control is in kernelspace. The kernel's map does not
472 * include (i.e. share) userspace so wild references will cause
473 * a panic. Only copyin and copyout are exempt from this.
474 */
475 (void) PE_parse_boot_argn("-no_shared_cr3",
476 &no_shared_cr3, sizeof (no_shared_cr3));
477 if (no_shared_cr3)
478 kprintf("Kernel not sharing user map\n");
479
480#ifdef PMAP_TRACES
481 if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) {
482 kprintf("Kernel traces for pmap operations enabled\n");
483 }
484#endif /* PMAP_TRACES */
485}
486
487void
488pmap_virtual_space(
489 vm_offset_t *startp,
490 vm_offset_t *endp)
491{
492 *startp = virtual_avail;
493 *endp = virtual_end;
494}
495
496
497
498
499#if HIBERNATION
500
501#include <IOKit/IOHibernatePrivate.h>
502
503int32_t pmap_npages;
504int32_t pmap_teardown_last_valid_compact_indx = -1;
505
506
507void hibernate_rebuild_pmap_structs(void);
508void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
509void pmap_pack_index(uint32_t);
510int32_t pmap_unpack_index(pv_rooted_entry_t);
511
512
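/*
 * While pv_head_table is compacted for hibernation, each entry's original
 * index is stashed in the (normally sign-extended) top 16 bits of its queue
 * links: the high half of the index in qlink.next and the low half in
 * qlink.prev.  Unpacking recovers the index and restores the 0xffff
 * canonical bits.
 */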
513int32_t
514pmap_unpack_index(pv_rooted_entry_t pv_h)
515{
516 int32_t indx = 0;
517
518 indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48);
519 indx = indx << 16;
520 indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48);
521
522 *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48);
523 *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48);
524
525 return (indx);
526}
527
528
529void
530pmap_pack_index(uint32_t indx)
531{
532 pv_rooted_entry_t pv_h;
533
534 pv_h = &pv_head_table[indx];
535
536 *((uint64_t *)(&pv_h->qlink.next)) &= ~((uint64_t)0xffff << 48);
537 *((uint64_t *)(&pv_h->qlink.prev)) &= ~((uint64_t)0xffff << 48);
538
539 *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)(indx >> 16)) << 48;
540 *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)(indx & 0xffff)) << 48;
541}
542
543
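/*
 * Before hibernation, slide every in-use pv_rooted_entry_t down into the
 * lowest free slots of pv_head_table (recording each entry's original index
 * with pmap_pack_index) so that everything above
 * pmap_teardown_last_valid_compact_indx need not be preserved in the image.
 */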
544void
545hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end)
546{
547 int32_t i;
548 int32_t compact_target_indx;
549
550 compact_target_indx = 0;
551
552 for (i = 0; i < pmap_npages; i++) {
553 if (pv_head_table[i].pmap == PMAP_NULL) {
554
555 if (pv_head_table[compact_target_indx].pmap != PMAP_NULL)
556 compact_target_indx = i;
557 } else {
558 pmap_pack_index((uint32_t)i);
559
560 if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) {
561 /*
562 * we've got a hole to fill, so
 563 * move this pv_rooted_entry_t to its new home
564 */
565 pv_head_table[compact_target_indx] = pv_head_table[i];
566 pv_head_table[i].pmap = PMAP_NULL;
567
568 pmap_teardown_last_valid_compact_indx = compact_target_indx;
569 compact_target_indx++;
570 } else
571 pmap_teardown_last_valid_compact_indx = i;
572 }
573 }
574 *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx+1];
575 *unneeded_end = (addr64_t)&pv_head_table[pmap_npages-1];
576
577 HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
578}
579
580
581void
582hibernate_rebuild_pmap_structs(void)
583{
584 int32_t cindx, eindx, rindx;
585 pv_rooted_entry_t pv_h;
586
587 eindx = (int32_t)pmap_npages;
588
589 for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
590
591 pv_h = &pv_head_table[cindx];
592
593 rindx = pmap_unpack_index(pv_h);
594 assert(rindx < pmap_npages);
595
596 if (rindx != cindx) {
597 /*
598 * this pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs,
599 * so move it back to its real location
600 */
601 pv_head_table[rindx] = pv_head_table[cindx];
602 }
603 if (rindx+1 != eindx) {
604 /*
605 * the 'hole' between this vm_rooted_entry_t and the previous
606 * vm_rooted_entry_t we moved needs to be initialized as
607 * a range of zero'd vm_rooted_entry_t's
608 */
609 bzero((char *)&pv_head_table[rindx+1], (eindx - rindx - 1) * sizeof (struct pv_rooted_entry));
610 }
611 eindx = rindx;
612 }
613 if (rindx)
614 bzero ((char *)&pv_head_table[0], rindx * sizeof (struct pv_rooted_entry));
615
616 HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx);
617}
618
619#endif
620
621/*
622 * Initialize the pmap module.
623 * Called by vm_init, to initialize any structures that the pmap
624 * system needs to map virtual memory.
625 */
626void
627pmap_init(void)
628{
629 long npages;
630 vm_offset_t addr;
060df5ea 631 vm_size_t s, vsize;
632 vm_map_offset_t vaddr;
633 ppnum_t ppn;
634
635
636 kernel_pmap->pm_obj_pml4 = &kpml4obj_object_store;
39236c6e 637 _vm_object_allocate((vm_object_size_t)NPML4PGS * PAGE_SIZE, &kpml4obj_object_store);
638
639 kernel_pmap->pm_obj_pdpt = &kpdptobj_object_store;
39236c6e 640 _vm_object_allocate((vm_object_size_t)NPDPTPGS * PAGE_SIZE, &kpdptobj_object_store);
641
642 kernel_pmap->pm_obj = &kptobj_object_store;
39236c6e 643 _vm_object_allocate((vm_object_size_t)NPDEPGS * PAGE_SIZE, &kptobj_object_store);
644
645 /*
646 * Allocate memory for the pv_head_table and its lock bits,
647 * the modify bit array, and the pte_page table.
648 */
649
650 /*
651 * zero bias all these arrays now instead of off avail_start
652 * so we cover all memory
653 */
654
655 npages = i386_btop(avail_end);
656#if HIBERNATION
657 pmap_npages = (uint32_t)npages;
658#endif
659 s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages
660 + (sizeof (struct pv_hashed_entry_t *) * (npvhash+1))
661 + pv_lock_table_size(npages)
662 + pv_hash_lock_table_size((npvhash+1))
663 + npages);
664
665 s = round_page(s);
666 if (kernel_memory_allocate(kernel_map, &addr, s, 0,
667 KMA_KOBJECT | KMA_PERMANENT)
668 != KERN_SUCCESS)
669 panic("pmap_init");
670
671 memset((char *)addr, 0, s);
672
673 vaddr = addr;
674 vsize = s;
675
676#if PV_DEBUG
677 if (0 == npvhash) panic("npvhash not initialized");
678#endif
679
680 /*
681 * Allocate the structures first to preserve word-alignment.
682 */
683 pv_head_table = (pv_rooted_entry_t) addr;
684 addr = (vm_offset_t) (pv_head_table + npages);
685
686 pv_hash_table = (pv_hashed_entry_t *)addr;
687 addr = (vm_offset_t) (pv_hash_table + (npvhash + 1));
688
689 pv_lock_table = (char *) addr;
690 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
691
692 pv_hash_lock_table = (char *) addr;
693 addr = (vm_offset_t) (pv_hash_lock_table + pv_hash_lock_table_size((npvhash+1)));
694
695 pmap_phys_attributes = (char *) addr;
696
697 ppnum_t last_pn = i386_btop(avail_end);
698 unsigned int i;
699 pmap_memory_region_t *pmptr = pmap_memory_regions;
700 for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
701 if (pmptr->type != kEfiConventionalMemory)
702 continue;
316670eb 703 ppnum_t pn;
704 for (pn = pmptr->base; pn <= pmptr->end; pn++) {
705 if (pn < last_pn) {
706 pmap_phys_attributes[pn] |= PHYS_MANAGED;
060df5ea 707
708 if (pn > last_managed_page)
709 last_managed_page = pn;
060df5ea 710
7ddcb079 711 if (pn >= lowest_hi && pn <= highest_hi)
060df5ea 712 pmap_phys_attributes[pn] |= PHYS_NOENCRYPT;
713 }
714 }
715 }
716 while (vsize) {
717 ppn = pmap_find_phys(kernel_pmap, vaddr);
b0d623f7 718
719 pmap_phys_attributes[ppn] |= PHYS_NOENCRYPT;
720
721 vaddr += PAGE_SIZE;
722 vsize -= PAGE_SIZE;
723 }
724 /*
725 * Create the zone of physical maps,
726 * and of the physical-to-virtual entries.
727 */
728 s = (vm_size_t) sizeof(struct pmap);
729 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
730 zone_change(pmap_zone, Z_NOENCRYPT, TRUE);
731
732 pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors");
733 zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE);
734
6d2010ae 735 /* The anchor is required to be page aligned. Zone debugging adds
736 * padding which may violate that requirement. Tell the zone
737 * subsystem that alignment is required.
6d2010ae 738 */
739
740 zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE);
6d2010ae 741
b0d623f7 742 s = (vm_size_t) sizeof(struct pv_hashed_entry);
743 pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */,
744 4096 * 3 /* LCM x86_64*/, "pv_list");
060df5ea 745 zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE);
746
747 /* create pv entries for kernel pages mapped by low level
748 startup code. these have to exist so we can pmap_remove()
749 e.g. kext pages from the middle of our addr space */
750
751 vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS;
6d2010ae 752 for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) {
753 pv_rooted_entry_t pv_e;
754
755 pv_e = pai_to_pvh(ppn);
756 pv_e->va = vaddr;
757 vaddr += PAGE_SIZE;
758 pv_e->pmap = kernel_pmap;
759 queue_init(&pv_e->qlink);
760 }
761 pmap_initialized = TRUE;
762
763 max_preemption_latency_tsc = tmrCvt((uint64_t)MAX_PREEMPTION_LATENCY_NS, tscFCvtn2t);
764
765 /*
766 * Ensure the kernel's PML4 entry exists for the basement
767 * before this is shared with any user.
768 */
769 pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE);
770}
771
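/*
 * pmap_mark_range: walk the existing mappings covering [sv, sv + nxrosz)
 * and apply no-execute and/or read-only attributes in place.  2MB superpage
 * mappings are adjusted at the PDE; otherwise each 4K PTE is updated.
 * Only protection bits are changed; no mappings are created or removed.
 */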
772static
773void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) {
774 uint64_t ev = sv + nxrosz, cv = sv;
775 pd_entry_t *pdep;
776 pt_entry_t *ptep = NULL;
777
778 assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0);
779
780 for (pdep = pmap_pde(npmap, cv); pdep != NULL && (cv < ev);) {
781 uint64_t pdev = (cv & ~((uint64_t)PDEMASK));
782
783 if (*pdep & INTEL_PTE_PS) {
784 if (NX)
785 *pdep |= INTEL_PTE_NX;
786 if (ro)
787 *pdep &= ~INTEL_PTE_WRITE;
788 cv += NBPD;
789 cv &= ~((uint64_t) PDEMASK);
790 pdep = pmap_pde(npmap, cv);
791 continue;
792 }
793
794 for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) {
795 if (NX)
796 *ptep |= INTEL_PTE_NX;
797 if (ro)
798 *ptep &= ~INTEL_PTE_WRITE;
799 cv += NBPT;
800 ptep = pmap_pte(npmap, cv);
801 }
802 }
803 DPRINTF("%s(0x%llx, 0x%llx, %u, %u): 0x%llx, 0x%llx\n", __FUNCTION__, sv, nxrosz, NX, ro, cv, ptep ? *ptep: 0);
804}
805
806/*
807 * Called once VM is fully initialized so that we can release unused
808 * sections of low memory to the general pool.
809 * Also complete the set-up of identity-mapped sections of the kernel:
810 * 1) write-protect kernel text
811 * 2) map kernel text using large pages if possible
812 * 3) read and write-protect page zero (for K32)
813 * 4) map the global page at the appropriate virtual address.
814 *
815 * Use of large pages
816 * ------------------
817 * To effectively map and write-protect all kernel text pages, the text
818 * must be 2M-aligned at the base, and the data section above must also be
819 * 2M-aligned. That is, there's padding below and above. This is achieved
820 * through linker directives. Large pages are used only if this alignment
 821 * exists (and not overridden by the -kernel_text_ps_4K boot-arg). The
822 * memory layout is:
823 *
824 * : :
825 * | __DATA |
826 * sdata: ================== 2Meg
827 * | |
828 * | zero-padding |
829 * | |
830 * etext: ------------------
831 * | |
832 * : :
833 * | |
834 * | __TEXT |
835 * | |
836 * : :
837 * | |
838 * stext: ================== 2Meg
839 * | |
840 * | zero-padding |
841 * | |
842 * eHIB: ------------------
843 * | __HIB |
844 * : :
845 *
846 * Prior to changing the mapping from 4K to 2M, the zero-padding pages
847 * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
848 * 4K pages covering [stext,etext] are coalesced as 2M large pages.
849 * The now unused level-1 PTE pages are also freed.
850 */
316670eb 851extern ppnum_t vm_kernel_base_page;
852void
853pmap_lowmem_finalize(void)
854{
855 spl_t spl;
856 int i;
857
858 /*
859 * Update wired memory statistics for early boot pages
860 */
316670eb 861 PMAP_ZINFO_PALLOC(kernel_pmap, bootstrap_wired_pages * PAGE_SIZE);
862
863 /*
316670eb 864 * Free pages in pmap regions below the base:
865 * rdar://6332712
866 * We can't free all the pages to VM that EFI reports available.
867 * Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
868 * There's also a size miscalculation here: pend is one page less
869 * than it should be but this is not fixed to be backwards
870 * compatible.
871 * This is important for KASLR because up to 256*2MB = 512MB of space
 872 * needs to be released to VM.
873 */
874 for (i = 0;
316670eb 875 pmap_memory_regions[i].end < vm_kernel_base_page;
6d2010ae 876 i++) {
877 vm_offset_t pbase = i386_ptob(pmap_memory_regions[i].base);
878 vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1);
6d2010ae 879
880 DBG("pmap region %d [%p..[%p\n",
881 i, (void *) pbase, (void *) pend);
882
883 if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED)
884 continue;
885 /*
886 * rdar://6332712
887 * Adjust limits not to free pages in range 0xc0000-0xff000.
888 */
889 if (pbase >= 0xc0000 && pend <= 0x100000)
890 continue;
891 if (pbase < 0xc0000 && pend > 0x100000) {
892 /* page range entirely within region, free lower part */
893 DBG("- ml_static_mfree(%p,%p)\n",
894 (void *) ml_static_ptovirt(pbase),
895 (void *) (0xc0000-pbase));
896 ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase);
897 pbase = 0x100000;
898 }
899 if (pbase < 0xc0000)
900 pend = MIN(pend, 0xc0000);
901 if (pend > 0x100000)
902 pbase = MAX(pbase, 0x100000);
903 DBG("- ml_static_mfree(%p,%p)\n",
6d2010ae 904 (void *) ml_static_ptovirt(pbase),
316670eb 905 (void *) (pend - pbase));
906 ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
907 }
908
909 /* A final pass to get rid of all initial identity mappings to
910 * low pages.
911 */
912 DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
913
914 /* Remove all mappings past the descriptor aliases and low globals */
915 pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
916
917 /*
918 * If text and data are both 2MB-aligned,
919 * we can map text with large-pages,
920 * unless the -kernel_text_ps_4K boot-arg overrides.
921 */
922 if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
923 kprintf("Kernel text is 2MB aligned");
924 kernel_text_ps_4K = FALSE;
925 if (PE_parse_boot_argn("-kernel_text_ps_4K",
926 &kernel_text_ps_4K,
927 sizeof (kernel_text_ps_4K)))
928 kprintf(" but will be mapped with 4K pages\n");
929 else
930 kprintf(" and will be mapped with 2M pages\n");
931 }
932
933 (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
934 if (wpkernel)
935 kprintf("Kernel text %p-%p to be write-protected\n",
936 (void *) stext, (void *) etext);
937
938 spl = splhigh();
939
940 /*
941 * Scan over text if mappings are to be changed:
942 * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0
 943 * - Change to large-pages if possible and not overridden.
944 */
945 if (kernel_text_ps_4K && wpkernel) {
946 vm_offset_t myva;
947 for (myva = stext; myva < etext; myva += PAGE_SIZE) {
948 pt_entry_t *ptep;
949
950 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
951 if (ptep)
316670eb 952 pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE);
953 }
954 }
955
956 if (!kernel_text_ps_4K) {
957 vm_offset_t myva;
958
959 /*
960 * Release zero-filled page padding used for 2M-alignment.
961 */
962 DBG("ml_static_mfree(%p,%p) for padding below text\n",
963 (void *) eHIB, (void *) (stext - eHIB));
964 ml_static_mfree(eHIB, stext - eHIB);
965 DBG("ml_static_mfree(%p,%p) for padding above text\n",
966 (void *) etext, (void *) (sdata - etext));
967 ml_static_mfree(etext, sdata - etext);
968
969 /*
970 * Coalesce text pages into large pages.
971 */
972 for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
973 pt_entry_t *ptep;
974 vm_offset_t pte_phys;
975 pt_entry_t *pdep;
976 pt_entry_t pde;
977
978 pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
979 ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
980 DBG("myva: %p pdep: %p ptep: %p\n",
981 (void *) myva, (void *) pdep, (void *) ptep);
982 if ((*ptep & INTEL_PTE_VALID) == 0)
983 continue;
984 pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
985 pde = *pdep & PTMASK; /* page attributes from pde */
986 pde |= INTEL_PTE_PS; /* make it a 2M entry */
987 pde |= pte_phys; /* take page frame from pte */
988
989 if (wpkernel)
316670eb 990 pde &= ~INTEL_PTE_WRITE;
991 DBG("pmap_store_pte(%p,0x%llx)\n",
992 (void *)pdep, pde);
993 pmap_store_pte(pdep, pde);
994
995 /*
996 * Free the now-unused level-1 pte.
997 * Note: ptep is a virtual address to the pte in the
998 * recursive map. We can't use this address to free
999 * the page. Instead we need to compute its address
1000 * in the Idle PTEs in "low memory".
1001 */
1002 vm_offset_t vm_ptep = (vm_offset_t) KPTphys
1003 + (pte_phys >> PTPGSHIFT);
1004 DBG("ml_static_mfree(%p,0x%x) for pte\n",
1005 (void *) vm_ptep, PAGE_SIZE);
1006 ml_static_mfree(vm_ptep, PAGE_SIZE);
1007 }
1008
1009 /* Change variable read by sysctl machdep.pmap */
1010 pmap_kernel_text_ps = I386_LPGBYTES;
1011 }
1012
1013 boolean_t doconstro = TRUE;
1014
1015 (void) PE_parse_boot_argn("dataconstro", &doconstro, sizeof(doconstro));
1016
1017 if ((sconstdata | econstdata) & PAGE_MASK) {
1018 kprintf("Const DATA misaligned 0x%lx 0x%lx\n", sconstdata, econstdata);
1019 if ((sconstdata & PAGE_MASK) || (doconstro_override == FALSE))
1020 doconstro = FALSE;
1021 }
1022
1023 if ((sconstdata > edata) || (sconstdata < sdata) || ((econstdata - sconstdata) >= (edata - sdata))) {
1024 kprintf("Const DATA incorrect size 0x%lx 0x%lx 0x%lx 0x%lx\n", sconstdata, econstdata, sdata, edata);
1025 doconstro = FALSE;
1026 }
1027
1028 if (doconstro)
1029 kprintf("Marking const DATA read-only\n");
1030
1031 vm_offset_t dva;
1032
1033 for (dva = sdata; dva < edata; dva += I386_PGBYTES) {
1034 assert(((sdata | edata) & PAGE_MASK) == 0);
1035 if ( (sdata | edata) & PAGE_MASK) {
1036 kprintf("DATA misaligned, 0x%lx, 0x%lx\n", sdata, edata);
1037 break;
1038 }
1039
1040 pt_entry_t dpte, *dptep = pmap_pte(kernel_pmap, dva);
1041
1042 dpte = *dptep;
1043
1044 assert((dpte & INTEL_PTE_VALID));
1045 if ((dpte & INTEL_PTE_VALID) == 0) {
1046 kprintf("Missing data mapping 0x%lx 0x%lx 0x%lx\n", dva, sdata, edata);
1047 continue;
1048 }
1049
1050 dpte |= INTEL_PTE_NX;
1051 if (doconstro && (dva >= sconstdata) && (dva < econstdata)) {
1052 dpte &= ~INTEL_PTE_WRITE;
1053 }
1054 pmap_store_pte(dptep, dpte);
1055 }
1056 kernel_segment_command_t * seg;
1057 kernel_section_t * sec;
1058
1059 for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) {
1060 if (!strcmp(seg->segname, "__TEXT") ||
1061 !strcmp(seg->segname, "__DATA")) {
1062 continue;
1063 }
1064 //XXX
1065 if (!strcmp(seg->segname, "__KLD")) {
1066 continue;
1067 }
1068 if (!strcmp(seg->segname, "__HIB")) {
1069 for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
1070 if (sec->addr & PAGE_MASK)
1071 panic("__HIB segment's sections misaligned");
1072 if (!strcmp(sec->sectname, "__text")) {
1073 pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE);
1074 } else {
1075 pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), TRUE, FALSE);
1076 }
1077 }
1078 } else {
1079 pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE);
1080 }
1081 }
1082
1083 /*
1084 * If we're debugging, map the low global vector page at the fixed
1085 * virtual address. Otherwise, remove the mapping for this.
1086 */
1087 if (debug_boot_arg) {
1088 pt_entry_t *pte = NULL;
1089 if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS)))
1090 panic("lowmem pte");
1091 /* make sure it is defined on page boundary */
1092 assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
1093 pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
1094 | INTEL_PTE_REF
1095 | INTEL_PTE_MOD
1096 | INTEL_PTE_WIRED
1097 | INTEL_PTE_VALID
1098 | INTEL_PTE_WRITE
1099 | INTEL_PTE_NX);
1100 } else {
1101 pmap_remove(kernel_pmap,
1102 LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE);
1103 }
1104
1105 splx(spl);
1106 if (pmap_pcid_ncpus)
1107 tlb_flush_global();
1108 else
1109 flush_tlb_raw();
1110}
1111
1112/*
 1113 * this function is only used for debugging from the vm layer
1114 */
1115boolean_t
1116pmap_verify_free(
1117 ppnum_t pn)
1118{
1119 pv_rooted_entry_t pv_h;
1120 int pai;
1121 boolean_t result;
1122
1123 assert(pn != vm_page_fictitious_addr);
1124
1125 if (!pmap_initialized)
1126 return(TRUE);
1127
1128 if (pn == vm_page_guard_addr)
1129 return TRUE;
1130
1131 pai = ppn_to_pai(pn);
1132 if (!IS_MANAGED_PAGE(pai))
1133 return(FALSE);
1134 pv_h = pai_to_pvh(pn);
1135 result = (pv_h->pmap == PMAP_NULL);
1136 return(result);
1137}
1138
1139boolean_t
1140pmap_is_empty(
1141 pmap_t pmap,
1142 vm_map_offset_t va_start,
1143 vm_map_offset_t va_end)
1144{
1145 vm_map_offset_t offset;
1146 ppnum_t phys_page;
1147
1148 if (pmap == PMAP_NULL) {
1149 return TRUE;
1150 }
1151
1152 /*
1153 * Check the resident page count
1154 * - if it's zero, the pmap is completely empty.
1155 * This short-circuit test prevents a virtual address scan which is
1156 * painfully slow for 64-bit spaces.
1157 * This assumes the count is correct
 1158 * .. the debug kernel ought to verify this, perhaps by a page-table walk.
1159 */
1160 if (pmap->stats.resident_count == 0)
1161 return TRUE;
1162
1163 for (offset = va_start;
1164 offset < va_end;
1165 offset += PAGE_SIZE_64) {
1166 phys_page = pmap_find_phys(pmap, offset);
1167 if (phys_page) {
1168 kprintf("pmap_is_empty(%p,0x%llx,0x%llx): "
1169 "page %d at 0x%llx\n",
1170 pmap, va_start, va_end, phys_page, offset);
1171 return FALSE;
1172 }
1173 }
1174
1175 return TRUE;
1176}
1177
1178
1179/*
1180 * Create and return a physical map.
1181 *
1182 * If the size specified for the map
1183 * is zero, the map is an actual physical
1184 * map, and may be referenced by the
1185 * hardware.
1186 *
1187 * If the size specified is non-zero,
1188 * the map will be used in software only, and
1189 * is bounded by that size.
1190 */
1191pmap_t
1192pmap_create(
316670eb 1193 ledger_t ledger,
1194 vm_map_size_t sz,
1195 boolean_t is_64bit)
1196{
1197 pmap_t p;
1198 vm_size_t size;
1199 pml4_entry_t *pml4;
1200 pml4_entry_t *kpml4;
1201
1202 PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
1203 (uint32_t) (sz>>32), (uint32_t) sz, is_64bit, 0, 0);
1204
1205 size = (vm_size_t) sz;
1206
1207 /*
1208 * A software use-only map doesn't even need a map.
1209 */
1210
1211 if (size != 0) {
1212 return(PMAP_NULL);
1213 }
1214
1215 p = (pmap_t) zalloc(pmap_zone);
1216 if (PMAP_NULL == p)
1217 panic("pmap_create zalloc");
1218 /* Zero all fields */
1219 bzero(p, sizeof(*p));
1220 /* init counts now since we'll be bumping some */
1221 simple_lock_init(&p->lock, 0);
39236c6e 1222#if 00
1223 p->stats.resident_count = 0;
1224 p->stats.resident_max = 0;
1225 p->stats.wired_count = 0;
1226#else
1227 bzero(&p->stats, sizeof (p->stats));
1228#endif
1229 p->ref_count = 1;
1230 p->nx_enabled = 1;
1231 p->pm_shared = FALSE;
1232 ledger_reference(ledger);
1233 p->ledger = ledger;
1234
 1235	p->pm_task_map = is_64bit ? TASK_MAP_64BIT : TASK_MAP_32BIT;
1236 if (pmap_pcid_ncpus)
1237 pmap_pcid_initialize(p);
316670eb 1238
6d2010ae 1239 p->pm_pml4 = zalloc(pmap_anchor_zone);
b0d623f7 1240
6d2010ae 1241 pmap_assert((((uintptr_t)p->pm_pml4) & PAGE_MASK) == 0);
b0d623f7 1242
6d2010ae 1243 memset((char *)p->pm_pml4, 0, PAGE_SIZE);
b0d623f7 1244
6d2010ae 1245 p->pm_cr3 = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
1246
1247 /* allocate the vm_objs to hold the pdpt, pde and pte pages */
1248
39236c6e 1249 p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) * PAGE_SIZE);
1250 if (NULL == p->pm_obj_pml4)
1251 panic("pmap_create pdpt obj");
1252
39236c6e 1253 p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) * PAGE_SIZE);
1254 if (NULL == p->pm_obj_pdpt)
1255 panic("pmap_create pdpt obj");
1256
39236c6e 1257 p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) * PAGE_SIZE);
1258 if (NULL == p->pm_obj)
1259 panic("pmap_create pte obj");
1260
6d2010ae 1261 /* All pmaps share the kernel's pml4 */
1262 pml4 = pmap64_pml4(p, 0ULL);
1263 kpml4 = kernel_pmap->pm_pml4;
1264 pml4[KERNEL_PML4_INDEX] = kpml4[KERNEL_PML4_INDEX];
1265 pml4[KERNEL_KEXTS_INDEX] = kpml4[KERNEL_KEXTS_INDEX];
316670eb 1266 pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX];
1267
1268 PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START,
1269 p, is_64bit, 0, 0, 0);
1270
1271 return(p);
1272}
1273
1274/*
1275 * Retire the given physical map from service.
1276 * Should only be called if the map contains
1277 * no valid mappings.
1278 */
1279
1280void
6d2010ae 1281pmap_destroy(pmap_t p)
b0d623f7 1282{
6d2010ae 1283 int c;
1284
1285 if (p == PMAP_NULL)
1286 return;
1287
1288 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START,
1289 p, 0, 0, 0, 0);
1290
1291 PMAP_LOCK(p);
1292
1293 c = --p->ref_count;
1294
1295 pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE);
1296
1297 if (c == 0) {
1298 /*
1299 * If some cpu is not using the physical pmap pointer that it
1300 * is supposed to be (see set_dirbase), we might be using the
1301 * pmap that is being destroyed! Make sure we are
1302 * physically on the right pmap:
1303 */
1304 PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL);
1305 if (pmap_pcid_ncpus)
1306 pmap_destroy_pcid_sync(p);
b0d623f7 1307 }
ebb1b9f4 1308
1309 PMAP_UNLOCK(p);
1310
1311 if (c != 0) {
1312 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
1313 p, 1, 0, 0, 0);
6d2010ae 1314 pmap_assert(p == kernel_pmap);
1315 return; /* still in use */
1316 }
1317
1318 /*
1319 * Free the memory maps, then the
1320 * pmap structure.
1321 */
1322 int inuse_ptepages = 0;
1323
6d2010ae 1324 zfree(pmap_anchor_zone, p->pm_pml4);
1325
1326 inuse_ptepages += p->pm_obj_pml4->resident_page_count;
1327 vm_object_deallocate(p->pm_obj_pml4);
1328
1329 inuse_ptepages += p->pm_obj_pdpt->resident_page_count;
1330 vm_object_deallocate(p->pm_obj_pdpt);
1331
1332 inuse_ptepages += p->pm_obj->resident_page_count;
1333 vm_object_deallocate(p->pm_obj);
1334
1335 OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count);
1336 PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE);
1337 ledger_dereference(p->ledger);
1338 zfree(pmap_zone, p);
1339
1340 PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END,
1341 0, 0, 0, 0, 0);
1342}
1343
1344/*
1345 * Add a reference to the specified pmap.
1346 */
1347
1348void
1349pmap_reference(pmap_t p)
1350{
1351 if (p != PMAP_NULL) {
1352 PMAP_LOCK(p);
1353 p->ref_count++;
 1354		PMAP_UNLOCK(p);
1355 }
1356}
1357
1358/*
1359 * Remove phys addr if mapped in specified map
1360 *
1361 */
1362void
1363pmap_remove_some_phys(
1364 __unused pmap_t map,
1365 __unused ppnum_t pn)
1366{
1367
1368/* Implement to support working set code */
1369
1370}
1371
1372
1373void
1374pmap_protect(
1375 pmap_t map,
1376 vm_map_offset_t sva,
1377 vm_map_offset_t eva,
1378 vm_prot_t prot)
1379{
1380 pmap_protect_options(map, sva, eva, prot, 0, NULL);
1381}
1382
1383
1384/*
1385 * Set the physical protection on the
1386 * specified range of this map as requested.
1387 * Will not increase permissions.
1388 */
1389void
39236c6e 1390pmap_protect_options(
1391 pmap_t map,
1392 vm_map_offset_t sva,
1393 vm_map_offset_t eva,
1394 vm_prot_t prot,
1395 unsigned int options,
1396 void *arg)
1397{
1398 pt_entry_t *pde;
1399 pt_entry_t *spte, *epte;
1400 vm_map_offset_t lva;
1401 vm_map_offset_t orig_sva;
1402 boolean_t set_NX;
1403 int num_found = 0;
1404
1405 pmap_intr_assert();
1406
1407 if (map == PMAP_NULL)
1408 return;
1409
1410 if (prot == VM_PROT_NONE) {
39236c6e 1411 pmap_remove_options(map, sva, eva, options);
1412 return;
1413 }
1414 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
1415 map,
1416 (uint32_t) (sva >> 32), (uint32_t) sva,
1417 (uint32_t) (eva >> 32), (uint32_t) eva);
1418
1419 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled)
1420 set_NX = FALSE;
1421 else
1422 set_NX = TRUE;
1423
1424 PMAP_LOCK(map);
1425
1426 orig_sva = sva;
1427 while (sva < eva) {
1428 lva = (sva + pde_mapped_size) & ~(pde_mapped_size - 1);
1429 if (lva > eva)
1430 lva = eva;
1431 pde = pmap_pde(map, sva);
1432 if (pde && (*pde & INTEL_PTE_VALID)) {
1433 if (*pde & INTEL_PTE_PS) {
1434 /* superpage */
1435 spte = pde;
1436 epte = spte+1; /* excluded */
1437 } else {
1438 spte = pmap_pte(map, (sva & ~(pde_mapped_size - 1)));
1439 spte = &spte[ptenum(sva)];
1440 epte = &spte[intel_btop(lva - sva)];
1441 }
1442
1443 for (; spte < epte; spte++) {
1444 if (!(*spte & INTEL_PTE_VALID))
1445 continue;
1446
1447 if (prot & VM_PROT_WRITE)
316670eb 1448 pmap_update_pte(spte, 0, INTEL_PTE_WRITE);
b0d623f7 1449 else
316670eb 1450 pmap_update_pte(spte, INTEL_PTE_WRITE, 0);
1451
1452 if (set_NX)
316670eb 1453 pmap_update_pte(spte, 0, INTEL_PTE_NX);
b0d623f7 1454 else
316670eb 1455 pmap_update_pte(spte, INTEL_PTE_NX, 0);
1456 num_found++;
1457 }
1458 }
1459 sva = lva;
1460 }
1461 if (num_found) {
1462 if (options & PMAP_OPTIONS_NOFLUSH)
1463 PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg);
1464 else
1465 PMAP_UPDATE_TLBS(map, orig_sva, eva);
1466 }
1467 PMAP_UNLOCK(map);
1468
1469 PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END,
1470 0, 0, 0, 0, 0);
1471
1472}
1473
1474/* Map a (possibly) autogenned block */
1475void
1476pmap_map_block(
1477 pmap_t pmap,
1478 addr64_t va,
1479 ppnum_t pa,
1480 uint32_t size,
1481 vm_prot_t prot,
1482 int attr,
1483 __unused unsigned int flags)
1484{
1485 uint32_t page;
1486 int cur_page_size;
1487
1488 if (attr & VM_MEM_SUPERPAGE)
1489 cur_page_size = SUPERPAGE_SIZE;
1490 else
1491 cur_page_size = PAGE_SIZE;
1492
1493 for (page = 0; page < size; page+=cur_page_size/PAGE_SIZE) {
316670eb 1494 pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE);
1495 va += cur_page_size;
1496 pa+=cur_page_size/PAGE_SIZE;
1497 }
1498}
1499
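/*
 * pmap_expand_pml4: allocate, zero and wire a page to back the PDPT for the
 * PML4 slot covering vaddr, record it in pm_obj_pml4 so it can be found
 * later, and install the new entry.  With PMAP_EXPAND_OPTIONS_NOWAIT the
 * routine returns KERN_RESOURCE_SHORTAGE rather than waiting for a page.
 */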
316670eb 1500kern_return_t
1501pmap_expand_pml4(
1502 pmap_t map,
1503 vm_map_offset_t vaddr,
1504 unsigned int options)
1505{
1506 vm_page_t m;
1507 pmap_paddr_t pa;
1508 uint64_t i;
1509 ppnum_t pn;
1510 pml4_entry_t *pml4p;
1511
1512 DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr);
1513
1514 /*
1515 * Allocate a VM page for the pml4 page
1516 */
1517 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1518 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1519 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1520 VM_PAGE_WAIT();
316670eb 1521 }
1522 /*
1523 * put the page into the pmap's obj list so it
1524 * can be found later.
1525 */
1526 pn = m->phys_page;
1527 pa = i386_ptob(pn);
1528 i = pml4idx(map, vaddr);
1529
1530 /*
1531 * Zero the page.
1532 */
1533 pmap_zero_page(pn);
1534
1535 vm_page_lockspin_queues();
1536 vm_page_wire(m);
1537 vm_page_unlock_queues();
1538
1539 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1540 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1541 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
1542
 1543	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1544 vm_object_lock(map->pm_obj_pml4);
1545
1546 PMAP_LOCK(map);
1547 /*
1548 * See if someone else expanded us first
1549 */
1550 if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) {
1551 PMAP_UNLOCK(map);
1552 vm_object_unlock(map->pm_obj_pml4);
1553
1554 VM_PAGE_FREE(m);
1555
1556 OSAddAtomic(-1, &inuse_ptepages_count);
1557 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1558 return KERN_SUCCESS;
1559 }
1560
1561#if 0 /* DEBUG */
39236c6e 1562 if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) {
1563 panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1564 map, map->pm_obj_pml4, vaddr, i);
1565 }
1566#endif
39236c6e 1567 vm_page_insert(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE);
1568 vm_object_unlock(map->pm_obj_pml4);
1569
1570 /*
1571 * Set the page directory entry for this page table.
1572 */
1573 pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */
1574
1575 pmap_store_pte(pml4p, pa_to_pte(pa)
1576 | INTEL_PTE_VALID
1577 | INTEL_PTE_USER
1578 | INTEL_PTE_WRITE);
1579
1580 PMAP_UNLOCK(map);
1581
316670eb 1582 return KERN_SUCCESS;
1583}
1584
1585kern_return_t
1586pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options)
1587{
1588 vm_page_t m;
1589 pmap_paddr_t pa;
1590 uint64_t i;
1591 ppnum_t pn;
1592 pdpt_entry_t *pdptp;
1593
1594 DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr);
1595
1596 while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) {
1597 kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options);
1598 if (pep4kr != KERN_SUCCESS)
1599 return pep4kr;
1600 }
1601
1602 /*
1603 * Allocate a VM page for the pdpt page
1604 */
1605 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1606 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1607 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1608 VM_PAGE_WAIT();
316670eb 1609 }
1610
1611 /*
1612 * put the page into the pmap's obj list so it
1613 * can be found later.
1614 */
1615 pn = m->phys_page;
1616 pa = i386_ptob(pn);
1617 i = pdptidx(map, vaddr);
1618
1619 /*
1620 * Zero the page.
1621 */
1622 pmap_zero_page(pn);
1623
1624 vm_page_lockspin_queues();
1625 vm_page_wire(m);
1626 vm_page_unlock_queues();
1627
1628 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1629 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1630 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
1631
 1632	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1633 vm_object_lock(map->pm_obj_pdpt);
1634
1635 PMAP_LOCK(map);
1636 /*
1637 * See if someone else expanded us first
1638 */
1639 if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) {
1640 PMAP_UNLOCK(map);
1641 vm_object_unlock(map->pm_obj_pdpt);
1642
1643 VM_PAGE_FREE(m);
1644
1645 OSAddAtomic(-1, &inuse_ptepages_count);
1646 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1647 return KERN_SUCCESS;
1648 }
1649
1650#if 0 /* DEBUG */
39236c6e 1651 if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) {
1652 panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
1653 map, map->pm_obj_pdpt, vaddr, i);
1654 }
1655#endif
39236c6e 1656 vm_page_insert(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE);
1657 vm_object_unlock(map->pm_obj_pdpt);
1658
1659 /*
1660 * Set the page directory entry for this page table.
1661 */
1662 pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */
1663
1664 pmap_store_pte(pdptp, pa_to_pte(pa)
1665 | INTEL_PTE_VALID
1666 | INTEL_PTE_USER
1667 | INTEL_PTE_WRITE);
1668
1669 PMAP_UNLOCK(map);
1670
316670eb 1671 return KERN_SUCCESS;
1672
1673}
1674
1675
1676
1677/*
1678 * Routine: pmap_expand
1679 *
1680 * Expands a pmap to be able to map the specified virtual address.
1681 *
1682 * Allocates new virtual memory for the P0 or P1 portion of the
1683 * pmap, then re-maps the physical pages that were in the old
1684 * pmap to be in the new pmap.
1685 *
1686 * Must be called with the pmap system and the pmap unlocked,
1687 * since these must be unlocked to use vm_allocate or vm_deallocate.
1688 * Thus it must be called in a loop that checks whether the map
1689 * has been expanded enough.
1690 * (We won't loop forever, since page tables aren't shrunk.)
1691 */
316670eb 1692kern_return_t
1693pmap_expand(
1694 pmap_t map,
1695 vm_map_offset_t vaddr,
1696 unsigned int options)
1697{
1698 pt_entry_t *pdp;
1699 register vm_page_t m;
1700 register pmap_paddr_t pa;
1701 uint64_t i;
1702 ppnum_t pn;
1703
1704
1705 /*
1706 * For the kernel, the virtual address must be in or above the basement
 1707 * which is for kexts and is in the 512GB immediately below the kernel.
1708 * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT
1709 */
1710 if (map == kernel_pmap &&
1711 !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))
1712 panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr);
1713
1714
1715 while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) {
1716 kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options);
1717 if (pepkr != KERN_SUCCESS)
1718 return pepkr;
1719 }
1720
1721 /*
1722 * Allocate a VM page for the pde entries.
1723 */
1724 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
1725 if (options & PMAP_EXPAND_OPTIONS_NOWAIT)
1726 return KERN_RESOURCE_SHORTAGE;
b0d623f7 1727 VM_PAGE_WAIT();
316670eb 1728 }
1729
1730 /*
1731 * put the page into the pmap's obj list so it
1732 * can be found later.
1733 */
1734 pn = m->phys_page;
1735 pa = i386_ptob(pn);
1736 i = pdeidx(map, vaddr);
1737
1738 /*
1739 * Zero the page.
1740 */
1741 pmap_zero_page(pn);
1742
1743 vm_page_lockspin_queues();
1744 vm_page_wire(m);
1745 vm_page_unlock_queues();
1746
1747 OSAddAtomic(1, &inuse_ptepages_count);
6d2010ae 1748 OSAddAtomic64(1, &alloc_ptepages_count);
316670eb 1749 PMAP_ZINFO_PALLOC(map, PAGE_SIZE);
1750
 1751	/* Take the object lock (mutex) before the PMAP_LOCK (spinlock) */
1752 vm_object_lock(map->pm_obj);
1753
1754 PMAP_LOCK(map);
1755
1756 /*
1757 * See if someone else expanded us first
1758 */
1759 if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) {
1760 PMAP_UNLOCK(map);
1761 vm_object_unlock(map->pm_obj);
1762
1763 VM_PAGE_FREE(m);
1764
1765 OSAddAtomic(-1, &inuse_ptepages_count);
1766 PMAP_ZINFO_PFREE(map, PAGE_SIZE);
1767 return KERN_SUCCESS;
1768 }
1769
1770#if 0 /* DEBUG */
39236c6e 1771 if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) {
1772 panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
1773 map, map->pm_obj, vaddr, i);
1774 }
1775#endif
39236c6e 1776 vm_page_insert(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE);
1777 vm_object_unlock(map->pm_obj);
1778
1779 /*
1780 * Set the page directory entry for this page table.
1781 */
1782 pdp = pmap_pde(map, vaddr);
1783 pmap_store_pte(pdp, pa_to_pte(pa)
1784 | INTEL_PTE_VALID
1785 | INTEL_PTE_USER
1786 | INTEL_PTE_WRITE);
1787
1788 PMAP_UNLOCK(map);
1789
316670eb 1790 return KERN_SUCCESS;
1791}
1792
1793/* On K64 machines with more than 32GB of memory, pmap_steal_memory
1794 * will allocate past the 1GB of pre-expanded virtual kernel area. This
1795 * function allocates all the page tables using memory from the same pool
1796 * that pmap_steal_memory uses, rather than calling vm_page_grab (which
1797 * isn't available yet). */
1798void
1799pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr)
1800{
1801 ppnum_t pn;
1802 pt_entry_t *pte;
1803
1804 PMAP_LOCK(pmap);
1805
1806 if(pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) {
0b4c1975 1807 if (!pmap_next_page_hi(&pn))
1808 panic("pmap_pre_expand");
1809
1810 pmap_zero_page(pn);
1811
1812 pte = pmap64_pml4(pmap, vaddr);
1813
1814 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
1815 | INTEL_PTE_VALID
1816 | INTEL_PTE_USER
1817 | INTEL_PTE_WRITE);
1818 }
1819
1820 if(pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) {
0b4c1975 1821 if (!pmap_next_page_hi(&pn))
1822 panic("pmap_pre_expand");
1823
1824 pmap_zero_page(pn);
1825
1826 pte = pmap64_pdpt(pmap, vaddr);
1827
1828 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
1829 | INTEL_PTE_VALID
1830 | INTEL_PTE_USER
1831 | INTEL_PTE_WRITE);
1832 }
1833
1834 if(pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) {
0b4c1975 1835 if (!pmap_next_page_hi(&pn))
1836 panic("pmap_pre_expand");
1837
1838 pmap_zero_page(pn);
1839
1840 pte = pmap64_pde(pmap, vaddr);
1841
1842 pmap_store_pte(pte, pa_to_pte(i386_ptob(pn))
1843 | INTEL_PTE_VALID
1844 | INTEL_PTE_USER
1845 | INTEL_PTE_WRITE);
1846 }
1847
1848 PMAP_UNLOCK(pmap);
1849}
1850
1851/*
1852 * pmap_sync_page_data_phys(ppnum_t pa)
1853 *
1854 * Invalidates all of the instruction cache on a physical page and
1855 * pushes any dirty data from the data cache for the same physical page.
1856 * Not required on i386.
1857 */
1858void
1859pmap_sync_page_data_phys(__unused ppnum_t pa)
1860{
1861 return;
1862}
1863
1864/*
1865 * pmap_sync_page_attributes_phys(ppnum_t pa)
1866 *
1867 * Write back and invalidate all cachelines on a physical page.
1868 */
1869void
1870pmap_sync_page_attributes_phys(ppnum_t pa)
1871{
1872 cache_flush_page_phys(pa);
1873}
1874
1875
1876
1877#ifdef CURRENTLY_UNUSED_AND_UNTESTED
1878
1879int collect_ref;
1880int collect_unref;
1881
1882/*
1883 * Routine: pmap_collect
1884 * Function:
1885 * Garbage collects the physical map system for
1886 * pages which are no longer used.
1887 * Success need not be guaranteed -- that is, there
1888 * may well be pages which are not referenced, but
1889 * others may be collected.
1890 * Usage:
1891 * Called by the pageout daemon when pages are scarce.
1892 */
1893void
1894pmap_collect(
1895 pmap_t p)
1896{
1897 register pt_entry_t *pdp, *ptp;
1898 pt_entry_t *eptp;
1899 int wired;
1900
1901 if (p == PMAP_NULL)
1902 return;
1903
1904 if (p == kernel_pmap)
1905 return;
1906
1907 /*
1908 * Garbage collect map.
1909 */
1910 PMAP_LOCK(p);
1911
1912 for (pdp = (pt_entry_t *)p->dirbase;
1913 pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)];
1914 pdp++)
1915 {
1916 if (*pdp & INTEL_PTE_VALID) {
1917			if (*pdp & INTEL_PTE_REF) {
1918 pmap_store_pte(pdp, *pdp & ~INTEL_PTE_REF);
1919 collect_ref++;
1920 } else {
1921 collect_unref++;
1922 ptp = pmap_pte(p, pdetova(pdp - (pt_entry_t *)p->dirbase));
1923 eptp = ptp + NPTEPG;
1924
1925 /*
1926 * If the pte page has any wired mappings, we cannot
1927 * free it.
1928 */
1929 wired = 0;
1930 {
1931 register pt_entry_t *ptep;
1932 for (ptep = ptp; ptep < eptp; ptep++) {
1933 if (iswired(*ptep)) {
1934 wired = 1;
1935 break;
1936 }
1937 }
1938 }
1939 if (!wired) {
1940 /*
1941 * Remove the virtual addresses mapped by this pte page.
1942 */
1943 pmap_remove_range(p,
1944 pdetova(pdp - (pt_entry_t *)p->dirbase),
1945 ptp,
1946 eptp);
1947
1948 /*
1949 * Invalidate the page directory pointer.
1950 */
1951 pmap_store_pte(pdp, 0x0);
1952
1953 PMAP_UNLOCK(p);
1954
1955 /*
1956 * And free the pte page itself.
1957 */
1958 {
1959 register vm_page_t m;
1960
1961 vm_object_lock(p->pm_obj);
1962
39236c6e 1963 m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE);
b0d623f7
A
1964 if (m == VM_PAGE_NULL)
1965 panic("pmap_collect: pte page not in object");
1966
6d2010ae
A
1967 vm_object_unlock(p->pm_obj);
1968
b0d623f7
A
1969 VM_PAGE_FREE(m);
1970
1971 OSAddAtomic(-1, &inuse_ptepages_count);
316670eb 1972 PMAP_ZINFO_PFREE(p, PAGE_SIZE);
b0d623f7
A
1973 }
1974
1975 PMAP_LOCK(p);
1976 }
1977 }
1978 }
1979 }
1980
1981 PMAP_UPDATE_TLBS(p, 0x0, 0xFFFFFFFFFFFFF000ULL);
1982 PMAP_UNLOCK(p);
1983 return;
1984
1985}
1986#endif
1987
1988
1989void
1990pmap_copy_page(ppnum_t src, ppnum_t dst)
1991{
1992 bcopy_phys((addr64_t)i386_ptob(src),
1993 (addr64_t)i386_ptob(dst),
1994 PAGE_SIZE);
1995}
1996
1997
1998/*
1999 * Routine: pmap_pageable
2000 * Function:
2001 * Make the specified pages (by pmap, offset)
2002 * pageable (or not) as requested.
2003 *
2004 * A page which is not pageable may not take
2005 * a fault; therefore, its page table entry
2006 * must remain valid for the duration.
2007 *
2008 * This routine is merely advisory; pmap_enter
2009 * will specify that these pages are to be wired
2010 * down (or not) as appropriate.
2011 */
2012void
2013pmap_pageable(
2014 __unused pmap_t pmap,
2015 __unused vm_map_offset_t start_addr,
2016 __unused vm_map_offset_t end_addr,
2017 __unused boolean_t pageable)
2018{
2019#ifdef lint
2020 pmap++; start_addr++; end_addr++; pageable++;
2021#endif /* lint */
2022}
2023
b0d623f7
A
2024void
2025invalidate_icache(__unused vm_offset_t addr,
2026 __unused unsigned cnt,
2027 __unused int phys)
2028{
2029 return;
2030}
2031
2032void
2033flush_dcache(__unused vm_offset_t addr,
2034 __unused unsigned count,
2035 __unused int phys)
2036{
2037 return;
2038}
2039
2040#if CONFIG_DTRACE
2041/*
2042 * Constrain DTrace copyin/copyout actions
2043 */
2044extern kern_return_t dtrace_copyio_preflight(addr64_t);
2045extern kern_return_t dtrace_copyio_postflight(addr64_t);
2046
2047kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
2048{
2049 thread_t thread = current_thread();
6d2010ae 2050 uint64_t ccr3;
b0d623f7
A
2051 if (current_map() == kernel_map)
2052 return KERN_FAILURE;
6d2010ae
A
2053 else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE))
2054 return KERN_FAILURE;
2055 else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3))
b0d623f7 2056 return KERN_FAILURE;
b0d623f7
A
2057 else
2058 return KERN_SUCCESS;
2059}
2060
2061kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
2062{
2063 return KERN_SUCCESS;
2064}
2065#endif /* CONFIG_DTRACE */
2066
2067#include <mach_vm_debug.h>
2068#if MACH_VM_DEBUG
2069#include <vm/vm_debug.h>
2070
2071int
2072pmap_list_resident_pages(
2073 __unused pmap_t pmap,
2074 __unused vm_offset_t *listp,
2075 __unused int space)
2076{
2077 return 0;
2078}
2079#endif /* MACH_VM_DEBUG */
2080
2081
2082
2083/* temporary workaround */
2084boolean_t
2085coredumpok(__unused vm_map_t map, __unused vm_offset_t va)
2086{
2087#if 0
2088 pt_entry_t *ptep;
2089
2090 ptep = pmap_pte(map->pmap, va);
2091 if (0 == ptep)
2092 return FALSE;
2093 return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED));
2094#else
2095 return TRUE;
2096#endif
2097}
2098
2099
2100boolean_t
2101phys_page_exists(ppnum_t pn)
2102{
2103 assert(pn != vm_page_fictitious_addr);
2104
2105 if (!pmap_initialized)
2106 return TRUE;
2107
2108 if (pn == vm_page_guard_addr)
2109 return FALSE;
2110
2111 if (!IS_MANAGED_PAGE(ppn_to_pai(pn)))
2112 return FALSE;
2113
2114 return TRUE;
2115}
2116
6d2010ae
A
2117
2118
b0d623f7
A
2119void
2120pmap_switch(pmap_t tpmap)
2121{
2122 spl_t s;
2123
2124 s = splhigh(); /* Make sure interruptions are disabled */
2125 set_dirbase(tpmap, current_thread());
2126 splx(s);
2127}
2128
2129
2130/*
2131 * disable no-execute capability on
2132 * the specified pmap
2133 */
2134void
2135pmap_disable_NX(pmap_t pmap)
2136{
2137 pmap->nx_enabled = 0;
2138}
2139
6d2010ae
A
2140void
2141pt_fake_zone_init(int zone_index)
2142{
2143 pt_fake_zone_index = zone_index;
2144}
2145
b0d623f7
A
2146void
2147pt_fake_zone_info(
2148 int *count,
2149 vm_size_t *cur_size,
2150 vm_size_t *max_size,
2151 vm_size_t *elem_size,
2152 vm_size_t *alloc_size,
6d2010ae 2153 uint64_t *sum_size,
b0d623f7 2154 int *collectable,
6d2010ae
A
2155 int *exhaustable,
2156 int *caller_acct)
b0d623f7
A
2157{
2158 *count = inuse_ptepages_count;
2159 *cur_size = PAGE_SIZE * inuse_ptepages_count;
2160 *max_size = PAGE_SIZE * (inuse_ptepages_count +
2161 vm_page_inactive_count +
2162 vm_page_active_count +
2163 vm_page_free_count);
2164 *elem_size = PAGE_SIZE;
2165 *alloc_size = PAGE_SIZE;
6d2010ae 2166 *sum_size = alloc_ptepages_count * PAGE_SIZE;
b0d623f7
A
2167
2168 *collectable = 1;
2169 *exhaustable = 0;
6d2010ae 2170 *caller_acct = 1;
b0d623f7
A
2171}
2172
2173static inline void
2174pmap_cpuset_NMIPI(cpu_set cpu_mask) {
2175 unsigned int cpu, cpu_bit;
2176 uint64_t deadline;
2177
2178 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2179 if (cpu_mask & cpu_bit)
2180 cpu_NMI_interrupt(cpu);
2181 }
2182 deadline = mach_absolute_time() + (LockTimeOut);
2183 while (mach_absolute_time() < deadline)
2184 cpu_pause();
2185}
2186
39236c6e
A
2187
2188void
2189pmap_flush_context_init(pmap_flush_context *pfc)
2190{
2191 pfc->pfc_cpus = 0;
2192 pfc->pfc_invalid_global = 0;
2193}
2194
2195void
2196pmap_flush(
2197 pmap_flush_context *pfc)
2198{
2199 unsigned int my_cpu;
2200 unsigned int cpu;
2201 unsigned int cpu_bit;
2202 cpu_set cpus_to_respond = 0;
2203 cpu_set cpus_to_signal = 0;
2204 cpu_set cpus_signaled = 0;
2205 boolean_t flush_self = FALSE;
2206 uint64_t deadline;
2207
2208 mp_disable_preemption();
2209
2210 my_cpu = cpu_number();
2211 cpus_to_signal = pfc->pfc_cpus;
2212
2213 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START,
2214 NULL, cpus_to_signal, 0, 0, 0);
2215
2216 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) {
2217
2218 if (cpus_to_signal & cpu_bit) {
2219
2220 cpus_to_signal &= ~cpu_bit;
2221
2222 if (!cpu_datap(cpu)->cpu_running)
2223 continue;
2224
2225 if (pfc->pfc_invalid_global & cpu_bit)
2226 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2227 else
2228 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
2229 mfence();
2230
2231 if (cpu == my_cpu) {
2232 flush_self = TRUE;
2233 continue;
2234 }
2235 if (CPU_CR3_IS_ACTIVE(cpu)) {
2236 cpus_to_respond |= cpu_bit;
2237 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2238 }
2239 }
2240 }
2241 cpus_signaled = cpus_to_respond;
2242
2243 /*
2244 * Flush local tlb if required.
2245 * Do this now to overlap with other processors responding.
2246 */
2247 if (flush_self && cpu_datap(my_cpu)->cpu_tlb_invalid != FALSE)
2248 process_pmap_updates();
2249
2250 if (cpus_to_respond) {
2251
2252 deadline = mach_absolute_time() + LockTimeOut;
2253 /*
2254 * Wait for those other cpus to acknowledge
2255 */
2256 while (cpus_to_respond != 0) {
2257 long orig_acks = 0;
2258
2259 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2260 /* Consider checking local/global invalidity
2261 * as appropriate in the PCID case.
2262 */
2263 if ((cpus_to_respond & cpu_bit) != 0) {
2264 if (!cpu_datap(cpu)->cpu_running ||
2265 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2266 !CPU_CR3_IS_ACTIVE(cpu)) {
2267 cpus_to_respond &= ~cpu_bit;
2268 }
2269 cpu_pause();
2270 }
2271 if (cpus_to_respond == 0)
2272 break;
2273 }
2274 if (cpus_to_respond && (mach_absolute_time() > deadline)) {
2275 if (machine_timeout_suspended())
2276 continue;
2277 pmap_tlb_flush_timeout = TRUE;
2278 orig_acks = NMIPI_acks;
2279 pmap_cpuset_NMIPI(cpus_to_respond);
2280
2281 panic("TLB invalidation IPI timeout: "
2282 "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
2283 cpus_to_respond, orig_acks, NMIPI_acks);
2284 }
2285 }
2286 }
2287 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END,
2288 NULL, cpus_signaled, flush_self, 0, 0);
2289
2290 mp_enable_preemption();
2291}
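/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the deferred-shootdown pattern that pmap_flush_context_init()
 * and pmap_flush() serve.  A caller batching many PTE updates records
 * the cpus needing invalidation by passing PMAP_DELAY_TLB_FLUSH (shown
 * here as a direct pmap_flush_tlbs() call, made with the pmap locked as
 * its header comment below requires), then issues a single round of
 * IPIs at the end.  The update loop and helper name are hypothetical.
 */
#if 0 /* example only */
static void
example_batched_update(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end)
{
	pmap_flush_context	pfc;

	pmap_flush_context_init(&pfc);

	PMAP_LOCK(pmap);
	/* ... modify PTEs covering [start, end) here ... */
	pmap_flush_tlbs(pmap, start, end, PMAP_DELAY_TLB_FLUSH, &pfc);
	PMAP_UNLOCK(pmap);

	/* One IPI round for every cpu accumulated in the context. */
	pmap_flush(&pfc);
}
#endif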
2292
2293
b0d623f7
A
2294/*
2295 * Called with pmap locked, we:
2296 * - scan through per-cpu data to see which other cpus need to flush
2297 * - send an IPI to each non-idle cpu to be flushed
2298 * - wait for all to signal back that they are inactive or we see that
2299 * they are at a safe point (idle).
2300 * - flush the local tlb if active for this pmap
2301 * - return ... the caller will unlock the pmap
2302 */
6d2010ae 2303
b0d623f7 2304void
39236c6e 2305pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc)
b0d623f7
A
2306{
2307 unsigned int cpu;
2308 unsigned int cpu_bit;
2309 cpu_set cpus_to_signal;
2310 unsigned int my_cpu = cpu_number();
2311 pmap_paddr_t pmap_cr3 = pmap->pm_cr3;
2312 boolean_t flush_self = FALSE;
2313 uint64_t deadline;
6d2010ae 2314 boolean_t pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap));
39236c6e 2315 boolean_t need_global_flush = FALSE;
b0d623f7
A
2316
2317 assert((processor_avail_count < 2) ||
2318 (ml_get_interrupts_enabled() && get_preemption_level() != 0));
2319
2320 /*
2321 * Scan other cpus for matching active or task CR3.
2322 * For idle cpus (with no active map) we mark them invalid but
2323 * don't signal -- they'll check as they go busy.
2324 */
2325 cpus_to_signal = 0;
6d2010ae
A
2326
2327 if (pmap_pcid_ncpus) {
39236c6e
A
2328 if (pmap_is_shared)
2329 need_global_flush = TRUE;
6d2010ae 2330 pmap_pcid_invalidate_all_cpus(pmap);
39236c6e 2331 mfence();
6d2010ae 2332 }
b0d623f7
A
2333 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
2334 if (!cpu_datap(cpu)->cpu_running)
2335 continue;
2336 uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu);
2337 uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu);
2338
2339 if ((pmap_cr3 == cpu_task_cr3) ||
2340 (pmap_cr3 == cpu_active_cr3) ||
6d2010ae 2341 (pmap_is_shared)) {
39236c6e
A
2342
2343 if (options & PMAP_DELAY_TLB_FLUSH) {
2344 if (need_global_flush == TRUE)
2345 pfc->pfc_invalid_global |= cpu_bit;
2346 pfc->pfc_cpus |= cpu_bit;
2347
2348 continue;
2349 }
b0d623f7
A
2350 if (cpu == my_cpu) {
2351 flush_self = TRUE;
2352 continue;
2353 }
39236c6e 2354 if (need_global_flush == TRUE)
6d2010ae
A
2355 cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE;
2356 else
2357 cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE;
39236c6e 2358 mfence();
b0d623f7
A
2359
2360 /*
2361 * We don't need to signal processors which will flush
2362 * lazily at the idle state or kernel boundary.
2363 * For example, if we're invalidating the kernel pmap,
2364 * processors currently in userspace don't need to flush
2365 * their TLBs until the next time they enter the kernel.
2366 * Alterations to the address space of a task active
2367 * on a remote processor result in a signal, to
2368 * account for copy operations. (There may be room
2369 * for optimization in such cases).
2370 * The order of the loads below with respect
2371 * to the store to the "cpu_tlb_invalid" field above
2372 * is important--hence the barrier.
2373 */
2374 if (CPU_CR3_IS_ACTIVE(cpu) &&
2375 (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) ||
39236c6e
A
2376 pmap->pm_shared ||
2377 (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) {
b0d623f7
A
2378 cpus_to_signal |= cpu_bit;
2379 i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC);
2380 }
2381 }
2382 }
39236c6e
A
2383 if ((options & PMAP_DELAY_TLB_FLUSH))
2384 return;
b0d623f7 2385
39236c6e
A
2386 if (pmap == kernel_pmap) {
2387 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_KERN_TLBS) | DBG_FUNC_START,
2388 pmap, cpus_to_signal, flush_self, startv, endv);
2389 } else {
2390 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_START,
2391 pmap, cpus_to_signal, flush_self, startv, endv);
2392 }
b0d623f7
A
2393 /*
2394 * Flush local tlb if required.
2395 * Do this now to overlap with other processors responding.
2396 */
6d2010ae
A
2397 if (flush_self) {
2398 if (pmap_pcid_ncpus) {
2399 pmap_pcid_validate_cpu(pmap, my_cpu);
2400 if (pmap_is_shared)
2401 tlb_flush_global();
2402 else
2403 flush_tlb_raw();
2404 }
2405 else
2406 flush_tlb_raw();
2407 }
b0d623f7
A
2408
2409 if (cpus_to_signal) {
2410 cpu_set cpus_to_respond = cpus_to_signal;
2411
2412 deadline = mach_absolute_time() + LockTimeOut;
2413 /*
2414 * Wait for those other cpus to acknowledge
2415 */
2416 while (cpus_to_respond != 0) {
060df5ea 2417 long orig_acks = 0;
b0d623f7
A
2418
2419 for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
6d2010ae
A
2420 /* Consider checking local/global invalidity
2421 * as appropriate in the PCID case.
2422 */
b0d623f7
A
2423 if ((cpus_to_respond & cpu_bit) != 0) {
2424 if (!cpu_datap(cpu)->cpu_running ||
2425 cpu_datap(cpu)->cpu_tlb_invalid == FALSE ||
2426 !CPU_CR3_IS_ACTIVE(cpu)) {
2427 cpus_to_respond &= ~cpu_bit;
2428 }
2429 cpu_pause();
2430 }
2431 if (cpus_to_respond == 0)
2432 break;
2433 }
6d2010ae 2434 if (cpus_to_respond && (mach_absolute_time() > deadline)) {
060df5ea
A
2435 if (machine_timeout_suspended())
2436 continue;
2437 pmap_tlb_flush_timeout = TRUE;
2438 orig_acks = NMIPI_acks;
2439 pmap_cpuset_NMIPI(cpus_to_respond);
2440
2441 panic("TLB invalidation IPI timeout: "
2442 "CPU(s) failed to respond to interrupts, unresponsive CPU bitmap: 0x%lx, NMIPI acks: orig: 0x%lx, now: 0x%lx",
2443 cpus_to_respond, orig_acks, NMIPI_acks);
2444 }
b0d623f7
A
2445 }
2446 }
2447
316670eb 2448 if (__improbable((pmap == kernel_pmap) && (flush_self != TRUE))) {
39236c6e
A
2449 panic("pmap_flush_tlbs: pmap == kernel_pmap && flush_self != TRUE; kernel CR3: 0x%llX, pmap_cr3: 0x%llx, CPU active CR3: 0x%llX, CPU Task Map: %d", kernel_pmap->pm_cr3, pmap_cr3, current_cpu_datap()->cpu_active_cr3, current_cpu_datap()->cpu_task_map);
2450 }
2451
2452 if (pmap == kernel_pmap) {
2453 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_KERN_TLBS) | DBG_FUNC_END,
2454 pmap, cpus_to_signal, startv, endv, 0);
2455 } else {
2456 PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS) | DBG_FUNC_END,
2457 pmap, cpus_to_signal, startv, endv, 0);
316670eb
A
2458 }
2459
b0d623f7
A
2460}
2461
2462void
2463process_pmap_updates(void)
2464{
6d2010ae
A
2465 int ccpu = cpu_number();
2466 pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
2467 if (pmap_pcid_ncpus) {
2468 pmap_pcid_validate_current();
2469 if (cpu_datap(ccpu)->cpu_tlb_invalid_global) {
2470 cpu_datap(ccpu)->cpu_tlb_invalid = FALSE;
2471 tlb_flush_global();
2472 }
2473 else {
2474 cpu_datap(ccpu)->cpu_tlb_invalid_local = FALSE;
2475 flush_tlb_raw();
2476 }
2477 }
2478 else {
2479 current_cpu_datap()->cpu_tlb_invalid = FALSE;
2480 flush_tlb_raw();
2481 }
b0d623f7 2482
39236c6e 2483 mfence();
b0d623f7
A
2484}
2485
2486void
2487pmap_update_interrupt(void)
2488{
2489 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START,
2490 0, 0, 0, 0, 0);
2491
39236c6e
A
2492 if (current_cpu_datap()->cpu_tlb_invalid)
2493 process_pmap_updates();
b0d623f7
A
2494
2495 PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END,
2496 0, 0, 0, 0, 0);
2497}
316670eb
A
2498
2499#include <mach/mach_vm.h> /* mach_vm_region_recurse() */
2500/* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries
2501 * and identify ranges with mismatched VM permissions and PTE permissions
2502 */
2503kern_return_t
2504pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) {
2505 vm_offset_t cv = sv;
2506 kern_return_t rv = KERN_SUCCESS;
2507 uint64_t skip4 = 0, skip2 = 0;
2508
2509 sv &= ~PAGE_MASK_64;
2510 ev &= ~PAGE_MASK_64;
2511 while (cv < ev) {
2512 if (__improbable((cv > 0x00007FFFFFFFFFFFULL) &&
2513 (cv < 0xFFFF800000000000ULL))) {
2514 cv = 0xFFFF800000000000ULL;
2515 }
2516 /* Potential inconsistencies from not holding pmap lock
2517 * but harmless for the moment.
2518 */
2519 if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) {
2520 if ((cv + NBPML4) > cv)
2521 cv += NBPML4;
2522 else
2523 break;
2524 skip4++;
2525 continue;
2526 }
2527 if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) {
2528 if ((cv + NBPD) > cv)
2529 cv += NBPD;
2530 else
2531 break;
2532 skip2++;
2533 continue;
2534 }
2535
2536 pt_entry_t *ptep = pmap_pte(ipmap, cv);
2537 if (ptep && (*ptep & INTEL_PTE_VALID)) {
2538 if (*ptep & INTEL_PTE_WRITE) {
2539 if (!(*ptep & INTEL_PTE_NX)) {
2540 kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep)))));
2541 rv = KERN_FAILURE;
2542 }
2543 }
2544 }
2545 cv += PAGE_SIZE;
2546 }
2547 kprintf("Completed pmap scan\n");
2548 cv = sv;
2549
2550 struct vm_region_submap_info_64 vbr;
2551 mach_msg_type_number_t vbrcount = 0;
2552 mach_vm_size_t vmsize;
2553 vm_prot_t prot;
2554 uint32_t nesting_depth = 0;
2555 kern_return_t kret;
2556
2557 while (cv < ev) {
2558
2559 for (;;) {
2560 vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
2561			if ((kret = mach_vm_region_recurse(ivmmap,
2562 (mach_vm_address_t *) &cv, &vmsize, &nesting_depth,
2563 (vm_region_recurse_info_t)&vbr,
2564 &vbrcount)) != KERN_SUCCESS) {
2565 break;
2566 }
2567
2568			if (vbr.is_submap) {
2569 nesting_depth++;
2570 continue;
2571 } else {
2572 break;
2573 }
2574 }
2575
2576		if (kret != KERN_SUCCESS)
2577 break;
2578
2579 prot = vbr.protection;
2580
2581 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
2582 kprintf("W+X map entry at address 0x%lx\n", cv);
2583 rv = KERN_FAILURE;
2584 }
2585
2586 if (prot) {
2587 vm_offset_t pcv;
2588 for (pcv = cv; pcv < cv + vmsize; pcv += PAGE_SIZE) {
2589 pt_entry_t *ptep = pmap_pte(ipmap, pcv);
2590 vm_prot_t tprot;
2591
2592 if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID))
2593 continue;
2594 tprot = VM_PROT_READ;
2595 if (*ptep & INTEL_PTE_WRITE)
2596 tprot |= VM_PROT_WRITE;
2597 if ((*ptep & INTEL_PTE_NX) == 0)
2598 tprot |= VM_PROT_EXECUTE;
2599 if (tprot != prot) {
2600 kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot);
2601 rv = KERN_FAILURE;
2602 }
2603 }
2604 }
2605 cv += vmsize;
2606 }
2607 return rv;
2608}
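/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): running the W+X / permission-mismatch scan above over the
 * kernel's address space, e.g. from a debug-only boot path.  The use
 * of kernel_map and the VM_MIN/MAX_KERNEL_ADDRESS bounds here is an
 * assumption for illustration.
 */
#if 0 /* example only */
static void
example_verify_kernel_wx(void)
{
	if (pmap_permissions_verify(kernel_pmap, kernel_map,
	    VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) != KERN_SUCCESS) {
		kprintf("pmap_permissions_verify: W+X or mismatched mappings found\n");
	}
}
#endif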