+/*
+ * Called once VM is fully initialized so that we can release unused
+ * sections of low memory to the general pool.
+ * Also complete the set-up of identity-mapped sections of the kernel:
+ * 1) write-protect kernel text
+ * 2) map kernel text using large pages if possible
+ * 3) read- and write-protect page zero (for K32)
+ * 4) map the global page at the appropriate virtual address.
+ *
+ * Use of large pages
+ * ------------------
+ * To effectively map and write-protect all kernel text pages, the text
+ * must be 2M-aligned at the base, and the data section above must also be
+ * 2M-aligned. That is, there's padding below and above. This is achieved
+ * through linker directives. Large pages are used only if this alignment
+ * exists (and is not overridden by the -kernel_text_ps_4K boot-arg). The
+ * memory layout is:
+ *
+ * : :
+ * | __DATA |
+ * sdata: ================== 2Meg
+ * | |
+ * | zero-padding |
+ * | |
+ * etext: ------------------
+ * | |
+ * : :
+ * | |
+ * | __TEXT |
+ * | |
+ * : :
+ * | |
+ * stext: ================== 2Meg
+ * | |
+ * | zero-padding |
+ * | |
+ * eHIB: ------------------
+ * | __HIB |
+ * : :
+ *
+ * Prior to changing the mapping from 4K to 2M, the zero-padding pages
+ * [eHIB,stext] and [etext,sdata] are ml_static_mfree()'d. Then all the
+ * 4K pages covering [stext,etext] are coalesced as 2M large pages.
+ * The now unused level-1 PTE pages are also freed.
+ */
+extern uint32_t pmap_reserved_ranges;
+void
+pmap_lowmem_finalize(void)
+{
+ spl_t spl;
+ int i;
+
+ /* Check the kernel is linked at the expected base address */
+ if (i386_btop(kvtophys((vm_offset_t) &IdlePML4)) !=
+ I386_KERNEL_IMAGE_BASE_PAGE)
+ panic("pmap_lowmem_finalize() unexpected kernel base address");
+
+ /*
+ * Update wired memory statistics for early boot pages
+ */
+ PMAP_ZINFO_PALLOC(bootstrap_wired_pages * PAGE_SIZE);
+
+ /*
+ * Free all pages in pmap regions below the base:
+ * rdar://6332712
+ * We can't free all the pages to VM that EFI reports available.
+ * Pages in the range 0xc0000-0xff000 aren't safe over sleep/wake.
+ * There's also a size miscalculation here: pend is one page less
+ * than it should be, but it is left that way for backwards
+ * compatibility.
+ * Due to this current EFI limitation, we take only the first
+ * entry in the memory region table. However, the loop is retained
+ * (with the intended termination criterion commented out) in the
+ * hope that some day we can free all low-memory ranges.
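+ * Note: the loop body is also skipped when pmap_reserved_ranges is
+ * non-zero, i.e. when the platform reports reserved physical ranges
+ * that presumably must not be released.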
+ */
+ for (i = 0;
+// pmap_memory_regions[i].end <= I386_KERNEL_IMAGE_BASE_PAGE;
+ i < 1 && (pmap_reserved_ranges == 0);
+ i++) {
+ vm_offset_t pbase = (vm_offset_t)i386_ptob(pmap_memory_regions[i].base);
+ vm_offset_t pend = (vm_offset_t)i386_ptob(pmap_memory_regions[i].end);
+// vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1);
+
+ DBG("ml_static_mfree(%p,%p) for pmap region %d\n",
+ (void *) ml_static_ptovirt(pbase),
+ (void *) (pend - pbase), i);
+ ml_static_mfree(ml_static_ptovirt(pbase), pend - pbase);
+ }
+
+ /*
+ * If text and data are both 2MB-aligned,
+ * we can map text with large pages,
+ * unless the -kernel_text_ps_4K boot-arg overrides.
+ */
+ if ((stext & I386_LPGMASK) == 0 && (sdata & I386_LPGMASK) == 0) {
+ kprintf("Kernel text is 2MB aligned");
+ kernel_text_ps_4K = FALSE;
+ if (PE_parse_boot_argn("-kernel_text_ps_4K",
+ &kernel_text_ps_4K,
+ sizeof (kernel_text_ps_4K)))
+ kprintf(" but will be mapped with 4K pages\n");
+ else
+ kprintf(" and will be mapped with 2M pages\n");
+ }
+
+ (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel));
+ if (wpkernel)
+ kprintf("Kernel text %p-%p to be write-protected\n",
+ (void *) stext, (void *) etext);
+
+ spl = splhigh();
+
+ /*
+ * Scan over text if mappings are to be changed:
+ * - Remap kernel text read-only unless the "wpkernel" boot-arg is 0
+ * - Change to large pages if possible and not overridden.
+ */
+ if (kernel_text_ps_4K && wpkernel) {
+ vm_offset_t myva;
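+ /*
+ * Clear the writable bit on each existing 4K text mapping;
+ * all other PTE attributes are preserved.
+ */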
+ for (myva = stext; myva < etext; myva += PAGE_SIZE) {
+ pt_entry_t *ptep;
+
+ ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+ if (ptep)
+ pmap_store_pte(ptep, *ptep & ~INTEL_PTE_RW);
+ }
+ }
+
+ if (!kernel_text_ps_4K) {
+ vm_offset_t myva;
+
+ /*
+ * Release zero-filled page padding used for 2M-alignment.
+ */
+ DBG("ml_static_mfree(%p,%p) for padding below text\n",
+ (void *) eHIB, (void *) (stext - eHIB));
+ ml_static_mfree(eHIB, stext - eHIB);
+ DBG("ml_static_mfree(%p,%p) for padding above text\n",
+ (void *) etext, (void *) (sdata - etext));
+ ml_static_mfree(etext, sdata - etext);
+
+ /*
+ * Coalesce text pages into large pages.
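+ * For each 2M-aligned chunk, the PDE (which currently points at
+ * a 4K PTE page) is rewritten as a direct 2M translation: the PS
+ * bit is set, the page frame is taken from the first 4K PTE, and
+ * the remaining attribute bits are carried over from the old PDE.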
+ */
+ for (myva = stext; myva < sdata; myva += I386_LPGBYTES) {
+ pt_entry_t *ptep;
+ vm_offset_t pte_phys;
+ pt_entry_t *pdep;
+ pt_entry_t pde;
+
+ pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva);
+ ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva);
+ DBG("myva: %p pdep: %p ptep: %p\n",
+ (void *) myva, (void *) pdep, (void *) ptep);
+ if ((*ptep & INTEL_PTE_VALID) == 0)
+ continue;
+ pte_phys = (vm_offset_t)(*ptep & PG_FRAME);
+ pde = *pdep & PTMASK; /* page attributes from pde */
+ pde |= INTEL_PTE_PS; /* make it a 2M entry */
+ pde |= pte_phys; /* take page frame from pte */
+
+ if (wpkernel)
+ pde &= ~INTEL_PTE_RW;
+ DBG("pmap_store_pte(%p,0x%llx)\n",
+ (void *)pdep, pde);
+ pmap_store_pte(pdep, pde);
+
+ /*
+ * Free the now-unused level-1 PTE page.
+ * Note: ptep is a virtual address to the pte in the
+ * recursive map. We can't use this address to free
+ * the page. Instead we need to compute its address
+ * in the Idle PTEs in "low memory".
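+ * Since each 4K page of PTEs maps 2M, the offset of this PTE
+ * page within the contiguous KPTphys table area is the mapped
+ * physical address divided by the number of PTEs per page
+ * (pte_phys >> PTPGSHIFT).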
+ */
+ vm_offset_t vm_ptep = (vm_offset_t) KPTphys
+ + (pte_phys >> PTPGSHIFT);
+ DBG("ml_static_mfree(%p,0x%x) for pte\n",
+ (void *) vm_ptep, PAGE_SIZE);
+ ml_static_mfree(vm_ptep, PAGE_SIZE);
+ }
+
+ /* Change variable read by sysctl machdep.pmap */
+ pmap_kernel_text_ps = I386_LPGBYTES;
+ }
+
+ /* map lowmem global page into fixed addr */
+ pt_entry_t *pte = NULL;
+ if (0 == (pte = pmap_pte(kernel_pmap,
+ VM_MIN_KERNEL_LOADED_ADDRESS + 0x2000)))
+ panic("lowmem pte");
+ /* make sure it is defined on a page boundary */
+ assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK));
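+ /*
+ * Install the mapping wired, valid and writable, with the
+ * referenced and modified bits pre-set.
+ */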
+ pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
+ | INTEL_PTE_REF
+ | INTEL_PTE_MOD
+ | INTEL_PTE_WIRED
+ | INTEL_PTE_VALID
+ | INTEL_PTE_RW);
+ splx(spl);
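+ /*
+ * Flush stale translations so the protection and page-size
+ * changes take effect; a global flush is used when PCIDs are
+ * active, since kernel mappings are then typically global.
+ */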
+ if (pmap_pcid_ncpus)
+ tlb_flush_global();
+ else
+ flush_tlb_raw();
+}