/*
- * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <i386/cpuid.h>
#include <mach/thread_status.h>
#include <pexpert/i386/efi.h>
+#include <pexpert/pexpert.h>
#include <i386/i386_lowmem.h>
+#include <i386/misc_protos.h>
#include <x86_64/lowglobals.h>
#include <i386/pal_routines.h>
#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
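/*
 * Editor's note (illustrative, not part of the original change): P2ROUNDUP
 * relies on two's-complement arithmetic to round x up to the next multiple
 * of a power-of-two alignment. -(align) is a mask with the low log2(align)
 * bits clear, so -(x) & -(align) rounds -x down, and negating the result
 * rounds x up. For example:
 *
 *   P2ROUNDUP(0x1234, 0x1000) == 0x2000
 *   P2ROUNDUP(0x2000, 0x1000) == 0x2000
 *
 * i.e., it is equivalent to ((x + align - 1) & ~(align - 1)).
 */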
vm_size_t mem_size = 0;
pmap_paddr_t first_avail = 0;/* first after page tables */
-uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */
+uint64_t max_mem; /* Size of physical memory minus carveouts (bytes), adjusted by maxmem */
+uint64_t max_mem_actual; /* Actual size of physical memory (bytes), adjusted by
+ * the maxmem boot-arg */
uint64_t mem_actual;
uint64_t sane_size = 0; /* Memory size for defaults calculations */
#define MAXLORESERVE (32 * 1024 * 1024)
ppnum_t max_ppnum = 0;
-ppnum_t lowest_lo = 0;
-ppnum_t lowest_hi = 0;
-ppnum_t highest_hi = 0;
+
+/*
+ * pmap_high_used* bound the highest range of physical memory used for
+ * kernel internals (page tables, vm_pages) via pmap_steal_memory(); these
+ * pages don't need to be encrypted in hibernation images. There can be
+ * one gap in the middle of this range due to fragmentation when using a
+ * mix of small and large pages. In that case, the fragment lives between
+ * the high and middle ranges.
+ */
+ppnum_t pmap_high_used_top = 0;
+ppnum_t pmap_high_used_bottom = 0;
+ppnum_t pmap_middle_used_top = 0;
+ppnum_t pmap_middle_used_bottom = 0;
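/*
 * Editor's note (illustrative, assuming a fragment is present in the last
 * region): the ranges recorded by pmap_hi_pages_done() below nest like this:
 *
 *   [pmap_high_used_bottom   .. pmap_high_used_top]   = [alloc_frag_down+1 .. end]
 *   [alloc_frag_up           .. alloc_frag_down]      = unallocated fragment gap
 *   [pmap_middle_used_bottom .. pmap_middle_used_top] = [alloc_down+1 .. alloc_frag_up-1]
 *
 * With no fragment, only the high range exists: [alloc_down+1 .. end].
 */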
enum {PMAP_MAX_RESERVED_RANGES = 32};
uint32_t pmap_reserved_pages_allocated = 0;
*/
extern void *last_kernel_symbol;
+#define LG_PPNUM_PAGES (I386_LPGBYTES >> PAGE_SHIFT)
+#define LG_PPNUM_MASK (I386_LPGMASK >> PAGE_SHIFT)
+
+/* Set so that no large-page fragment pages exist in the region */
+#define RESET_FRAG(r) (((r)->alloc_frag_up = 1), ((r)->alloc_frag_down = 0))
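/*
 * Editor's note (worked example, assuming 2MB large pages over 4KB base
 * pages, i.e. I386_LPGBYTES == 2MB and PAGE_SHIFT == 12): LG_PPNUM_PAGES
 * is then 512 and LG_PPNUM_MASK is 0x1ff, so a page number pn is the last
 * page of a large page exactly when (pn & LG_PPNUM_MASK) == LG_PPNUM_MASK.
 * RESET_FRAG() establishes alloc_frag_up (1) > alloc_frag_down (0), which
 * is the "no fragment" state; the inverse test
 * (alloc_frag_up <= alloc_frag_down) used throughout means a fragment exists.
 */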
+
boolean_t memmap = FALSE;
#if DEBUG || DEVELOPMENT
static void
addr64_t efi_start, efi_end;
for (j = 0; j < pmap_memory_region_count; j++, p++) {
- kprintf("pmap region %d type %d base 0x%llx alloc_up 0x%llx alloc_down 0x%llx top 0x%llx\n",
+ kprintf("pmap region %d type %d base 0x%llx alloc_up 0x%llx alloc_down 0x%llx"
+ " alloc_frag_up 0x%llx alloc_frag_down 0x%llx top 0x%llx\n",
j, p->type,
(addr64_t) p->base << I386_PGSHIFT,
(addr64_t) p->alloc_up << I386_PGSHIFT,
(addr64_t) p->alloc_down << I386_PGSHIFT,
+ (addr64_t) p->alloc_frag_up << I386_PGSHIFT,
+ (addr64_t) p->alloc_frag_down << I386_PGSHIFT,
(addr64_t) p->end << I386_PGSHIFT);
region_start = (addr64_t) p->base << I386_PGSHIFT;
region_end = ((addr64_t) p->end << I386_PGSHIFT) - 1;
segDATA = getsegbynamefromheader(&_mh_execute_header,
"__DATA");
segCONST = getsegbynamefromheader(&_mh_execute_header,
- "__CONST");
+ "__DATA_CONST");
cursectTEXT = lastsectTEXT = firstsect(segTEXT);
/* Discover the last TEXT section within the TEXT segment */
while ((cursectTEXT = nextsect(segTEXT, cursectTEXT)) != NULL) {
segSizeConst = segCONST->vmsize;
econst = sconst + segSizeConst;
- assert(((sconst | econst) & PAGE_MASK) == 0);
+ kc_format_t kc_format = KCFormatUnknown;
+
+ /* XXX: FIXME_IN_dyld: For new-style kernel caches, the ending address of __DATA_CONST may not be page-aligned */
+ if (PE_get_primary_kc_format(&kc_format) && kc_format == KCFormatFileset) {
+ /* Round up the end */
+ econst = P2ROUNDUP(econst, PAGE_SIZE);
+ edata = P2ROUNDUP(edata, PAGE_SIZE);
+ } else {
+ assert(((sconst | econst) & PAGE_MASK) == 0);
+ assert(((sdata | edata) & PAGE_MASK) == 0);
+ }
DPRINTF("segTEXTB = %p\n", (void *) segTEXTB);
DPRINTF("segDATAB = %p\n", (void *) segDATAB);
vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
vm_slinkedit = segLINKB;
vm_elinkedit = segLINKB + segSizeLINK;
+
+ /*
+ * In the fileset world, we want to be able to (un)slide addresses from
+ * the kernel or any of the kexts (e.g., for kernel logging metadata
+ * passed between the kernel and logd in userspace). VM_KERNEL_UNSLIDE
+ * (via VM_KERNEL_IS_SLID) should apply to the addresses in the range
+ * from the first basement address to the last boot kc address.
+ *
+ * ^
+ * :
+ * |
+ * vm_kernel_slid_top - ---------------------------------------------
+ * |
+ * :
+ * : Boot kc (kexts in the boot kc here)
+ * : - - - - - - - - - - - - - - - - - - - - - - -
+ * :
+ * :
+ * | Boot kc (kernel here)
+ * - ---------------------------------------------
+ * |
+ * :
+ * | Basement (kexts in pageable and aux kcs here)
+ * vm_kernel_slid_base - ---------------------------------------------
+ * 0
+ */
+
vm_kernel_slid_base = vm_kext_base + vm_kernel_slide;
- vm_kernel_slid_top = vm_prelink_einfo;
+ vm_kernel_slid_top = (kc_format == KCFormatFileset) ?
+ vm_slinkedit : vm_prelink_einfo;
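/*
 * Editor's note: a minimal sketch (not part of this change) of how the
 * range set up above is consumed. VM_KERNEL_IS_SLID() in
 * osfmk/mach/vm_param.h is, roughly, a bounds check against these globals:
 *
 *   #define VM_KERNEL_IS_SLID(_o)                            \
 *           (((vm_offset_t)(_o) >= vm_kernel_slid_base) &&   \
 *            ((vm_offset_t)(_o) <  vm_kernel_slid_top))
 *
 * so with a fileset kernelcache the slid range now spans the basement
 * through the boot kc, ending at the start of LINKEDIT (vm_slinkedit).
 */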
+
+ vm_page_kernelcache_count = (unsigned int) (atop_64(vm_kernel_top - vm_kernel_base));
vm_set_page_size();
(top < vm_kernel_base_page)) {
pmptr->alloc_up = pmptr->base;
pmptr->alloc_down = pmptr->end;
+ RESET_FRAG(pmptr);
pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
} else {
/*
*/
pmptr->alloc_up = top + 1;
pmptr->alloc_down = top;
+ RESET_FRAG(pmptr);
}
pmptr->type = pmap_type;
pmptr->attribute = mptr->Attribute;
pmptr->end = (fap - 1);
pmptr->alloc_up = pmptr->end + 1;
pmptr->alloc_down = pmptr->end;
+ RESET_FRAG(pmptr);
pmptr->type = pmap_type;
pmptr->attribute = mptr->Attribute;
/*
pmptr->type = pmap_type;
pmptr->attribute = mptr->Attribute;
pmptr->alloc_down = pmptr->end = top;
+ RESET_FRAG(pmptr);
if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) {
pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
pmptr->type = pmap_type;
pmptr->attribute = mptr->Attribute;
pmptr->alloc_down = pmptr->end = top;
+ RESET_FRAG(pmptr);
if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) {
pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
}
(pmptr->base == (prev_pmptr->end + 1))) {
prev_pmptr->end = pmptr->end;
prev_pmptr->alloc_down = pmptr->alloc_down;
+ RESET_FRAG(pmptr);
} else {
pmap_memory_region_count++;
prev_pmptr = pmptr;
sane_size = mem_actual;
/*
- * We cap at KERNEL_MAXMEM bytes (currently 32GB for K32, 96GB for K64).
+ * We cap at KERNEL_MAXMEM bytes (currently 1536GB).
* Unless overridden by the maxmem= boot-arg
* -- which is a non-zero maxmem argument to this function.
*/
if (pages_to_use == 0) {
pmap_memory_regions[cur_region].end = cur_end;
pmap_memory_regions[cur_region].alloc_down = cur_end;
+ RESET_FRAG(&pmap_memory_regions[cur_region]);
}
cur_region++;
mem_size = (vm_size_t)sane_size;
}
max_mem = sane_size;
+ max_mem_actual = sane_size;
kprintf("Physical memory %llu MB\n", sane_size / MB);
return (unsigned int)avail_remaining;
}
-
boolean_t pmap_next_page_reserved(ppnum_t *);
/*
* Pick a page from a "kernel private" reserved range; works around
- * errata on some hardware.
+ * errata on some hardware. EFI marks pages which can't be used for
+ * certain kinds of I/O-ish activities as reserved. We reserve them for
+ * kernel-internal usage and prevent them from ever going on the regular
+ * free list.
*/
boolean_t
-pmap_next_page_reserved(ppnum_t *pn)
+pmap_next_page_reserved(
+ ppnum_t *pn)
{
+ uint32_t n;
+ pmap_memory_region_t *region;
+ uint32_t reserved_index;
+
if (pmap_reserved_ranges) {
- uint32_t n;
- pmap_memory_region_t *region;
for (n = 0; n < pmap_last_reserved_range_index; n++) {
- uint32_t reserved_index = pmap_reserved_range_indices[n];
+ reserved_index = pmap_reserved_range_indices[n];
region = &pmap_memory_regions[reserved_index];
if (region->alloc_up <= region->alloc_down) {
*pn = region->alloc_up++;
- avail_remaining--;
-
- if (*pn > max_ppnum) {
- max_ppnum = *pn;
- }
+ } else if (region->alloc_frag_up <= region->alloc_frag_down) {
+ *pn = region->alloc_frag_up++;
+ } else {
+ continue;
+ }
+ avail_remaining--;
- if (lowest_lo == 0 || *pn < lowest_lo) {
- lowest_lo = *pn;
- }
+ if (*pn > max_ppnum) {
+ max_ppnum = *pn;
+ }
- pmap_reserved_pages_allocated++;
+ pmap_reserved_pages_allocated++;
#if DEBUG
- if (region->alloc_up > region->alloc_down) {
- kprintf("Exhausted reserved range index: %u, base: 0x%x end: 0x%x, type: 0x%x, attribute: 0x%llx\n", reserved_index, region->base, region->end, region->type, region->attribute);
- }
-#endif
- return TRUE;
+ if (region->alloc_up > region->alloc_down) {
+ kprintf("Exhausted reserved range index: %u, base: 0x%x end: 0x%x, type: 0x%x, attribute: 0x%llx\n", reserved_index, region->base, region->end, region->type, region->attribute);
}
+#endif
+ return TRUE;
}
}
return FALSE;
}
+/*
+ * Return the highest large page available. Fails once there are no more large pages.
+ */
+kern_return_t
+pmap_next_page_large(
+ ppnum_t *pn)
+{
+ int r;
+ pmap_memory_region_t *region;
+ ppnum_t frag_start;
+ ppnum_t lgpg;
+
+ if (avail_remaining < LG_PPNUM_PAGES) {
+ return KERN_FAILURE;
+ }
+
+ for (r = pmap_memory_region_count - 1; r >= 0; r--) {
+ region = &pmap_memory_regions[r];
+
+ /*
+ * First check if there is enough memory.
+ */
+ if (region->alloc_down < region->alloc_up ||
+ (region->alloc_down - region->alloc_up + 1) < LG_PPNUM_PAGES) {
+ continue;
+ }
+
+ /*
+ * Find the starting large page, creating a fragment if needed.
+ */
+ if ((region->alloc_down & LG_PPNUM_MASK) == LG_PPNUM_MASK) {
+ lgpg = (region->alloc_down & ~LG_PPNUM_MASK);
+ } else {
+ /* Can only have 1 fragment per region at a time */
+ if (region->alloc_frag_up <= region->alloc_frag_down) {
+ continue;
+ }
+
+ /* Check for enough room below any fragment. */
+ frag_start = (region->alloc_down & ~LG_PPNUM_MASK);
+ if (frag_start < region->alloc_up ||
+ frag_start - region->alloc_up < LG_PPNUM_PAGES) {
+ continue;
+ }
+
+ lgpg = frag_start - LG_PPNUM_PAGES;
+ region->alloc_frag_up = frag_start;
+ region->alloc_frag_down = region->alloc_down;
+ }
+
+ *pn = lgpg;
+ region->alloc_down = lgpg - 1;
+
+ avail_remaining -= LG_PPNUM_PAGES;
+ if (*pn + LG_PPNUM_MASK > max_ppnum) {
+ max_ppnum = *pn + LG_PPNUM_MASK;
+ }
+
+ return KERN_SUCCESS;
+ }
+ return KERN_FAILURE;
+}
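/*
 * Editor's note (worked example, assuming LG_PPNUM_PAGES == 512 and
 * LG_PPNUM_MASK == 0x1ff as above): suppose region->alloc_up == 0x100 and
 * region->alloc_down == 0x8ab. alloc_down is not the last page of a large
 * page, so frag_start = 0x8ab & ~0x1ff = 0x800, the large page returned is
 * [0x600 .. 0x7ff] (*pn == 0x600), the fragment becomes
 * [alloc_frag_up .. alloc_frag_down] = [0x800 .. 0x8ab], and alloc_down
 * drops to 0x5ff. The fragment pages are later handed out one at a time by
 * pmap_next_page(), pmap_next_page_hi() and pmap_next_page_reserved().
 */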
boolean_t
pmap_next_page_hi(
- ppnum_t *pn)
+ ppnum_t *pn,
+ boolean_t might_free)
{
pmap_memory_region_t *region;
- int n;
+ int n;
- if (pmap_next_page_reserved(pn)) {
+ if (!might_free && pmap_next_page_reserved(pn)) {
return TRUE;
}
if (avail_remaining) {
for (n = pmap_memory_region_count - 1; n >= 0; n--) {
region = &pmap_memory_regions[n];
-
- if (region->alloc_down >= region->alloc_up) {
+ if (region->alloc_frag_up <= region->alloc_frag_down) {
+ *pn = region->alloc_frag_down--;
+ } else if (region->alloc_down >= region->alloc_up) {
*pn = region->alloc_down--;
- avail_remaining--;
-
- if (*pn > max_ppnum) {
- max_ppnum = *pn;
- }
-
- if (lowest_lo == 0 || *pn < lowest_lo) {
- lowest_lo = *pn;
- }
-
- if (lowest_hi == 0 || *pn < lowest_hi) {
- lowest_hi = *pn;
- }
+ } else {
+ continue;
+ }
- if (*pn > highest_hi) {
- highest_hi = *pn;
- }
+ avail_remaining--;
- return TRUE;
+ if (*pn > max_ppnum) {
+ max_ppnum = *pn;
}
+
+ return TRUE;
}
}
return FALSE;
}
+/*
+ * Record which high pages have been allocated so far,
+ * so that pmap_init() can mark them PMAP_NOENCRYPT, which
+ * makes hibernation faster.
+ *
+ * Because of the code in pmap_next_page_large(), we could
+ * theoretically have fragments in several regions.
+ * In practice that just doesn't happen. The last pmap region
+ * is normally the largest and will satisfy all pmap_next_hi/large()
+ * allocations. Since this information is used as an optimization
+ * and it's ok to be conservative, we'll just record the information
+ * for the final region.
+ */
+void
+pmap_hi_pages_done(void)
+{
+ pmap_memory_region_t *r;
+
+ r = &pmap_memory_regions[pmap_memory_region_count - 1];
+ pmap_high_used_top = r->end;
+ if (r->alloc_frag_up <= r->alloc_frag_down) {
+ pmap_high_used_bottom = r->alloc_frag_down + 1;
+ pmap_middle_used_top = r->alloc_frag_up - 1;
+ if (r->alloc_up <= r->alloc_down) {
+ pmap_middle_used_bottom = r->alloc_down + 1;
+ } else {
+ pmap_high_used_bottom = r->base;
+ }
+ } else {
+ if (r->alloc_up <= r->alloc_down) {
+ pmap_high_used_bottom = r->alloc_down + 1;
+ } else {
+ pmap_high_used_bottom = r->base;
+ }
+ }
+#if DEBUG || DEVELOPMENT
+ kprintf("pmap_high_used_top 0x%x\n", pmap_high_used_top);
+ kprintf("pmap_high_used_bottom 0x%x\n", pmap_high_used_bottom);
+ kprintf("pmap_middle_used_top 0x%x\n", pmap_middle_used_top);
+ kprintf("pmap_middle_used_bottom 0x%x\n", pmap_middle_used_bottom);
+#endif
+}
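/*
 * Editor's note (continuing the worked example above): for a final region
 * with base == 0x100, end == 0x8ff, alloc_up == 0x100, alloc_down == 0x5ff
 * and fragment [0x800 .. 0x8ab] still intact, this records:
 *
 *   pmap_high_used_top      = 0x8ff   (r->end)
 *   pmap_high_used_bottom   = 0x8ac   (alloc_frag_down + 1)
 *   pmap_middle_used_top    = 0x7ff   (alloc_frag_up - 1)
 *   pmap_middle_used_bottom = 0x600   (alloc_down + 1)
 */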
+/*
+ * Return the next available page from lowest memory for general use.
+ */
boolean_t
pmap_next_page(
- ppnum_t *pn)
+ ppnum_t *pn)
{
+ pmap_memory_region_t *region;
+
if (avail_remaining) {
while (pmap_memory_region_current < pmap_memory_region_count) {
- if (pmap_memory_regions[pmap_memory_region_current].alloc_up >
- pmap_memory_regions[pmap_memory_region_current].alloc_down) {
+ region = &pmap_memory_regions[pmap_memory_region_current];
+ if (region->alloc_up <= region->alloc_down) {
+ *pn = region->alloc_up++;
+ } else if (region->alloc_frag_up <= region->alloc_frag_down) {
+ *pn = region->alloc_frag_up++;
+ } else {
pmap_memory_region_current++;
continue;
}
- *pn = pmap_memory_regions[pmap_memory_region_current].alloc_up++;
avail_remaining--;
if (*pn > max_ppnum) {
max_ppnum = *pn;
}
- if (lowest_lo == 0 || *pn < lowest_lo) {
- lowest_lo = *pn;
- }
-
return TRUE;
}
}