- if (i386_ptob(pmptr->end) > avail_end ) {
- avail_end = i386_ptob(pmptr->end);
- }
- avail_remaining += (pmptr->end - pmptr->base);
- pmap_memory_region_count++;
- pmptr++;
- }
-#else /* non PAE follows */
-#define FOURGIG 0x0000000100000000ULL
- for (i=0; i < args->memoryMapCount; i++,mptr++) {
- ppnum_t base, top;
-
- base = (ppnum_t) (mptr->base >> I386_PGSHIFT);
- top = (ppnum_t) ((mptr->base + mptr->length) >> I386_PGSHIFT) - 1;
-
- if (maxmem) {
- if (base >= maxpg) break;
- top = (top > maxpg)? maxpg : top;
- }
-
-
- // save other regions
- if (kMemoryRangeNVS == mptr->type) {
- // Mark this as a memory range (for hibernation),
- // but don't count as usable memory
- pmptr->base = base;
- pmptr->end = ((mptr->base + mptr->length + I386_PGBYTES - 1) >> I386_PGSHIFT) - 1;
- pmptr->alloc = pmptr->end;
- pmptr->type = mptr->type;
- kprintf("NVS region: 0x%x ->0x%x\n", pmptr->base, pmptr->end);
- } else if (kMemoryRangeUsable != mptr->type) {
- continue;
- } else {
- // Usable memory region
- sane_size += (uint64_t)(mptr->length);
- if (top < fap) {
- /* entire range below first_avail */
- /* salvage some low memory pages */
- /* we use some very low memory at startup */
- /* mark as already allocated here */
- pmptr->base = 0x18; /* PAE and HIB use below this */
- pmptr->alloc = pmptr->end = top; /* mark as already mapped */
- pmptr->type = mptr->type;
- } else if (mptr->base >= FOURGIG) {
- /* entire range above 4GB (pre PAE) */
- continue;
- } else if ( (base < fap) &&
- (top > fap)) {
- /* spans first_avail */
- /* put mem below first avail in table but
- mark already allocated */
- pmptr->base = base;
- pmptr->alloc = pmptr->end = (fap - 1);
- pmptr->type = mptr->type;
- /* we bump these here inline so the accounting below works
- correctly */
- pmptr++;
- pmap_memory_region_count++;
- pmptr->alloc = pmptr->base = fap;
- pmptr->type = mptr->type;
- pmptr->end = top;
- } else if ( (mptr->base < FOURGIG) &&
- ((mptr->base+mptr->length) > FOURGIG) ) {
- /* spans across 4GB (pre PAE) */
- pmptr->alloc = pmptr->base = base;
- pmptr->type = mptr->type;
- pmptr->end = (FOURGIG >> I386_PGSHIFT) - 1;
- } else {
- /* entire range useable */
- pmptr->alloc = pmptr->base = base;
- pmptr->type = mptr->type;
- pmptr->end = top;
- }
-
- if (i386_ptob(pmptr->end) > avail_end ) {
- avail_end = i386_ptob(pmptr->end);
- }
-
- avail_remaining += (pmptr->end - pmptr->base);
- pmap_memory_region_count++;
- pmptr++;
- }
+ region_bytes = (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
+ pmap_type = mptr->Type;
+
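+ /*
+ * Illustrative arithmetic: with 4K pages (I386_PGSHIFT == 12), a
+ * descriptor with NumberOfPages == 0x40000 yields region_bytes ==
+ * 0x40000 << 12 == 1GB; the switch below classifies the region and
+ * charges those bytes to the matching firmware_* counter.
+ */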
+ switch (mptr->Type) {
+ case kEfiLoaderCode:
+ case kEfiLoaderData:
+ case kEfiBootServicesCode:
+ case kEfiBootServicesData:
+ case kEfiConventionalMemory:
+ /*
+ * Consolidate usable memory types into one.
+ */
+ pmap_type = kEfiConventionalMemory;
+ sane_size += region_bytes;
+ firmware_Conventional_bytes += region_bytes;
+ break;
+ /*
+ * sane_size should reflect the total amount of physical
+ * RAM in the system, not just the amount that is
+ * available for the OS to use.
+ * FIXME: Consider deriving this value from SMBIOS tables
+ * rather than reverse engineering the memory map.
+ * Alternatively, see
+ * <rdar://problem/4642773> Memory map should
+ * describe all memory
+ * Firmware on some systems guarantees that the memory
+ * map is complete via the "RomReservedMemoryTracked"
+ * feature field--consult that where possible to
+ * avoid the "round up to 128M" workaround below.
+ */
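+ /*
+ * For example, the "round up to 128M" pass referred to above would
+ * take a sane_size of 0xF7F00000 up to the next 128MB boundary,
+ * 0xF8000000.
+ */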
+
+ case kEfiRuntimeServicesCode:
+ case kEfiRuntimeServicesData:
+ firmware_RuntimeServices_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+ case kEfiACPIReclaimMemory:
+ firmware_ACPIReclaim_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+ case kEfiACPIMemoryNVS:
+ firmware_ACPINVS_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+ case kEfiPalCode:
+ firmware_PalCode_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+
+ case kEfiReservedMemoryType:
+ firmware_Reserved_bytes += region_bytes;
+ break;
+ case kEfiUnusableMemory:
+ firmware_Unusable_bytes += region_bytes;
+ break;
+ case kEfiMemoryMappedIO:
+ case kEfiMemoryMappedIOPortSpace:
+ firmware_MMIO_bytes += region_bytes;
+ break;
+ default:
+ firmware_other_bytes += region_bytes;
+ break;
+ }
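+ /*
+ * Running total: sane_size now includes the Conventional,
+ * RuntimeServices, ACPIReclaim, ACPINVS and PalCode categories;
+ * Reserved, Unusable, MMIO and unrecognized regions are tallied in
+ * their firmware_* counters but deliberately excluded from sane_size.
+ */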
+
+ DBG("EFI region %d: type %u/%d, base 0x%x, top 0x%x %s\n",
+ i, mptr->Type, pmap_type, base, top,
+ (mptr->Attribute&EFI_MEMORY_KERN_RESERVED)? "RESERVED" :
+ (mptr->Attribute&EFI_MEMORY_RUNTIME)? "RUNTIME" : "");
+
+ if (maxpg) {
+ if (base >= maxpg)
+ break;
+ top = (top > maxpg) ? maxpg : top;
+ }
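+ /*
+ * e.g. if maxpg were 0x80000 (2GB of 4K pages), a region based at or
+ * above page 0x80000 would terminate the scan, and one merely ending
+ * above it would be clipped to that page.
+ */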
+
+ /*
+ * handle each region
+ */
+ if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
+ pmap_type != kEfiConventionalMemory) {
+ prev_pmptr = 0;
+ continue;
+ } else {
+ /*
+ * Usable memory region
+ */
+ if (top < I386_LOWMEM_RESERVED ||
+ !pal_is_usable_memory(base, top)) {
+ prev_pmptr = 0;
+ continue;
+ }
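+ /*
+ * pal_is_usable_memory() is a platform-abstraction hook; presumably
+ * it accepts everything on bare metal and lets virtualized platforms
+ * veto ranges here.
+ */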
+ /*
+ * A range may be marked with the
+ * EFI_MEMORY_KERN_RESERVED attribute
+ * on some systems, to indicate that the range
+ * must not be made available to devices.
+ */
+
+ if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) {
+ if (++pmap_reserved_ranges > PMAP_MAX_RESERVED_RANGES) {
+ panic("Too many reserved ranges %u\n", pmap_reserved_ranges);
+ }
+ }
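+ /*
+ * Note that pmap_reserved_ranges counts every KERN_RESERVED
+ * descriptor seen, so the panic above bounds the
+ * pmap_reserved_range_indices bookkeeping used below.
+ */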
+
+ if (top < fap) {
+ /*
+ * entire range below first_avail
+ * salvage some low memory pages
+ * we use some very low memory at startup
+ * mark as already allocated here
+ */
+ if (base >= I386_LOWMEM_RESERVED)
+ pmptr->base = base;
+ else
+ pmptr->base = I386_LOWMEM_RESERVED;
+
+ pmptr->end = top;
+
+ if ((mptr->Attribute & EFI_MEMORY_KERN_RESERVED) &&
+ (top < vm_kernel_base_page)) {
+ pmptr->alloc = pmptr->base;
+ pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
+ }
+ else {
+ /*
+ * mark as already mapped
+ */
+ pmptr->alloc = top;
+ }
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
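+ /*
+ * Example: with fap == 0x100, a usable region covering pages
+ * [0x2, 0x90] lands in this branch: base is clipped up to
+ * I386_LOWMEM_RESERVED, and unless the range is KERN_RESERVED below
+ * the kernel base page, alloc == top records every page as already
+ * taken by early startup.
+ */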
+ }
+ else if ( (base < fap) && (top > fap) ) {
+ /*
+ * spans first_avail
+ * put mem below first avail in table but
+ * mark already allocated
+ */
+ pmptr->base = base;
+ pmptr->alloc = pmptr->end = (fap - 1);
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ /*
+ * we bump these here inline so the accounting
+ * below works correctly
+ */
+ pmptr++;
+ pmap_memory_region_count++;
+
+ pmptr->alloc = pmptr->base = fap;
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ pmptr->end = top;
+
+ if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
+ pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
+ } else {
+ /*
+ * entire range usable
+ */
+ pmptr->alloc = pmptr->base = base;
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ pmptr->end = top;
+ if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
+ pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
+ }
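+ /*
+ * Worked example: with fap == 0x100, a usable region spanning pages
+ * [0x80, 0x200] takes the middle branch and is split into
+ * [0x80, 0xFF] with alloc == end (consumed by startup) and
+ * [0x100, 0x200] with alloc == base (fully available).
+ */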
+
+ if (i386_ptob(pmptr->end) > avail_end)
+ avail_end = i386_ptob(pmptr->end);
+
+ avail_remaining += (pmptr->end - pmptr->base);
+ coalescing_permitted = (prev_pmptr &&
+     (pmptr->attribute == prev_pmptr->attribute) &&
+     ((pmptr->attribute & EFI_MEMORY_KERN_RESERVED) == 0));
+ /*
+ * Consolidate contiguous memory regions, if possible
+ */
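+ /*
+ * e.g. two fully-free kEfiConventionalMemory entries covering pages
+ * [0x100, 0x1FF] and [0x200, 0x2FF], with identical attributes and
+ * no KERN_RESERVED bit, collapse into one [0x100, 0x2FF] entry and
+ * pmap_memory_region_count is not bumped.
+ */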
+ if (prev_pmptr &&
+ (pmptr->type == prev_pmptr->type) &&
+ (coalescing_permitted) &&
+ (pmptr->base == pmptr->alloc) &&
+ (pmptr->base == (prev_pmptr->end + 1)))
+ {
+ if (prev_pmptr->end == prev_pmptr->alloc)
+ prev_pmptr->alloc = pmptr->base;
+ prev_pmptr->end = pmptr->end;
+ } else {
+ pmap_memory_region_count++;
+ prev_pmptr = pmptr;
+ pmptr++;
+ }
+ }