+ region_bytes = (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
+ pmap_type = mptr->Type;
+
+ /*
+ * Classify this EFI memory-map descriptor: fold the usable EFI
+ * types into kEfiConventionalMemory and keep per-type byte counts
+ * for diagnostics.  Only types describing real RAM contribute to
+ * sane_size.
+ */
+ switch (mptr->Type) {
+ case kEfiLoaderCode:
+ case kEfiLoaderData:
+ case kEfiBootServicesCode:
+ case kEfiBootServicesData:
+ case kEfiConventionalMemory:
+ /*
+ * Consolidate usable memory types into one.
+ */
+ pmap_type = kEfiConventionalMemory;
+ sane_size += region_bytes;
+ firmware_Conventional_bytes += region_bytes;
+ break;
+ /*
+ * sane_size should reflect the total amount of physical
+ * RAM in the system, not just the amount that is
+ * available for the OS to use.
+ * FIXME: Consider deriving this value from SMBIOS tables
+ * rather than reverse engineering the memory map.
+ * Alternatively, see
+ * <rdar://problem/4642773> Memory map should
+ * describe all memory
+ * Firmware on some systems guarantees that the memory
+ * map is complete via the "RomReservedMemoryTracked"
+ * feature field--consult that where possible to
+ * avoid the "round up to 128M" workaround below.
+ */
+
+ case kEfiRuntimeServicesCode:
+ case kEfiRuntimeServicesData:
+ firmware_RuntimeServices_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+ case kEfiACPIReclaimMemory:
+ firmware_ACPIReclaim_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+ case kEfiACPIMemoryNVS:
+ firmware_ACPINVS_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+ case kEfiPalCode:
+ firmware_PalCode_bytes += region_bytes;
+ sane_size += region_bytes;
+ break;
+
+ case kEfiReservedMemoryType:
+ firmware_Reserved_bytes += region_bytes;
+ break;
+ case kEfiUnusableMemory:
+ firmware_Unusable_bytes += region_bytes;
+ break;
+ case kEfiMemoryMappedIO:
+ case kEfiMemoryMappedIOPortSpace:
+ firmware_MMIO_bytes += region_bytes;
+ break;
+ default:
+ firmware_other_bytes += region_bytes;
+ break;
+ }
+
+ DBG("EFI region %d: type %u/%d, base 0x%x, top 0x%x %s\n",
+ i, mptr->Type, pmap_type, base, top,
+ (mptr->Attribute&EFI_MEMORY_KERN_RESERVED)? "RESERVED" :
+ (mptr->Attribute&EFI_MEMORY_RUNTIME)? "RUNTIME" : "");
+
+ /*
+ * If a page-frame cap is in effect (maxpg != 0; presumably set
+ * from a maxmem-style limit at the loop's setup -- TODO confirm),
+ * skip regions entirely above it and clamp ones straddling it.
+ */
+ if (maxpg) {
+ if (base >= maxpg)
+ break;
+ top = (top > maxpg) ? maxpg : top;
+ }
+
+ /*
+ * handle each region
+ */
+ if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
+ pmap_type != kEfiConventionalMemory) {
+ /*
+ * EFI runtime regions and non-RAM types are not entered into
+ * the pmap region table; clear prev_pmptr so the coalescing
+ * pass below cannot merge across this gap.
+ */
+ prev_pmptr = 0;
+ continue;
+ } else {
+ /*
+ * Usable memory region
+ */
+ if (top < I386_LOWMEM_RESERVED ||
+ !pal_is_usable_memory(base, top)) {
+ prev_pmptr = 0;
+ continue;
+ }
+ /*
+ * A range may be marked with the
+ * EFI_MEMORY_KERN_RESERVED attribute
+ * on some systems, to indicate that the range
+ * must not be made available to devices.
+ */
+
+ if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) {
+ if (++pmap_reserved_ranges > PMAP_MAX_RESERVED_RANGES) {
+ panic("Too many reserved ranges %u\n", pmap_reserved_ranges);
+ }
+ }
+
+ if (top < fap) {
+ /*
+ * entire range below first_avail
+ * salvage some low memory pages
+ * we use some very low memory at startup
+ * mark as already allocated here
+ */
+ if (base >= I386_LOWMEM_RESERVED)
+ pmptr->base = base;
+ else
+ pmptr->base = I386_LOWMEM_RESERVED;
+
+ pmptr->end = top;
+
+
+ /*
+ * A KERN_RESERVED region entirely below the kernel's base
+ * page keeps its pages allocatable (alloc_up..alloc_down
+ * spans the whole region) and is recorded in the
+ * reserved-range index table.
+ */
+ if ((mptr->Attribute & EFI_MEMORY_KERN_RESERVED) &&
+ (top < vm_kernel_base_page)) {
+ pmptr->alloc_up = pmptr->base;
+ pmptr->alloc_down = pmptr->end;
+ pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
+ }
+ else {
+ /*
+ * mark as already mapped: alloc_up > alloc_down
+ * leaves no allocatable pages in this region.
+ */
+ pmptr->alloc_up = top + 1;
+ pmptr->alloc_down = top;
+ }
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ }
+ else if ( (base < fap) && (top > fap) ) {
+ /*
+ * spans first_avail
+ * put mem below first avail in table but
+ * mark already allocated
+ */
+ pmptr->base = base;
+ pmptr->end = (fap - 1);
+ pmptr->alloc_up = pmptr->end + 1;
+ pmptr->alloc_down = pmptr->end;
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ /*
+ * we bump these here inline so the accounting
+ * below works correctly
+ */
+ pmptr++;
+ pmap_memory_region_count++;
+
+ /* Second half of the split: fully available from fap up. */
+ pmptr->alloc_up = pmptr->base = fap;
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ pmptr->alloc_down = pmptr->end = top;
+
+ if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
+ pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
+ } else {
+ /*
+ * entire range useable
+ */
+ pmptr->alloc_up = pmptr->base = base;
+ pmptr->type = pmap_type;
+ pmptr->attribute = mptr->Attribute;
+ pmptr->alloc_down = pmptr->end = top;
+ if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
+ pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
+ }
+
+ if (i386_ptob(pmptr->end) > avail_end )
+ avail_end = i386_ptob(pmptr->end);
+
+ avail_remaining += (pmptr->end - pmptr->base);
+ /*
+ * KERN_RESERVED regions are tracked by table index above, so
+ * they must never be merged with a neighboring region.
+ */
+ coalescing_permitted = (prev_pmptr && (pmptr->attribute == prev_pmptr->attribute) && ((pmptr->attribute & EFI_MEMORY_KERN_RESERVED) == 0));
+ /*
+ * Consolidate contiguous memory regions, if possible:
+ * same type and attributes, physically adjacent
+ * (base == prev end + 1), and neither region's allocation
+ * cursors have been advanced past their pristine positions.
+ */
+ if (prev_pmptr &&
+ (pmptr->type == prev_pmptr->type) &&
+ (coalescing_permitted) &&
+ (pmptr->base == pmptr->alloc_up) &&
+ (prev_pmptr->end == prev_pmptr->alloc_down) &&
+ (pmptr->base == (prev_pmptr->end + 1)))
+ {
+ prev_pmptr->end = pmptr->end;
+ prev_pmptr->alloc_down = pmptr->alloc_down;
+ } else {
+ /* Not mergeable: commit this region as a new table entry. */
+ pmap_memory_region_count++;
+ prev_pmptr = pmptr;
+ pmptr++;
+ }
+ }