- for (i=0; i < args->memoryMapCount; i++,mptr++) {
- ppnum_t base, top;
-
- base = (ppnum_t) (mptr->base >> I386_PGSHIFT);
- top = (ppnum_t) ((mptr->base + mptr->length) >> I386_PGSHIFT) - 1;
-
- if (maxmem) {
- if (base >= maxpg) break;
- top = (top > maxpg)? maxpg : top;
- }
-
- // save other regions
- if (kMemoryRangeNVS == mptr->type) {
- // Mark this as a memory range (for hibernation),
- // but don't count as usable memory
- pmptr->base = base;
- pmptr->end = ((mptr->base + mptr->length + I386_PGBYTES - 1) >> I386_PGSHIFT) - 1;
- pmptr->alloc = pmptr->end;
- pmptr->type = mptr->type;
- kprintf("NVS region: 0x%x ->0x%x\n", pmptr->base, pmptr->end);
- } else if (kMemoryRangeUsable != mptr->type) {
- continue;
- } else {
- // Usable memory region
- sane_size += (uint64_t)(mptr->length);
- if (top < fap) {
- /* entire range below first_avail */
- /* salvage some low memory pages */
- /* we use some very low memory at startup */
- /* mark as already allocated here */
- pmptr->base = 0x18; /* PAE and HIB use below this */
- pmptr->alloc = pmptr->end = top; /* mark as already mapped */
- pmptr->type = mptr->type;
- } else if (mptr->base >= FOURGIG) {
- /* entire range above 4GB (pre PAE) */
- continue;
- } else if ( (base < fap) &&
- (top > fap)) {
- /* spans first_avail */
- /* put mem below first avail in table but
- mark already allocated */
- pmptr->base = base;
- pmptr->alloc = pmptr->end = (fap - 1);
- pmptr->type = mptr->type;
- /* we bump these here inline so the accounting below works
- correctly */
- pmptr++;
- pmap_memory_region_count++;
- pmptr->alloc = pmptr->base = fap;
- pmptr->type = mptr->type;
- pmptr->end = top;
- } else if ( (mptr->base < FOURGIG) &&
- ((mptr->base+mptr->length) > FOURGIG) ) {
- /* spans across 4GB (pre PAE) */
- pmptr->alloc = pmptr->base = base;
- pmptr->type = mptr->type;
- pmptr->end = (FOURGIG >> I386_PGSHIFT) - 1;
- } else {
- /* entire range usable */
- pmptr->alloc = pmptr->base = base;
- pmptr->type = mptr->type;
- pmptr->end = top;
- }
-
- if (i386_ptob(pmptr->end) > avail_end ) {
- avail_end = i386_ptob(pmptr->end);
- }
-
- avail_remaining += (pmptr->end - pmptr->base);
- pmap_memory_region_count++;
- pmptr++;
- }
+
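+ /*
+  * Walk the EFI memory map.  Each descriptor is advanced by msize bytes
+  * rather than sizeof(EfiMemoryRange), since the firmware may report a
+  * descriptor size larger than the structure we know about; mcount and
+  * msize are assumed to be the descriptor count and descriptor size
+  * handed to us by the booter.
+  */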
+ for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
+ ppnum_t base, top;
+
+ if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) {
+ kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count);
+ break;
+ }
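+ /*
+  * base and top are the first and last physical page numbers
+  * (inclusive) covered by this descriptor.
+  */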
+ base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
+ top = (ppnum_t) ((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1;
+
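+ /*
+  * Map the EFI memory type onto the coarser classification kept in
+  * pmap_type, accounting for the region in sane_size where appropriate.
+  */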
+ switch (mptr->Type) {
+ case kEfiLoaderCode:
+ case kEfiLoaderData:
+ case kEfiBootServicesCode:
+ case kEfiBootServicesData:
+ case kEfiConventionalMemory:
+ /*
+ * Consolidate usable memory types into one.
+ */
+ pmap_type = kEfiConventionalMemory;
+ sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
+ break;
+
+ case kEfiRuntimeServicesCode:
+ case kEfiRuntimeServicesData:
+ case kEfiACPIReclaimMemory:
+ case kEfiACPIMemoryNVS:
+ case kEfiPalCode:
+ /*
+ * sane_size should reflect the total amount of physical ram
+ * in the system, not just the amount that is available for
+ * the OS to use
+ */
+ sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
+ /* fall thru */
+
+ case kEfiUnusableMemory:
+ case kEfiMemoryMappedIO:
+ case kEfiMemoryMappedIOPortSpace:
+ case kEfiReservedMemoryType:
+ default:
+ pmap_type = mptr->Type;
+ }
+
+ kprintf("EFI region: type = %d/%d, base = 0x%x, top = 0x%x\n", mptr->Type, pmap_type, base, top);
+
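+ /*
+  * Respect any configured memory ceiling; maxpg is presumably derived
+  * from the maxmem limit, as in the legacy path above.  Once a region
+  * starts at or above the ceiling we stop (the map is assumed to be
+  * sorted by physical address); a region straddling it is clamped.
+  */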
+ if (maxpg) {
+ if (base >= maxpg)
+ break;
+ top = (top > maxpg) ? maxpg : top;
+ }
+
+ /*
+ * handle each region
+ */
+ if (kEfiACPIMemoryNVS == pmap_type) {
+ prev_pmptr = 0;
+ continue;
+ } else if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
+ pmap_type != kEfiConventionalMemory) {
+ prev_pmptr = 0;
+ continue;
+ } else {
+ /*
+ * Usable memory region
+ */
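+ /*
+  * Drop regions that lie entirely below the reserved low-memory
+  * boundary (I386_LOWMEM_RESERVED, in pages).
+  */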
+ if (top < I386_LOWMEM_RESERVED) {
+ prev_pmptr = 0;
+ continue;
+ }
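+ /*
+  * fap is presumably first_avail as a page number: the first page
+  * not already claimed during early startup.
+  */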
+ if (top < fap) {
+ /*
+ * entire range below first_avail
+ * salvage some low memory pages
+ * we use some very low memory at startup
+ * mark as already allocated here
+ */
+ if (base >= I386_LOWMEM_RESERVED)
+ pmptr->base = base;
+ else
+ pmptr->base = I386_LOWMEM_RESERVED;
+ /*
+ * mark as already mapped
+ */
+ pmptr->alloc = pmptr->end = top;
+ pmptr->type = pmap_type;
+ }
+ else if ( (base < fap) && (top > fap) ) {
+ /*
+ * spans first_avail
+ * put mem below first avail in table but
+ * mark already allocated
+ */
+ pmptr->base = base;
+ pmptr->alloc = pmptr->end = (fap - 1);
+ pmptr->type = pmap_type;
+ /*
+ * we bump these here inline so the accounting
+ * below works correctly
+ */
+ pmptr++;
+ pmap_memory_region_count++;
+ pmptr->alloc = pmptr->base = fap;
+ pmptr->type = pmap_type;
+ pmptr->end = top;
+ }
+ else {
+ /*
+ * entire range usable
+ */
+ pmptr->alloc = pmptr->base = base;
+ pmptr->type = pmap_type;
+ pmptr->end = top;
+ }
+
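+ /*
+  * Track the highest usable physical address seen so far and add
+  * this region's page span to the available-page total.
+  */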
+ if (i386_ptob(pmptr->end) > avail_end )
+ avail_end = i386_ptob(pmptr->end);
+
+ avail_remaining += (pmptr->end - pmptr->base);
+
+ /*
+ * Consolidate contiguous memory regions, if possible
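+ * (merge only when the previous entry survived, the types match,
+ * nothing in the new entry has been allocated yet, and it begins
+ * immediately after the previous entry; prev_pmptr is cleared
+ * whenever a region is skipped, so merging never spans a hole)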
+ */
+ if (prev_pmptr &&
+ pmptr->type == prev_pmptr->type &&
+ pmptr->base == pmptr->alloc &&
+ pmptr->base == (prev_pmptr->end + 1)) {
+ prev_pmptr->end = pmptr->end;
+ } else {
+ pmap_memory_region_count++;
+ prev_pmptr = pmptr;
+ pmptr++;
+ }
+ }