X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0c530ab8987f0ae6a1a3d9284f40182b88852816..7ee9d059c4eecf68ae4f8b0fb99ae2471eda79af:/osfmk/i386/i386_vm_init.c diff --git a/osfmk/i386/i386_vm_init.c b/osfmk/i386/i386_vm_init.c index 2cf027867..989895eb0 100644 --- a/osfmk/i386/i386_vm_init.c +++ b/osfmk/i386/i386_vm_init.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003-2008 Apple Computer, Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
* - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ @@ -68,80 +74,71 @@ #include #include #include -#include #include -#include #include #include #include -#include "i386_lowmem.h" +#include +#include +#include + +#include +#include vm_size_t mem_size = 0; -vm_offset_t first_avail = 0;/* first after page tables */ -vm_offset_t last_addr; +pmap_paddr_t first_avail = 0;/* first after page tables */ uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */ uint64_t mem_actual; uint64_t sane_size = 0; /* Memory size to use for defaults calculations */ -#define MAXBOUNCEPOOL (128 * 1024 * 1024) -#define MAXLORESERVE ( 32 * 1024 * 1024) - -extern int bsd_mbuf_cluster_reserve(void); - +#define MAXLORESERVE (32 * 1024 * 1024) -uint32_t bounce_pool_base = 0; -uint32_t bounce_pool_size = 0; +ppnum_t max_ppnum = 0; +ppnum_t lowest_lo = 0; +ppnum_t lowest_hi = 0; +ppnum_t highest_hi = 0; -static void reserve_bouncepool(uint32_t); +uint32_t pmap_reserved_pages_allocated = 0; +uint32_t pmap_last_reserved_range = 0xFFFFFFFF; +uint32_t pmap_reserved_ranges = 0; +extern unsigned int bsd_mbuf_cluster_reserve(boolean_t *); -pmap_paddr_t avail_start, avail_end; +pmap_paddr_t avail_start, avail_end; vm_offset_t virtual_avail, virtual_end; static pmap_paddr_t avail_remaining; vm_offset_t static_memory_end = 0; -#include -vm_offset_t edata, etext, end; +vm_offset_t sHIB, eHIB, stext, etext, sdata, edata, end; /* - * _mh_execute_header is the mach_header for the currently executing - * 32 bit kernel + * _mh_execute_header is the mach_header for the currently executing kernel */ -extern struct mach_header _mh_execute_header; -void *sectTEXTB; int sectSizeTEXT; -void *sectDATAB; int sectSizeDATA; -void *sectOBJCB; int sectSizeOBJC; -void *sectLINKB; int sectSizeLINK; -void *sectPRELINKB; int sectSizePRELINK; -void *sectHIBB; int sectSizeHIB; - -extern void *getsegdatafromheader(struct mach_header *, const char *, int *); -extern struct segment_command *getsegbyname(const char *); -extern struct section *firstsect(struct segment_command *); -extern struct section *nextsect(struct segment_command *, struct section *); - - -void -i386_macho_zerofill(void) -{ - struct segment_command *sgp; - struct section *sp; - - sgp = getsegbyname("__DATA"); - if (sgp) { - sp = firstsect(sgp); - if (sp) { - do { - if ((sp->flags & S_ZEROFILL)) - bzero((char *) sp->addr, sp->size); - } while ((sp = nextsect(sgp, sp))); - } - } - - return; -} - +void *sectTEXTB; unsigned long sectSizeTEXT; +void *sectDATAB; unsigned long sectSizeDATA; +void *sectOBJCB; unsigned long sectSizeOBJC; +void *sectLINKB; unsigned long sectSizeLINK; +void *sectPRELINKB; unsigned long sectSizePRELINK; +void *sectHIBB; unsigned long sectSizeHIB; +void *sectINITPTB; unsigned long sectSizeINITPT; + +kernel_segment_command_t *segTEXT; +kernel_section_t *cursectTEXT, *lastsectTEXT; + +extern uint64_t firmware_Conventional_bytes; +extern uint64_t firmware_RuntimeServices_bytes; +extern uint64_t firmware_ACPIReclaim_bytes; +extern uint64_t firmware_ACPINVS_bytes; +extern uint64_t firmware_PalCode_bytes; +extern uint64_t firmware_Reserved_bytes; +extern uint64_t firmware_Unusable_bytes; +extern uint64_t firmware_other_bytes; +uint64_t firmware_MMIO_bytes; + +#if DEBUG +#define PRINT_PMAP_MEMORY_TABLE +#endif /* DEBUG */ /* * Basic VM initialization. 
*/ @@ -160,9 +157,19 @@ i386_vm_init(uint64_t maxmem, unsigned int safeboot; ppnum_t maxpg = 0; uint32_t pmap_type; - uint32_t maxbouncepoolsize; uint32_t maxloreserve; uint32_t maxdmaaddr; + uint32_t mbuf_reserve = 0; + boolean_t mbuf_override = FALSE; + +#if DEBUG + kprintf("Boot args revision: %d version: %d", + args->Revision, args->Version); + kprintf(" commandline: \""); + for(i=0; iCommandLine[i]); + kprintf("\"\n"); +#endif /* * Now retrieve addresses for end, edata, and etext @@ -179,19 +186,48 @@ i386_vm_init(uint64_t maxmem, &_mh_execute_header, "__LINKEDIT", §SizeLINK); sectHIBB = (void *)getsegdatafromheader( &_mh_execute_header, "__HIB", §SizeHIB); + sectINITPTB = (void *)getsegdatafromheader( + &_mh_execute_header, "__INITPT", §SizeINITPT); sectPRELINKB = (void *) getsegdatafromheader( - &_mh_execute_header, "__PRELINK", §SizePRELINK); + &_mh_execute_header, "__PRELINK_TEXT", §SizePRELINK); + + segTEXT = getsegbynamefromheader(&_mh_execute_header, "__TEXT"); + cursectTEXT = lastsectTEXT = firstsect(segTEXT); + /* Discover the last TEXT section within the TEXT segment */ + while ((cursectTEXT = nextsect(segTEXT, cursectTEXT)) != NULL) { + lastsectTEXT = cursectTEXT; + } - etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; + sHIB = (vm_offset_t) sectHIBB; + eHIB = (vm_offset_t) sectHIBB + sectSizeHIB; + /* Zero-padded from ehib to stext if text is 2M-aligned */ + stext = (vm_offset_t) sectTEXTB; + etext = (vm_offset_t) round_page_64(lastsectTEXT->addr + lastsectTEXT->size); + /* Zero-padded from etext to sdata if text is 2M-aligned */ + sdata = (vm_offset_t) sectDATAB; edata = (vm_offset_t) sectDATAB + sectSizeDATA; +#if DEBUG + kprintf("sectTEXTB = %p\n", sectTEXTB); + kprintf("sectDATAB = %p\n", sectDATAB); + kprintf("sectOBJCB = %p\n", sectOBJCB); + kprintf("sectLINKB = %p\n", sectLINKB); + kprintf("sectHIBB = %p\n", sectHIBB); + kprintf("sectPRELINKB = %p\n", sectPRELINKB); + kprintf("eHIB = %p\n", (void *) eHIB); + kprintf("stext = %p\n", (void *) stext); + kprintf("etext = %p\n", (void *) etext); + kprintf("sdata = %p\n", (void *) sdata); + kprintf("edata = %p\n", (void *) edata); +#endif + vm_set_page_size(); /* * Compute the memory size. 
*/ - if ((1 == vm_himemory_mode) || PE_parse_boot_arg("-x", &safeboot)) { + if ((1 == vm_himemory_mode) || PE_parse_boot_argn("-x", &safeboot, sizeof (safeboot))) { maxpg = 1 << (32 - I386_PGSHIFT); } avail_remaining = 0; @@ -201,23 +237,27 @@ i386_vm_init(uint64_t maxmem, pmap_memory_region_count = pmap_memory_region_current = 0; fap = (ppnum_t) i386_btop(first_avail); - mptr = (EfiMemoryRange *)args->MemoryMap; + mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap); if (args->MemoryMapDescriptorSize == 0) panic("Invalid memory map descriptor size"); msize = args->MemoryMapDescriptorSize; mcount = args->MemoryMapSize / msize; #define FOURGIG 0x0000000100000000ULL +#define ONEGIG 0x0000000040000000ULL for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { ppnum_t base, top; + uint64_t region_bytes = 0; if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) { kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count); break; } base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT); - top = (ppnum_t) ((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1; + top = (ppnum_t) (((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1); + region_bytes = (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT); + pmap_type = mptr->Type; switch (mptr->Type) { case kEfiLoaderCode: @@ -229,31 +269,61 @@ i386_vm_init(uint64_t maxmem, * Consolidate usable memory types into one. */ pmap_type = kEfiConventionalMemory; - sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT); + sane_size += region_bytes; + firmware_Conventional_bytes += region_bytes; break; + /* + * sane_size should reflect the total amount of physical + * RAM in the system, not just the amount that is + * available for the OS to use. + * FIXME:Consider deriving this value from SMBIOS tables + * rather than reverse engineering the memory map. + * Alternatively, see + * Memory map should + * describe all memory + * Firmware on some systems guarantees that the memory + * map is complete via the "RomReservedMemoryTracked" + * feature field--consult that where possible to + * avoid the "round up to 128M" workaround below. 
+ */ case kEfiRuntimeServicesCode: case kEfiRuntimeServicesData: + firmware_RuntimeServices_bytes += region_bytes; + sane_size += region_bytes; + break; case kEfiACPIReclaimMemory: + firmware_ACPIReclaim_bytes += region_bytes; + sane_size += region_bytes; + break; case kEfiACPIMemoryNVS: + firmware_ACPINVS_bytes += region_bytes; + sane_size += region_bytes; + break; case kEfiPalCode: - /* - * sane_size should reflect the total amount of physical ram - * in the system, not just the amount that is available for - * the OS to use - */ - sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT); - /* fall thru */ + firmware_PalCode_bytes += region_bytes; + sane_size += region_bytes; + break; + case kEfiReservedMemoryType: + firmware_Reserved_bytes += region_bytes; + break; case kEfiUnusableMemory: + firmware_Unusable_bytes += region_bytes; + break; case kEfiMemoryMappedIO: case kEfiMemoryMappedIOPortSpace: - case kEfiReservedMemoryType: + firmware_MMIO_bytes += region_bytes; + break; default: - pmap_type = mptr->Type; + firmware_other_bytes += region_bytes; + break; } - kprintf("EFI region: type = %d/%d, base = 0x%x, top = 0x%x\n", mptr->Type, pmap_type, base, top); +#if DEBUG + kprintf("EFI region %d: type %u/%d, base 0x%x, top 0x%x\n", + i, mptr->Type, pmap_type, base, top); +#endif if (maxpg) { if (base >= maxpg) @@ -264,18 +334,16 @@ i386_vm_init(uint64_t maxmem, /* * handle each region */ - if (kEfiACPIMemoryNVS == pmap_type) { - prev_pmptr = 0; - continue; - } else if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME || - pmap_type != kEfiConventionalMemory) { + if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME || + pmap_type != kEfiConventionalMemory) { prev_pmptr = 0; continue; } else { /* * Usable memory region */ - if (top < I386_LOWMEM_RESERVED) { + if (top < I386_LOWMEM_RESERVED || + !pal_is_usable_memory(base, top)) { prev_pmptr = 0; continue; } @@ -290,10 +358,31 @@ i386_vm_init(uint64_t maxmem, pmptr->base = base; else pmptr->base = I386_LOWMEM_RESERVED; + + pmptr->end = top; + /* - * mark as already mapped + * A range may be marked with with the + * EFI_MEMORY_KERN_RESERVED attribute + * on some systems, to indicate that the range + * must not be made available to devices. + * Simplifying assumptions are made regarding + * the placement of the range. 
*/ - pmptr->alloc = pmptr->end = top; + if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) + pmap_reserved_ranges++; + + if ((mptr->Attribute & EFI_MEMORY_KERN_RESERVED) && + (top < I386_KERNEL_IMAGE_BASE_PAGE)) { + pmptr->alloc = pmptr->base; + pmap_last_reserved_range = pmap_memory_region_count; + } + else { + /* + * mark as already mapped + */ + pmptr->alloc = top; + } pmptr->type = pmap_type; } else if ( (base < fap) && (top > fap) ) { @@ -335,8 +424,11 @@ i386_vm_init(uint64_t maxmem, if (prev_pmptr && pmptr->type == prev_pmptr->type && pmptr->base == pmptr->alloc && - pmptr->base == (prev_pmptr->end + 1)) { - prev_pmptr->end = pmptr->end; + pmptr->base == (prev_pmptr->end + 1)) + { + if(prev_pmptr->end == prev_pmptr->alloc) + prev_pmptr->alloc = pmptr->base; + prev_pmptr->end = pmptr->end; } else { pmap_memory_region_count++; prev_pmptr = pmptr; @@ -345,60 +437,95 @@ i386_vm_init(uint64_t maxmem, } } - #ifdef PRINT_PMAP_MEMORY_TABLE { unsigned int j; pmap_memory_region_t *p = pmap_memory_regions; - vm_offset_t region_start, region_end; - vm_offset_t efi_start, efi_end; + addr64_t region_start, region_end; + addr64_t efi_start, efi_end; for (j=0;jtype, - p->base << I386_PGSHIFT, p->alloc << I386_PGSHIFT, p->end << I386_PGSHIFT); - region_start = p->base << I386_PGSHIFT; - region_end = (p->end << I386_PGSHIFT) - 1; - mptr = args->MemoryMap; + kprintf("pmap region %d type %d base 0x%llx alloc 0x%llx top 0x%llx\n", + j, p->type, + (addr64_t) p->base << I386_PGSHIFT, + (addr64_t) p->alloc << I386_PGSHIFT, + (addr64_t) p->end << I386_PGSHIFT); + region_start = (addr64_t) p->base << I386_PGSHIFT; + region_end = ((addr64_t) p->end << I386_PGSHIFT) - 1; + mptr = (EfiMemoryRange *) ml_static_ptovirt((vm_offset_t)args->MemoryMap); for (i=0; iType != kEfiLoaderCode && mptr->Type != kEfiLoaderData && mptr->Type != kEfiBootServicesCode && mptr->Type != kEfiBootServicesData && mptr->Type != kEfiConventionalMemory) { - efi_start = (vm_offset_t)mptr->PhysicalStart; + efi_start = (addr64_t)mptr->PhysicalStart; efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1; if ((efi_start >= region_start && efi_start <= region_end) || (efi_end >= region_start && efi_end <= region_end)) { kprintf(" *** Overlapping region with EFI runtime region %d\n", i); } - } - + } } - } + } } #endif avail_start = first_avail; mem_actual = sane_size; -#define MEG (1024*1024) - /* * For user visible memory size, round up to 128 Mb - accounting for the various stolen memory * not reported by EFI. */ - sane_size = (sane_size + 128 * MEG - 1) & ~((uint64_t)(128 * MEG - 1)); + sane_size = (sane_size + 128 * MB - 1) & ~((uint64_t)(128 * MB - 1)); + + /* + * We cap at KERNEL_MAXMEM bytes (currently 32GB for K32, 96GB for K64). + * Unless overriden by the maxmem= boot-arg + * -- which is a non-zero maxmem argument to this function. 
+ */ + if (maxmem == 0 && sane_size > KERNEL_MAXMEM) { + maxmem = KERNEL_MAXMEM; + printf("Physical memory %lld bytes capped at %dGB\n", + sane_size, (uint32_t) (KERNEL_MAXMEM/GB)); + } /* * if user set maxmem, reduce memory sizes */ if ( (maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) { - ppnum_t discarded_pages = (sane_size - maxmem) >> I386_PGSHIFT; - sane_size = maxmem; + ppnum_t discarded_pages = (ppnum_t)((sane_size - maxmem) >> I386_PGSHIFT); + ppnum_t highest_pn = 0; + ppnum_t cur_alloc = 0; + uint64_t pages_to_use; + unsigned cur_region = 0; + + sane_size = maxmem; + if (avail_remaining > discarded_pages) avail_remaining -= discarded_pages; else avail_remaining = 0; + + pages_to_use = avail_remaining; + + while (cur_region < pmap_memory_region_count && pages_to_use) { + for (cur_alloc = pmap_memory_regions[cur_region].alloc; + cur_alloc < pmap_memory_regions[cur_region].end && pages_to_use; + cur_alloc++) { + if (cur_alloc > highest_pn) + highest_pn = cur_alloc; + pages_to_use--; + } + if (pages_to_use == 0) + pmap_memory_regions[cur_region].end = cur_alloc; + + cur_region++; + } + pmap_memory_region_count = cur_region; + + avail_end = i386_ptob(highest_pn + 1); } /* @@ -411,41 +538,52 @@ i386_vm_init(uint64_t maxmem, mem_size = (vm_size_t)sane_size; max_mem = sane_size; - kprintf("Physical memory %d MB\n", sane_size/MEG); - - if (!PE_parse_boot_arg("max_valid_dma_addr", &maxdmaaddr)) - max_valid_dma_address = 1024ULL * 1024ULL * 4096ULL; - else - max_valid_dma_address = ((uint64_t) maxdmaaddr) * 1024ULL * 1024ULL; + kprintf("Physical memory %llu MB\n", sane_size/MB); - if (!PE_parse_boot_arg("maxbouncepool", &maxbouncepoolsize)) - maxbouncepoolsize = MAXBOUNCEPOOL; - else - maxbouncepoolsize = maxbouncepoolsize * (1024 * 1024); - - /* - * bsd_mbuf_cluster_reserve depends on sane_size being set - * in order to correctly determine the size of the mbuf pool - * that will be reserved - */ - if (!PE_parse_boot_arg("maxloreserve", &maxloreserve)) - maxloreserve = MAXLORESERVE + bsd_mbuf_cluster_reserve(); - else - maxloreserve = maxloreserve * (1024 * 1024); + max_valid_low_ppnum = (2 * GB) / PAGE_SIZE; + if (!PE_parse_boot_argn("max_valid_dma_addr", &maxdmaaddr, sizeof (maxdmaaddr))) { + max_valid_dma_address = (uint64_t)4 * (uint64_t)GB; + } else { + max_valid_dma_address = ((uint64_t) maxdmaaddr) * MB; + if ((max_valid_dma_address / PAGE_SIZE) < max_valid_low_ppnum) + max_valid_low_ppnum = (ppnum_t)(max_valid_dma_address / PAGE_SIZE); + } if (avail_end >= max_valid_dma_address) { - if (maxbouncepoolsize) - reserve_bouncepool(maxbouncepoolsize); - if (maxloreserve) - vm_lopage_poolsize = maxloreserve / PAGE_SIZE; + if (!PE_parse_boot_argn("maxloreserve", &maxloreserve, sizeof (maxloreserve))) { + + if (sane_size >= (ONEGIG * 15)) + maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 4; + else if (sane_size >= (ONEGIG * 7)) + maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 2; + else + maxloreserve = MAXLORESERVE / PAGE_SIZE; + + mbuf_reserve = bsd_mbuf_cluster_reserve(&mbuf_override) / PAGE_SIZE; + } else + maxloreserve = (maxloreserve * (1024 * 1024)) / PAGE_SIZE; + + if (maxloreserve) { + vm_lopage_free_limit = maxloreserve; + + if (mbuf_override == TRUE) { + vm_lopage_free_limit += mbuf_reserve; + vm_lopage_lowater = 0; + } else + vm_lopage_lowater = vm_lopage_free_limit / 16; + + vm_lopage_refill = TRUE; + vm_lopage_needed = TRUE; + } } - + /* * Initialize kernel physical map. * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS. 
*/ + kprintf("avail_remaining = 0x%lx\n", (unsigned long)avail_remaining); pmap_bootstrap(0, IA32e); } @@ -453,7 +591,77 @@ i386_vm_init(uint64_t maxmem, unsigned int pmap_free_pages(void) { - return avail_remaining; + return (unsigned int)avail_remaining; +} + + +boolean_t pmap_next_page_reserved(ppnum_t *); + +/* + * Pick a page from a "kernel private" reserved range; works around + * errata on some hardware. + */ +boolean_t +pmap_next_page_reserved(ppnum_t *pn) { + if (pmap_reserved_ranges && pmap_last_reserved_range != 0xFFFFFFFF) { + uint32_t n; + pmap_memory_region_t *region; + for (n = 0; n <= pmap_last_reserved_range; n++) { + region = &pmap_memory_regions[n]; + if (region->alloc < region->end) { + *pn = region->alloc++; + avail_remaining--; + + if (*pn > max_ppnum) + max_ppnum = *pn; + + if (lowest_lo == 0 || *pn < lowest_lo) + lowest_lo = *pn; + + pmap_reserved_pages_allocated++; + return TRUE; + } + } + } + return FALSE; +} + + +boolean_t +pmap_next_page_hi( + ppnum_t *pn) +{ + pmap_memory_region_t *region; + int n; + + if (pmap_next_page_reserved(pn)) + return TRUE; + + if (avail_remaining) { + for (n = pmap_memory_region_count - 1; n >= 0; n--) { + region = &pmap_memory_regions[n]; + + if (region->alloc != region->end) { + *pn = region->alloc++; + avail_remaining--; + + if (*pn > max_ppnum) + max_ppnum = *pn; + + if (lowest_lo == 0 || *pn < lowest_lo) + lowest_lo = *pn; + + if (lowest_hi == 0 || *pn < lowest_hi) + lowest_hi = *pn; + + if (*pn > highest_hi) + highest_hi = *pn; + + return TRUE; + } + } + } + return FALSE; } @@ -461,16 +669,21 @@ boolean_t pmap_next_page( ppnum_t *pn) { - if (avail_remaining) while (pmap_memory_region_current < pmap_memory_region_count) { - if (pmap_memory_regions[pmap_memory_region_current].alloc == - pmap_memory_regions[pmap_memory_region_current].end) { - pmap_memory_region_current++; + if (pmap_memory_regions[pmap_memory_region_current].alloc == + pmap_memory_regions[pmap_memory_region_current].end) { + pmap_memory_region_current++; continue; } *pn = pmap_memory_regions[pmap_memory_region_current].alloc++; avail_remaining--; + if (*pn > max_ppnum) + max_ppnum = *pn; + + if (lowest_lo == 0 || *pn < lowest_lo) + lowest_lo = *pn; + return TRUE; } return FALSE; @@ -484,36 +697,10 @@ pmap_valid_page( unsigned int i; pmap_memory_region_t *pmptr = pmap_memory_regions; - assert(pn); for (i = 0; i < pmap_memory_region_count; i++, pmptr++) { - if ( (pn >= pmptr->base) && (pn <= pmptr->end) && pmptr->type == kEfiConventionalMemory ) + if ( (pn >= pmptr->base) && (pn <= pmptr->end) ) return TRUE; } return FALSE; } - -static void -reserve_bouncepool(uint32_t bounce_pool_wanted) -{ - pmap_memory_region_t *pmptr = pmap_memory_regions; - pmap_memory_region_t *lowest = NULL; - unsigned int i; - unsigned int pages_needed; - - pages_needed = bounce_pool_wanted / PAGE_SIZE; - - for (i = 0; i < pmap_memory_region_count; i++, pmptr++) { - if ( (pmptr->type == kEfiConventionalMemory) && ((pmptr->end - pmptr->alloc) >= pages_needed) ) { - if ( (lowest == NULL) || (pmptr->alloc < lowest->alloc) ) - lowest = pmptr; - } - } - if ( (lowest != NULL) ) { - bounce_pool_base = lowest->alloc * PAGE_SIZE; - bounce_pool_size = bounce_pool_wanted; - - lowest->alloc += pages_needed; - avail_remaining -= pages_needed; - } -}
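The memory-map loop added above strides through the firmware-provided map using args->MemoryMapDescriptorSize rather than sizeof(EfiMemoryRange), since firmware may hand the kernel descriptors larger than the structure it knows about, and it folds the loader and boot-services types into kEfiConventionalMemory because that memory is reclaimable once the kernel owns the machine. The following is a minimal sketch of that stride-and-classify pattern in isolation; the descriptor struct, type constants, and page shift here are simplified stand-ins, not the EFI or pmap definitions used in the diff.

#include <stdint.h>

/* Simplified stand-in for the EFI memory descriptor used in the diff. */
typedef struct {
    uint32_t Type;
    uint64_t PhysicalStart;
    uint64_t NumberOfPages;
    uint64_t Attribute;
} efi_range_t;

enum {
    TYPE_LOADER_CODE = 1, TYPE_LOADER_DATA,
    TYPE_BOOTSVC_CODE,    TYPE_BOOTSVC_DATA,
    TYPE_CONVENTIONAL
};

#define PGSHIFT 12      /* 4 KB pages, as on i386 */

/*
 * Walk a firmware memory map whose descriptors are desc_size bytes
 * apart -- which may be larger than sizeof(efi_range_t) -- and sum
 * the bytes in the types that are usable RAM, consolidating the
 * loader/boot-services types into "conventional" as the diff does.
 */
static uint64_t
walk_memory_map(const void *map, uint64_t map_size, uint64_t desc_size)
{
    uint64_t usable_bytes = 0;
    uint64_t count = map_size / desc_size;

    for (uint64_t i = 0; i < count; i++) {
        /* Stride by desc_size, not sizeof(): firmware may append fields. */
        const efi_range_t *r =
            (const efi_range_t *)((const char *)map + i * desc_size);
        uint64_t region_bytes = r->NumberOfPages << PGSHIFT;

        switch (r->Type) {
        case TYPE_LOADER_CODE:
        case TYPE_LOADER_DATA:
        case TYPE_BOOTSVC_CODE:
        case TYPE_BOOTSVC_DATA:
            /* Reclaimable once the OS owns the machine: treat as RAM. */
            /* FALLTHROUGH */
        case TYPE_CONVENTIONAL:
            usable_bytes += region_bytes;
            break;
        default:
            /* Runtime services, ACPI, MMIO, reserved: accounted separately. */
            break;
        }
    }
    return usable_bytes;
}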
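The maxloreserve hunk sizes the below-4GB page reserve from total RAM: one MAXLORESERVE worth of pages for systems with less than 7 GB, twice that at 7 GB or more, and four times at 15 GB or more. When the mbuf cluster size was overridden, the mbuf reserve is folded into the limit and the low-water mark is dropped to zero; otherwise the low-water mark is one sixteenth of the limit. The sketch below restates that policy as a standalone function, assuming 4 KB pages; vm_lopage_free_limit, vm_lopage_lowater, and bsd_mbuf_cluster_reserve() are the kernel's names, while size_lopage_reserve() and its struct are hypothetical.

#include <stdint.h>
#include <stdbool.h>

#define PAGE_SIZE_4K    4096u
#define ONE_GB          (1024uLL * 1024 * 1024)
#define MAXLORESERVE    (32 * 1024 * 1024)      /* 32 MB, as in the diff */

struct lopage_policy {
    uint32_t free_limit;        /* pages kept reserved below 4 GB */
    uint32_t lowater;           /* refill threshold, in pages */
};

/*
 * Hypothetical restatement of the sizing logic in the hunk above:
 * scale the base reserve by total RAM, then either absorb the mbuf
 * cluster reserve into the limit (override case, low-water mark of 0)
 * or keep a low-water mark at one sixteenth of the limit.
 */
static struct lopage_policy
size_lopage_reserve(uint64_t sane_size, uint32_t mbuf_reserve_pages,
                    bool mbuf_override)
{
    struct lopage_policy p;
    uint32_t base = MAXLORESERVE / PAGE_SIZE_4K;

    if (sane_size >= 15 * ONE_GB)
        p.free_limit = base * 4;
    else if (sane_size >= 7 * ONE_GB)
        p.free_limit = base * 2;
    else
        p.free_limit = base;

    if (mbuf_override) {
        p.free_limit += mbuf_reserve_pages;
        p.lowater = 0;
    } else {
        p.lowater = p.free_limit / 16;
    }
    return p;
}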
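pmap_next_page_hi(), also added above, hands out early-boot pages starting from the highest usable region and working downward, after first trying any EFI_MEMORY_KERN_RESERVED ranges below the kernel image via pmap_next_page_reserved(); the apparent intent is to keep low physical pages free for the low-memory pool while still consuming the reserved ranges the hardware errata workaround sets aside. The fragment below sketches that two-pass search order against a simplified region array; region_t, next_boot_page(), and the kern_reserved flag are stand-ins rather than the pmap_memory_region_t layout, and the statistics updates (max_ppnum, lowest_lo, and so on) are omitted.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t ppnum_t;

/* Simplified stand-in for pmap_memory_region_t. */
typedef struct {
    ppnum_t base;           /* first physical page of the region */
    ppnum_t alloc;          /* next page to hand out */
    ppnum_t end;            /* last physical page of the region */
    bool    kern_reserved;  /* EFI_MEMORY_KERN_RESERVED, below the kernel */
} region_t;

/*
 * Two-pass search mirroring the order in the diff: try the
 * firmware-reserved low ranges first, then carve pages from the
 * highest remaining region downward.  Returns false once every
 * region is exhausted.
 */
static bool
next_boot_page(region_t *regions, int nregions, ppnum_t *pn)
{
    /* Pass 1: kernel-reserved ranges, lowest first. */
    for (int i = 0; i < nregions; i++) {
        region_t *r = &regions[i];
        if (r->kern_reserved && r->alloc < r->end) {
            *pn = r->alloc++;
            return true;
        }
    }
    /* Pass 2: all regions, highest first. */
    for (int i = nregions - 1; i >= 0; i--) {
        region_t *r = &regions[i];
        if (r->alloc < r->end) {
            *pn = r->alloc++;
            return true;
        }
    }
    return false;
}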