X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ff6e181ae92fc6f1e89841290f461d1f2f9badd9..143464d58d2bd6378e74eec636961ceb0d32fb91:/osfmk/i386/hibernate_i386.c

diff --git a/osfmk/i386/hibernate_i386.c b/osfmk/i386/hibernate_i386.c
index 0e2f593f7..a16994234 100644
--- a/osfmk/i386/hibernate_i386.c
+++ b/osfmk/i386/hibernate_i386.c
@@ -1,14 +1,19 @@
 /*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -18,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #include
@@ -32,74 +37,120 @@
 #include
 #include
 #include
-#define KERNEL
+
+#include
 #include
 #include
+#include
 
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+extern ppnum_t max_ppnum;
 
-/* This assumes that
- * - we never will want to read or write memory below the start of kernel text
- * - kernel text and data isn't included in pmap memory regions
- */
+#define MAX_BANKS	32
 
-extern void *sectTEXTB;
-extern char *first_avail;
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 hibernate_page_list_t *
-hibernate_page_list_allocate(void)
+hibernate_page_list_allocate(boolean_t log)
 {
-    vm_offset_t		base;
+    ppnum_t		base, num;
     vm_size_t		size;
-    uint32_t		bank;
+    uint32_t		bank, num_banks;
     uint32_t		pages, page_count;
     hibernate_page_list_t * list;
     hibernate_bitmap_t * bitmap;
-    pmap_memory_region_t * regions;
-    pmap_memory_region_t * rp;
-    uint32_t		num_regions, num_alloc_regions;
-
-    page_count = 0;
-
-    /* Make a list of the maximum number of regions needed */
-    num_alloc_regions = 1 + pmap_memory_region_count;
-    /* Allocate our own list of memory regions so we can sort them in order. */
-    regions = (pmap_memory_region_t *)kalloc(sizeof(pmap_memory_region_t) * num_alloc_regions);
-    if (!regions)
-	return (0);
-
-    /* Fill in the actual regions we will be returning. */
-    rp = regions;
-
-    /* XXX should check for non-volatile memory region below kernel space. */
-    /* Kernel region is first. */
-    base = (vm_offset_t)(sectTEXTB) & 0x3FFFFFFF;
-    rp->base = atop_32(base);
-    rp->end = atop_32((vm_offset_t)first_avail) - 1;
-    rp->alloc = 0;
-    num_regions = 1;
-
-    /* Remaining memory regions. Consolidate adjacent regions. */
-    for (bank = 0; bank < (uint32_t) pmap_memory_region_count; bank++)
+    EfiMemoryRange *	mptr;
+    uint32_t		mcount, msize, i;
+    hibernate_bitmap_t	dram_ranges[MAX_BANKS];
+    boot_args *		args = (boot_args *) PE_state.bootArgs;
+    uint32_t		non_os_pagecount;
+
+    mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
+    if (args->MemoryMapDescriptorSize == 0)
+	panic("Invalid memory map descriptor size");
+    msize = args->MemoryMapDescriptorSize;
+    mcount = args->MemoryMapSize / msize;
+
+    num_banks = 0;
+    non_os_pagecount = 0;
+    for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize))
     {
-	if ((rp->end + 1) == pmap_memory_regions[bank].base) {
-	    rp->end = pmap_memory_regions[bank].end;
-	} else {
-	    ++rp;
-	    ++num_regions;
-	    rp->base = pmap_memory_regions[bank].base;
-	    rp->end = pmap_memory_regions[bank].end;
-	    rp->alloc = 0;
-	}
+	base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
+	num = (ppnum_t) mptr->NumberOfPages;
+
+	if (base > max_ppnum)
+	    continue;
+	if ((base + num - 1) > max_ppnum)
+	    num = max_ppnum - base + 1;
+	if (!num)
+	    continue;
+
+	switch (mptr->Type)
+	{
+	    // any kind of dram
+	    case kEfiACPIMemoryNVS:
+	    case kEfiPalCode:
+		non_os_pagecount += num;
+
+	    // OS used dram
+	    case kEfiLoaderCode:
+	    case kEfiLoaderData:
+	    case kEfiBootServicesCode:
+	    case kEfiBootServicesData:
+	    case kEfiConventionalMemory:
+
+		for (bank = 0; bank < num_banks; bank++)
+		{
+		    if (dram_ranges[bank].first_page <= base)
+			continue;
+		    if ((base + num) == dram_ranges[bank].first_page)
+		    {
+			dram_ranges[bank].first_page = base;
+			num = 0;
+		    }
+		    break;
+		}
+		if (!num) break;
+
+		if (bank && (base == (1 + dram_ranges[bank - 1].last_page)))
+		    bank--;
+		else
+		{
+		    num_banks++;
+		    if (num_banks >= MAX_BANKS) break;
+		    bcopy(&dram_ranges[bank],
+			  &dram_ranges[bank + 1],
+			  (num_banks - bank - 1) * sizeof(hibernate_bitmap_t));
+		    dram_ranges[bank].first_page = base;
+		}
+		dram_ranges[bank].last_page = base + num - 1;
+		break;
+
+	    // runtime services will be restarted, so no save
+	    case kEfiRuntimeServicesCode:
+	    case kEfiRuntimeServicesData:
+	    // contents are volatile once the platform expert starts
+	    case kEfiACPIReclaimMemory:
+	    // non dram
+	    case kEfiReservedMemoryType:
+	    case kEfiUnusableMemory:
+	    case kEfiMemoryMappedIO:
+	    case kEfiMemoryMappedIOPortSpace:
+	    default:
+		break;
+	}
     }
 
-    /* Size the hibernation bitmap */
+    if (num_banks >= MAX_BANKS)
+	return (NULL);
+
+    // size the hibernation bitmap
+
     size = sizeof(hibernate_page_list_t);
     page_count = 0;
-    for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
-	pages = rp->end + 1 - rp->base;
+    for (bank = 0; bank < num_banks; bank++) {
+	pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
 	page_count += pages;
 	size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
     }
@@ -108,76 +159,80 @@ hibernate_page_list_allocate(void)
     if (!list)
 	return (list);
 
-    list->list_size = size;
+    list->list_size = (uint32_t)size;
     list->page_count = page_count;
-    list->bank_count = num_regions;
+    list->bank_count = num_banks;
+
+    // convert to hibernation bitmap.
 
-    /* Convert to hibernation bitmap. */
-    /* This assumes that ranges are in order and do not overlap. */
     bitmap = &list->bank_bitmap[0];
-    for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
-	bitmap->first_page = rp->base;
-	bitmap->last_page = rp->end;
+    for (bank = 0; bank < num_banks; bank++)
+    {
+	bitmap->first_page = dram_ranges[bank].first_page;
+	bitmap->last_page  = dram_ranges[bank].last_page;
 	bitmap->bitmapwords = (bitmap->last_page + 1 - bitmap->first_page + 31) >> 5;
-	kprintf("HIB: Bank %d: 0x%x end 0x%x\n", bank,
-		ptoa_32(bitmap->first_page),
-		ptoa_32(bitmap->last_page));
+	if (log) kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n",
+			 bank, bitmap->first_page, bitmap->last_page);
 	bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
     }
+    if (log) printf("efi pagecount %d\n", non_os_pagecount);
 
-    kfree((void *)regions, sizeof(pmap_memory_region_t) * num_alloc_regions);
     return (list);
 }
 
+// mark pages not to be saved, but available for scratch usage during restore
+
 void
-hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
-                                   hibernate_page_list_t * page_list_wired,
-                                   uint32_t * pagesOut)
+hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list,
+                                    __unused hibernate_page_list_t * page_list_wired,
+                                    __unused boolean_t preflight,
+                                    __unused uint32_t * pagesOut)
 {
-    KernelBootArgs_t * bootArgs = (KernelBootArgs_t *)PE_state.bootArgs;
-    MemoryRange * mptr;
-    uint32_t bank;
-    uint32_t page, count;
-
-    for (bank = 0, mptr = bootArgs->memoryMap; bank < bootArgs->memoryMapCount; bank++, mptr++) {
-
-	if (kMemoryRangeNVS != mptr->type) continue;
-	kprintf("Base NVS region 0x%x + 0x%x\n", (vm_offset_t)mptr->base, (vm_size_t)mptr->length);
-	/* Round to page size. Hopefully this does not overlap any reserved areas. */
-	page = atop_32(trunc_page((vm_offset_t)mptr->base));
-	count = atop_32(round_page((vm_offset_t)mptr->base + (vm_size_t)mptr->length)) - page;
-	kprintf("Rounded NVS region 0x%x size 0x%x\n", page, count);
-
-	hibernate_set_page_state(page_list, page_list_wired, page, count, 1);
-	pagesOut -= count;
+}
+
+// mark pages not to be saved and not for scratch usage during restore
+void
+hibernate_page_list_set_volatile( hibernate_page_list_t * page_list,
+				  hibernate_page_list_t * page_list_wired,
+				  uint32_t * pagesOut)
+{
+    boot_args * args = (boot_args *) PE_state.bootArgs;
+
+    if (args->efiRuntimeServicesPageStart)
+    {
+	hibernate_set_page_state(page_list, page_list_wired,
+	    args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount,
+	    kIOHibernatePageStateFree);
+	*pagesOut -= args->efiRuntimeServicesPageCount;
     }
 }
 
 kern_return_t
 hibernate_processor_setup(IOHibernateImageHeader * header)
 {
-    current_cpu_datap()->cpu_hibernate = 1;
+    boot_args * args = (boot_args *) PE_state.bootArgs;
+
+    cpu_datap(0)->cpu_hibernate = 1;
     header->processorFlags = 0;
+
+    header->runtimePages = args->efiRuntimeServicesPageStart;
+    header->runtimePageCount = args->efiRuntimeServicesPageCount;
+    header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart;
+    header->performanceDataStart = args->performanceDataStart;
+    header->performanceDataSize = args->performanceDataSize;
+
    return (KERN_SUCCESS);
 }
 
 void
 hibernate_vm_lock(void)
 {
-    if (FALSE /* getPerProc()->hibernate */)
-    {
-	vm_page_lock_queues();
-	mutex_lock(&vm_page_queue_free_lock);
-    }
+    if (current_cpu_datap()->cpu_hibernate) hibernate_vm_lock_queues();
 }
 
 void
 hibernate_vm_unlock(void)
 {
-    if (FALSE /* getPerProc()->hibernate */)
-    {
-	mutex_unlock(&vm_page_queue_free_lock);
-	vm_page_unlock_queues();
-    }
+    if (current_cpu_datap()->cpu_hibernate) hibernate_vm_unlock_queues();
 }
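
The bank-coalescing loop in the new hibernate_page_list_allocate() keeps dram_ranges[] sorted by first_page and folds each usable EFI descriptor into an existing bank whenever the page ranges touch: a descriptor ending exactly where a later bank begins extends that bank downward, and a descriptor starting one page past the previous bank's last_page extends that bank upward (the kernel's bank-- trick). The stand-alone C sketch below isolates just that insertion-and-merge step; bank_t, add_range(), and the sample ranges in main() are illustrative stand-ins, not kernel API, and memmove() stands in for the kernel's bcopy() with its arguments swapped.

/*
 * Minimal user-space sketch of the bank-coalescing scheme used by
 * hibernate_page_list_allocate() in the diff above. Simplified types;
 * not kernel code.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_BANKS 32

typedef struct {
    uint32_t first_page;
    uint32_t last_page;
} bank_t;

static uint32_t num_banks = 0;
static bank_t   dram_ranges[MAX_BANKS];

/* Insert [base, base + num - 1], keeping banks sorted and merged. */
static int
add_range(uint32_t base, uint32_t num)
{
    uint32_t bank;

    /* Find the first bank that starts after this range. */
    for (bank = 0; bank < num_banks; bank++) {
        if (dram_ranges[bank].first_page <= base)
            continue;
        if ((base + num) == dram_ranges[bank].first_page) {
            /* Range ends exactly where this bank begins: merge downward. */
            dram_ranges[bank].first_page = base;
            return 0;
        }
        break;
    }

    if (bank && (base == (1 + dram_ranges[bank - 1].last_page))) {
        bank--;                 /* abuts the previous bank: merge upward */
    } else {
        if (num_banks >= MAX_BANKS)
            return -1;          /* the kernel returns NULL in this case */
        /* Shift later banks up one slot to keep the array sorted. */
        memmove(&dram_ranges[bank + 1], &dram_ranges[bank],
                (num_banks - bank) * sizeof(bank_t));
        dram_ranges[bank].first_page = base;
        num_banks++;
    }
    dram_ranges[bank].last_page = base + num - 1;
    return 0;
}

int
main(void)
{
    add_range(0x100, 0x10);   /* first bank: 0x100..0x10f        */
    add_range(0x300, 0x10);   /* separate bank: 0x300..0x30f     */
    add_range(0x110, 0x10);   /* merges upward into the first    */

    for (uint32_t bank = 0; bank < num_banks; bank++)
        printf("bank[%u]: 0x%x..0x%x\n", (unsigned)bank,
               (unsigned)dram_ranges[bank].first_page,
               (unsigned)dram_ranges[bank].last_page);
    return 0;
}

Compiled and run, the three sample ranges collapse into two banks (0x100..0x11f and 0x300..0x30f), mirroring how the kernel loop reuses the previous slot on an upward merge instead of inserting a new bank.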
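The sizing pass after the map walk reserves, per bank, a hibernate_bitmap_t header plus one bit per page rounded up to whole 32-bit words, which is where the ((pages + 31) >> 5) term comes from. A minimal sketch of that arithmetic, using simplified stand-in structs rather than the kernel's real hibernate_page_list_t and hibernate_bitmap_t layouts:

/*
 * Sketch of the variable-length page-list size computation from the
 * "size the hibernation bitmap" loop above. Struct layouts are
 * simplified stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint32_t first_page;
    uint32_t last_page;
    uint32_t bitmapwords;
    /* uint32_t bitmap[]; follows in the real variable-length layout */
} bitmap_hdr_t;

int
main(void)
{
    /* two hypothetical banks of physical pages: [first, last] */
    uint32_t banks[][2] = { { 0x0, 0x7ffff }, { 0x100000, 0x17ffff } };
    size_t   size = sizeof(uint32_t) * 3;  /* stand-in list header:
                                              list_size, page_count, bank_count */
    uint32_t page_count = 0;

    for (unsigned b = 0; b < 2; b++) {
        uint32_t pages = banks[b][1] + 1 - banks[b][0];
        page_count += pages;
        /* one bit per page -> (pages + 31) / 32 words of bitmap */
        size += sizeof(bitmap_hdr_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
    }
    printf("pages %u, bytes %zu\n", (unsigned)page_count, size);
    return 0;
}

For two banks of 0x80000 pages each, this yields 16384 bitmap words (64 KB) per bank plus the small fixed headers.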
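hibernate_page_list_set_volatile() above excludes the EFI runtime-services pages by calling hibernate_set_page_state() with kIOHibernatePageStateFree and deducting the count from *pagesOut. Underneath, that is per-bank bitmap manipulation: find the bank covering a page number, then flip its bit. The sketch below shows only the word/bit indexing for a single flat bank; mark_pages() is a hypothetical stand-in, and whether a set bit ultimately means "save" or "skip" is a kernel convention the sketch does not model.

/*
 * Sketch of the bit indexing behind page-state marking: one bit per
 * page, 32 pages per bitmap word. Not the kernel's implementation.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGES 128

static uint32_t bitmap[(PAGES + 31) >> 5];

/* Set one bit per page in [page, page + count). */
static void
mark_pages(uint32_t page, uint32_t count)
{
    for (uint32_t p = page; p < page + count; p++)
        bitmap[p >> 5] |= 1u << (p & 31);
}

int
main(void)
{
    mark_pages(40, 8);   /* e.g. a small runtime-services range */
    for (unsigned w = 0; w < ((PAGES + 31) >> 5); w++)
        printf("word[%u] = 0x%08x\n", w, (unsigned)bitmap[w]);
    return 0;   /* pages 40..47 land in word[1] as 0x0000ff00 */
}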