X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..0c530ab8987f0ae6a1a3d9284f40182b88852816:/osfmk/i386/hibernate_i386.c

diff --git a/osfmk/i386/hibernate_i386.c b/osfmk/i386/hibernate_i386.c
index 6d47d1c96..d8f14ea46 100644
--- a/osfmk/i386/hibernate_i386.c
+++ b/osfmk/i386/hibernate_i386.c
@@ -1,29 +1,23 @@
 /*
  * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_HEADER_START@
  *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ * @APPLE_LICENSE_HEADER_END@
  */
 
 #include
@@ -37,74 +31,90 @@
 #include
 #include
 #include
-#define KERNEL
+
+#include
 #include
 #include
+#include "i386_lowmem.h"
 
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#define MAX_BANKS 32
 
-/* This assumes that
- * - we never will want to read or write memory below the start of kernel text
- * - kernel text and data isn't included in pmap memory regions
- */
-
-extern void *sectTEXTB;
-extern char *first_avail;
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 hibernate_page_list_t *
 hibernate_page_list_allocate(void)
 {
-    vm_offset_t             base;
+    ppnum_t                 base, num;
     vm_size_t               size;
-    uint32_t                bank;
+    uint32_t                bank, num_banks;
     uint32_t                pages, page_count;
     hibernate_page_list_t * list;
     hibernate_bitmap_t *    bitmap;
-    pmap_memory_region_t *  regions;
-    pmap_memory_region_t *  rp;
-    uint32_t                num_regions, num_alloc_regions;
 
-    page_count = 0;
+    EfiMemoryRange *        mptr;
+    uint32_t                mcount, msize, i;
+    hibernate_bitmap_t      dram_ranges[MAX_BANKS];
+    boot_args *             args = (boot_args *) PE_state.bootArgs;
 
-    /* Make a list of the maximum number of regions needed */
-    num_alloc_regions = 1 + pmap_memory_region_count;
+    mptr = args->MemoryMap;
+    if (args->MemoryMapDescriptorSize == 0)
+        panic("Invalid memory map descriptor size");
+    msize = args->MemoryMapDescriptorSize;
+    mcount = args->MemoryMapSize / msize;
 
-    /* Allocate our own list of memory regions so we can sort them in order. */
-    regions = (pmap_memory_region_t *)kalloc(sizeof(pmap_memory_region_t) * num_alloc_regions);
-    if (!regions)
-        return (0);
-
-    /* Fill in the actual regions we will be returning. */
-    rp = regions;
-
-    /* XXX should check for non-volatile memory region below kernel space. */
-    /* Kernel region is first. */
-    base = (vm_offset_t)(sectTEXTB) & 0x3FFFFFFF;
-    rp->base = atop_32(base);
-    rp->end = atop_32((vm_offset_t)first_avail) - 1;
-    rp->alloc = 0;
-    num_regions = 1;
-
-    /* Remaining memory regions. Consolidate adjacent regions. */
-    for (bank = 0; bank < (uint32_t) pmap_memory_region_count; bank++)
+    num_banks = 0;
+    for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize))
     {
-        if ((rp->end + 1) == pmap_memory_regions[bank].base) {
-            rp->end = pmap_memory_regions[bank].end;
-        } else {
-            ++rp;
-            ++num_regions;
-            rp->base = pmap_memory_regions[bank].base;
-            rp->end = pmap_memory_regions[bank].end;
-            rp->alloc = 0;
-        }
+        base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
+        num = (ppnum_t) mptr->NumberOfPages;
+        if (!num)
+            continue;
+
+        switch (mptr->Type)
+        {
+            // any kind of dram
+            case kEfiLoaderCode:
+            case kEfiLoaderData:
+            case kEfiBootServicesCode:
+            case kEfiBootServicesData:
+            case kEfiConventionalMemory:
+            case kEfiACPIReclaimMemory:
+            case kEfiACPIMemoryNVS:
+            case kEfiPalCode:
+
+                if (!num_banks || (base != (1 + dram_ranges[num_banks - 1].last_page)))
+                {
+                    num_banks++;
+                    if (num_banks >= MAX_BANKS)
+                        break;
+                    dram_ranges[num_banks - 1].first_page = base;
+                }
+                dram_ranges[num_banks - 1].last_page = base + num - 1;
+                break;
+
+            // runtime services will be restarted, so no save
+            case kEfiRuntimeServicesCode:
+            case kEfiRuntimeServicesData:
+            // non dram
+            case kEfiReservedMemoryType:
+            case kEfiUnusableMemory:
+            case kEfiMemoryMappedIO:
+            case kEfiMemoryMappedIOPortSpace:
+            default:
+                break;
+        }
     }
 
-    /* Size the hibernation bitmap */
+    if (num_banks >= MAX_BANKS)
+        return (NULL);
+
+    // size the hibernation bitmap
+    size = sizeof(hibernate_page_list_t);
     page_count = 0;
-    for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
-        pages = rp->end + 1 - rp->base;
+    for (bank = 0; bank < num_banks; bank++) {
+        pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
        page_count += pages;
        size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
     }
@@ -115,62 +125,75 @@ hibernate_page_list_allocate(void)
 
     list->list_size = size;
     list->page_count = page_count;
-    list->bank_count = num_regions;
+    list->bank_count = num_banks;
+
+    // convert to hibernation bitmap.
 
-    /* Convert to hibernation bitmap. */
-    /* This assumes that ranges are in order and do not overlap. */
     bitmap = &list->bank_bitmap[0];
-    for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
-        bitmap->first_page = rp->base;
-        bitmap->last_page = rp->end;
+    for (bank = 0; bank < num_banks; bank++)
+    {
+        bitmap->first_page = dram_ranges[bank].first_page;
+        bitmap->last_page = dram_ranges[bank].last_page;
        bitmap->bitmapwords = (bitmap->last_page + 1 - bitmap->first_page + 31) >> 5;
 
-        kprintf("HIB: Bank %d: 0x%x end 0x%x\n", bank,
-                ptoa_32(bitmap->first_page),
-                ptoa_32(bitmap->last_page));
+        kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n", bank,
+                bitmap->first_page,
+                bitmap->last_page);
 
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
     }
 
-    kfree((void *)regions, sizeof(pmap_memory_region_t) * num_alloc_regions);
 
     return (list);
 }
 
+// mark pages not to be saved, but available for scratch usage during restore
+
+void
+hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list,
+                                    __unused hibernate_page_list_t * page_list_wired,
+                                    __unused uint32_t * pagesOut)
+{
+}
+
+// mark pages not to be saved and not for scratch usage during restore
 void
-hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
-                                   hibernate_page_list_t * page_list_wired,
-                                   uint32_t * pagesOut)
+hibernate_page_list_set_volatile( hibernate_page_list_t * page_list,
+                                  hibernate_page_list_t * page_list_wired,
+                                  uint32_t * pagesOut)
 {
-    KernelBootArgs_t * bootArgs = (KernelBootArgs_t *)PE_state.bootArgs;
-    MemoryRange * mptr;
-    uint32_t bank;
-    uint32_t page, count;
-
-    for (bank = 0, mptr = bootArgs->memoryMap; bank < bootArgs->memoryMapCount; bank++, mptr++) {
-
-        if (kMemoryRangeNVS != mptr->type) continue;
-        kprintf("Base NVS region 0x%x + 0x%x\n", (vm_offset_t)mptr->base, (vm_size_t)mptr->length);
-        /* Round to page size. Hopefully this does not overlap any reserved areas. */
-        page = atop_32(trunc_page((vm_offset_t)mptr->base));
-        count = atop_32(round_page((vm_offset_t)mptr->base + (vm_size_t)mptr->length)) - page;
-        kprintf("Rounded NVS region 0x%x size 0x%x\n", page, count);
-
-        hibernate_set_page_state(page_list, page_list_wired, page, count, 1);
-        pagesOut -= count;
+    boot_args * args = (boot_args *) PE_state.bootArgs;
+
+    hibernate_set_page_state(page_list, page_list_wired,
+                I386_HIB_PAGETABLE, I386_HIB_PAGETABLE_COUNT,
+                kIOHibernatePageStateFree);
+    *pagesOut -= I386_HIB_PAGETABLE_COUNT;
+
+    if (args->efiRuntimeServicesPageStart)
+    {
+        hibernate_set_page_state(page_list, page_list_wired,
+                args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount,
+                kIOHibernatePageStateFree);
+        *pagesOut -= args->efiRuntimeServicesPageCount;
     }
 }
 
 kern_return_t
 hibernate_processor_setup(IOHibernateImageHeader * header)
 {
-    current_cpu_datap()->cpu_hibernate = 1;
+    boot_args * args = (boot_args *) PE_state.bootArgs;
+
+    cpu_datap(0)->cpu_hibernate = 1;
     header->processorFlags = 0;
+
+    header->runtimePages = args->efiRuntimeServicesPageStart;
+    header->runtimePageCount = args->efiRuntimeServicesPageCount;
+
     return (KERN_SUCCESS);
 }
 
 void
 hibernate_vm_lock(void)
 {
-    if (FALSE /* getPerProc()->hibernate */)
+    if (current_cpu_datap()->cpu_hibernate)
     {
        vm_page_lock_queues();
        mutex_lock(&vm_page_queue_free_lock);
@@ -180,7 +203,7 @@ hibernate_vm_lock(void)
 void
 hibernate_vm_unlock(void)
 {
-    if (FALSE /* getPerProc()->hibernate */)
+    if (current_cpu_datap()->cpu_hibernate)
     {
        mutex_unlock(&vm_page_queue_free_lock);
        vm_page_unlock_queues();