#include <i386/pmap.h>
#include <kern/cpu_data.h>
+#include <kern/kalloc.h>
#include <IOKit/IOPlatformExpert.h>
-
-#include <pexpert/i386/efi.h>
+#define KERNEL
#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
-#include "i386_lowmem.h"
-
-#define MAX_BANKS 32
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/* This assumes that
+ * - we will never want to read or write memory below the start of kernel text
+ * - kernel text and data are not included in pmap memory regions
+ */
+
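+/* sectTEXTB is the base of the kernel __TEXT segment; first_avail is the
+ * first physical address available above the loaded kernel. Both are set
+ * up during early boot.
+ */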
+extern void *sectTEXTB;
+extern char *first_avail;
+
hibernate_page_list_t *
hibernate_page_list_allocate(void)
{
- ppnum_t base, num;
+ vm_offset_t base;
vm_size_t size;
- uint32_t bank, num_banks;
+ uint32_t bank;
uint32_t pages, page_count;
hibernate_page_list_t * list;
hibernate_bitmap_t * bitmap;
+ pmap_memory_region_t * regions;
+ pmap_memory_region_t * rp;
+ uint32_t num_regions, num_alloc_regions;
+
+ page_count = 0;
- EfiMemoryRange * mptr;
- uint32_t mcount, msize, i;
- hibernate_bitmap_t dram_ranges[MAX_BANKS];
- boot_args * args = (boot_args *) PE_state.bootArgs;
+ /* Make a list of the maximum number of regions needed */
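+ /* (one slot for the kernel text/data region added first below, plus the pmap regions) */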
+ num_alloc_regions = 1 + pmap_memory_region_count;
- mptr = args->MemoryMap;
- if (args->MemoryMapDescriptorSize == 0)
- panic("Invalid memory map descriptor size");
- msize = args->MemoryMapDescriptorSize;
- mcount = args->MemoryMapSize / msize;
+ /* Allocate our own list of memory regions so we can sort them in order. */
+ regions = (pmap_memory_region_t *)kalloc(sizeof(pmap_memory_region_t) * num_alloc_regions);
+ if (!regions)
+ return (0);
- num_banks = 0;
- for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize))
- {
- base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
- num = (ppnum_t) mptr->NumberOfPages;
- if (!num)
- continue;
-
- switch (mptr->Type)
- {
- // any kind of dram
- case kEfiLoaderCode:
- case kEfiLoaderData:
- case kEfiBootServicesCode:
- case kEfiBootServicesData:
- case kEfiConventionalMemory:
- case kEfiACPIReclaimMemory:
- case kEfiACPIMemoryNVS:
- case kEfiPalCode:
-
- if (!num_banks || (base != (1 + dram_ranges[num_banks - 1].last_page)))
- {
- num_banks++;
- if (num_banks >= MAX_BANKS)
- break;
- dram_ranges[num_banks - 1].first_page = base;
- }
- dram_ranges[num_banks - 1].last_page = base + num - 1;
- break;
-
- // runtime services will be restarted, so no save
- case kEfiRuntimeServicesCode:
- case kEfiRuntimeServicesData:
- // non dram
- case kEfiReservedMemoryType:
- case kEfiUnusableMemory:
- case kEfiMemoryMappedIO:
- case kEfiMemoryMappedIOPortSpace:
- default:
- break;
- }
- }
+ /* Fill in the actual regions we will be returning. */
+ rp = regions;
- if (num_banks >= MAX_BANKS)
- return (NULL);
+ /* XXX should check for non-volatile memory region below kernel space. */
+ /* Kernel region is first. */
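+ /* Masking with 0x3FFFFFFF strips the virtual mapping's high bits; this
+ * assumes kernel text is direct-mapped within the first 1GB of physical
+ * memory. first_avail bounds the kernel text and data actually loaded. */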
+ base = (vm_offset_t)(sectTEXTB) & 0x3FFFFFFF;
+ rp->base = atop_32(base);
+ rp->end = atop_32((vm_offset_t)first_avail) - 1;
+ rp->alloc = 0;
+ num_regions = 1;
- // size the hibernation bitmap
+ /* Remaining memory regions. Consolidate adjacent regions. */
+ for (bank = 0; bank < (uint32_t) pmap_memory_region_count; bank++)
+ {
+ if ((rp->end + 1) == pmap_memory_regions[bank].base) {
+ rp->end = pmap_memory_regions[bank].end;
+ } else {
+ ++rp;
+ ++num_regions;
+ rp->base = pmap_memory_regions[bank].base;
+ rp->end = pmap_memory_regions[bank].end;
+ rp->alloc = 0;
+ }
+ }
+ /* Size the hibernation bitmap */
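+ /* One bitmap bit per page; (pages + 31) >> 5 rounds up to whole 32-bit
+ * words, and each bank carries its own hibernate_bitmap_t header. */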
size = sizeof(hibernate_page_list_t);
page_count = 0;
- for (bank = 0; bank < num_banks; bank++) {
- pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
+ for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
+ pages = rp->end + 1 - rp->base;
page_count += pages;
size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
}
- list = (hibernate_page_list_t *)kalloc(size);
- if (!list)
- return (list);
+ list = (hibernate_page_list_t *)kalloc(size);
+ if (!list) {
+ kfree((void *)regions, sizeof(pmap_memory_region_t) * num_alloc_regions);
+ return (NULL);
+ }
list->list_size = size;
list->page_count = page_count;
- list->bank_count = num_banks;
-
- // convert to hibernation bitmap.
+ list->bank_count = num_regions;
+ /* Convert to hibernation bitmap. */
+ /* This assumes that ranges are in order and do not overlap. */
bitmap = &list->bank_bitmap[0];
- for (bank = 0; bank < num_banks; bank++)
- {
- bitmap->first_page = dram_ranges[bank].first_page;
- bitmap->last_page = dram_ranges[bank].last_page;
+ for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
+ bitmap->first_page = rp->base;
+ bitmap->last_page = rp->end;
bitmap->bitmapwords = (bitmap->last_page + 1
- bitmap->first_page + 31) >> 5;
- kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n", bank,
- bitmap->first_page,
- bitmap->last_page);
+ kprintf("HIB: Bank %d: 0x%x end 0x%x\n", bank,
+ ptoa_32(bitmap->first_page),
+ ptoa_32(bitmap->last_page));
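+ /* Each bank entry is variable length: advance past this bank's header
+ * and bitmap words to reach the next hibernate_bitmap_t. */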
bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
}
+ kfree((void *)regions, sizeof(pmap_memory_region_t) * num_alloc_regions);
return (list);
}
-// mark pages not to be saved, but available for scratch usage during restore
-
-void
-hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list,
- __unused hibernate_page_list_t * page_list_wired,
- __unused uint32_t * pagesOut)
-{
-}
-
-// mark pages not to be saved and not for scratch usage during restore
void
-hibernate_page_list_set_volatile( hibernate_page_list_t * page_list,
- hibernate_page_list_t * page_list_wired,
- uint32_t * pagesOut)
+hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
+ hibernate_page_list_t * page_list_wired,
+ uint32_t * pagesOut)
{
- boot_args * args = (boot_args *) PE_state.bootArgs;
-
- hibernate_set_page_state(page_list, page_list_wired,
- I386_HIB_PAGETABLE, I386_HIB_PAGETABLE_COUNT,
- kIOHibernatePageStateFree);
- *pagesOut -= I386_HIB_PAGETABLE_COUNT;
-
- if (args->efiRuntimeServicesPageStart)
- {
- hibernate_set_page_state(page_list, page_list_wired,
- args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount,
- kIOHibernatePageStateFree);
- *pagesOut -= args->efiRuntimeServicesPageCount;
+ KernelBootArgs_t * bootArgs = (KernelBootArgs_t *)PE_state.bootArgs;
+ MemoryRange * mptr;
+ uint32_t bank;
+ uint32_t page, count;
+
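+ /* Walk the boot memory map looking for non-volatile (NVS) ranges; they
+ * are excluded from the image, so the saved-page count is reduced below. */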
+ for (bank = 0, mptr = bootArgs->memoryMap; bank < bootArgs->memoryMapCount; bank++, mptr++) {
+
+ if (kMemoryRangeNVS != mptr->type)
+ continue;
+ kprintf("Base NVS region 0x%x + 0x%x\n", (vm_offset_t)mptr->base, (vm_size_t)mptr->length);
+ /* Round to page size. Hopefully this does not overlap any reserved areas. */
+ page = atop_32(trunc_page((vm_offset_t)mptr->base));
+ count = atop_32(round_page((vm_offset_t)mptr->base + (vm_size_t)mptr->length)) - page;
+ kprintf("Rounded NVS region 0x%x size 0x%x\n", page, count);
+
+ hibernate_set_page_state(page_list, page_list_wired, page, count, 1);
+ *pagesOut -= count;
}
}
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
- boot_args * args = (boot_args *) PE_state.bootArgs;
-
- cpu_datap(0)->cpu_hibernate = 1;
+ current_cpu_datap()->cpu_hibernate = 1;
header->processorFlags = 0;
-
- header->runtimePages = args->efiRuntimeServicesPageStart;
- header->runtimePageCount = args->efiRuntimeServicesPageCount;
-
return (KERN_SUCCESS);
}
void
hibernate_vm_lock(void)
{
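+ /* The hibernate check is compiled out here until a per-processor flag
+ * (cf. the commented-out getPerProc() check) is available. */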
- if (current_cpu_datap()->cpu_hibernate)
+ if (FALSE /* getPerProc()->hibernate */)
{
vm_page_lock_queues();
mutex_lock(&vm_page_queue_free_lock);
}
}

void
hibernate_vm_unlock(void)
{
- if (current_cpu_datap()->cpu_hibernate)
+ if (FALSE /* getPerProc()->hibernate */)
{
mutex_unlock(&vm_page_queue_free_lock);
vm_page_unlock_queues();