/* NOTE(review): scrape residue repaired — this text is a gitweb capture of
 * apple/xnu osfmk/i386/hibernate_i386.c (git.saurik.com mirror). */
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 #include <kern/machine.h>
24 #include <kern/misc_protos.h>
25 #include <kern/thread.h>
26 #include <kern/processor.h>
27 #include <kern/kalloc.h>
28 #include <mach/machine.h>
29 #include <mach/processor_info.h>
30 #include <mach/mach_types.h>
31 #include <i386/pmap.h>
32 #include <kern/cpu_data.h>
33 #include <IOKit/IOPlatformExpert.h>
36 #include <IOKit/IOHibernatePrivate.h>
37 #include <vm/vm_page.h>
39 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
42 * - we never will want to read or write memory below the start of kernel text
43 * - kernel text and data isn't included in pmap memory regions
46 extern void *sectTEXTB
;
47 extern char *first_avail
;
49 hibernate_page_list_t
*
50 hibernate_page_list_allocate(void)
55 uint32_t pages
, page_count
;
56 hibernate_page_list_t
* list
;
57 hibernate_bitmap_t
* bitmap
;
58 pmap_memory_region_t
* regions
;
59 pmap_memory_region_t
* rp
;
60 uint32_t num_regions
, num_alloc_regions
;
64 /* Make a list of the maximum number of regions needed */
65 num_alloc_regions
= 1 + pmap_memory_region_count
;
67 /* Allocate our own list of memory regions so we can sort them in order. */
68 regions
= (pmap_memory_region_t
*)kalloc(sizeof(pmap_memory_region_t
) * num_alloc_regions
);
72 /* Fill in the actual regions we will be returning. */
75 /* XXX should check for non-volatile memory region below kernel space. */
76 /* Kernel region is first. */
77 base
= (vm_offset_t
)(sectTEXTB
) & 0x3FFFFFFF;
78 rp
->base
= atop_32(base
);
79 rp
->end
= atop_32((vm_offset_t
)first_avail
) - 1;
83 /* Remaining memory regions. Consolidate adjacent regions. */
84 for (bank
= 0; bank
< (uint32_t) pmap_memory_region_count
; bank
++)
86 if ((rp
->end
+ 1) == pmap_memory_regions
[bank
].base
) {
87 rp
->end
= pmap_memory_regions
[bank
].end
;
91 rp
->base
= pmap_memory_regions
[bank
].base
;
92 rp
->end
= pmap_memory_regions
[bank
].end
;
97 /* Size the hibernation bitmap */
98 size
= sizeof(hibernate_page_list_t
);
100 for (bank
= 0, rp
= regions
; bank
< num_regions
; bank
++, rp
++) {
101 pages
= rp
->end
+ 1 - rp
->base
;
103 size
+= sizeof(hibernate_bitmap_t
) + ((pages
+ 31) >> 5) * sizeof(uint32_t);
106 list
= (hibernate_page_list_t
*)kalloc(size
);
110 list
->list_size
= size
;
111 list
->page_count
= page_count
;
112 list
->bank_count
= num_regions
;
114 /* Convert to hibernation bitmap. */
115 /* This assumes that ranges are in order and do not overlap. */
116 bitmap
= &list
->bank_bitmap
[0];
117 for (bank
= 0, rp
= regions
; bank
< num_regions
; bank
++, rp
++) {
118 bitmap
->first_page
= rp
->base
;
119 bitmap
->last_page
= rp
->end
;
120 bitmap
->bitmapwords
= (bitmap
->last_page
+ 1
121 - bitmap
->first_page
+ 31) >> 5;
122 kprintf("HIB: Bank %d: 0x%x end 0x%x\n", bank
,
123 ptoa_32(bitmap
->first_page
),
124 ptoa_32(bitmap
->last_page
));
125 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
128 kfree((void *)regions
, sizeof(pmap_memory_region_t
) * num_alloc_regions
);
133 hibernate_page_list_setall_machine(hibernate_page_list_t
* page_list
,
134 hibernate_page_list_t
* page_list_wired
,
137 KernelBootArgs_t
* bootArgs
= (KernelBootArgs_t
*)PE_state
.bootArgs
;
140 uint32_t page
, count
;
142 for (bank
= 0, mptr
= bootArgs
->memoryMap
; bank
< bootArgs
->memoryMapCount
; bank
++, mptr
++) {
144 if (kMemoryRangeNVS
!= mptr
->type
) continue;
145 kprintf("Base NVS region 0x%x + 0x%x\n", (vm_offset_t
)mptr
->base
, (vm_size_t
)mptr
->length
);
146 /* Round to page size. Hopefully this does not overlap any reserved areas. */
147 page
= atop_32(trunc_page((vm_offset_t
)mptr
->base
));
148 count
= atop_32(round_page((vm_offset_t
)mptr
->base
+ (vm_size_t
)mptr
->length
)) - page
;
149 kprintf("Rounded NVS region 0x%x size 0x%x\n", page
, count
);
151 hibernate_set_page_state(page_list
, page_list_wired
, page
, count
, 1);
157 hibernate_processor_setup(IOHibernateImageHeader
* header
)
159 current_cpu_datap()->cpu_hibernate
= 1;
160 header
->processorFlags
= 0;
161 return (KERN_SUCCESS
);
165 hibernate_vm_lock(void)
167 if (FALSE
/* getPerProc()->hibernate */)
169 vm_page_lock_queues();
170 mutex_lock(&vm_page_queue_free_lock
);
175 hibernate_vm_unlock(void)
177 if (FALSE
/* getPerProc()->hibernate */)
179 mutex_unlock(&vm_page_queue_free_lock
);
180 vm_page_unlock_queues();