osfmk/i386/hibernate_i386.c
/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <i386/pmap.h>
#include <kern/cpu_data.h>
#include <IOKit/IOPlatformExpert.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* This assumes that:
 * - we never will want to read or write memory below the start of kernel text
 * - kernel text and data isn't included in pmap memory regions
 */

extern void *sectTEXTB;
extern char *first_avail;
hibernate_page_list_t *
hibernate_page_list_allocate(void)
{
    vm_offset_t             base;
    vm_size_t               size;
    uint32_t                bank;
    uint32_t                pages, page_count;
    hibernate_page_list_t * list;
    hibernate_bitmap_t *    bitmap;
    pmap_memory_region_t *  regions;
    pmap_memory_region_t *  rp;
    uint32_t                num_regions, num_alloc_regions;

    /* Make a list of the maximum number of regions needed */
    num_alloc_regions = 1 + pmap_memory_region_count;

    /* Allocate our own list of memory regions so we can sort them in order. */
    regions = (pmap_memory_region_t *)kalloc(sizeof(pmap_memory_region_t) * num_alloc_regions);
    if (!regions)
        return (NULL);
    /* Fill in the actual regions we will be returning. */
    rp = regions;

    /* XXX should check for non-volatile memory region below kernel space. */
    /* Kernel region is first. */
    base = (vm_offset_t)(sectTEXTB) & 0x3FFFFFFF;
    rp->base = atop_32(base);
    rp->end  = atop_32((vm_offset_t)first_avail) - 1;
    num_regions = 1;

    /* Remaining memory regions. Consolidate adjacent regions. */
    for (bank = 0; bank < (uint32_t) pmap_memory_region_count; bank++)
    {
        if ((rp->end + 1) == pmap_memory_regions[bank].base) {
            /* This bank is contiguous with the previous region, so extend it. */
            rp->end = pmap_memory_regions[bank].end;
        } else {
            /* Not contiguous: start a new region. */
            rp++;
            num_regions++;
            rp->base = pmap_memory_regions[bank].base;
            rp->end  = pmap_memory_regions[bank].end;
        }
    }
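
    /*
     * Worked example (illustrative, not from the original source): if the
     * region built so far ends at page 0x1FFF and the next pmap bank begins
     * at page 0x2000, the test above ((rp->end + 1) == base) merges the two
     * into one region; a bank beginning anywhere else starts a new region.
     */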
    /* Size the hibernation bitmap */
    size = sizeof(hibernate_page_list_t);
    page_count = 0;
    for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
        pages = rp->end + 1 - rp->base;
        page_count += pages;          /* total pages described by the list */
        size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
    }
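
    /*
     * Worked example (illustrative, not from the original source), assuming
     * 4 KB pages: a 128 MB bank spans 32768 pages, so it needs
     * (32768 + 31) >> 5 = 1024 bitmap words, i.e. 4 KB of bitmap plus one
     * hibernate_bitmap_t header per bank.
     */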
    list = (hibernate_page_list_t *)kalloc(size);
    if (!list) {
        kfree((void *)regions, sizeof(pmap_memory_region_t) * num_alloc_regions);
        return (NULL);
    }

    list->list_size  = size;
    list->page_count = page_count;
    list->bank_count = num_regions;
    /* Convert to hibernation bitmap. */
    /* This assumes that ranges are in order and do not overlap. */
    bitmap = &list->bank_bitmap[0];
    for (bank = 0, rp = regions; bank < num_regions; bank++, rp++) {
        bitmap->first_page  = rp->base;
        bitmap->last_page   = rp->end;
        bitmap->bitmapwords = (bitmap->last_page + 1
                               - bitmap->first_page + 31) >> 5;
        kprintf("HIB: Bank %d: 0x%x end 0x%x\n", bank,
                ptoa_32(bitmap->first_page),
                ptoa_32(bitmap->last_page));
        /* Step over this bank's bitmap words to the next bank's header. */
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }

    kfree((void *)regions, sizeof(pmap_memory_region_t) * num_alloc_regions);

    return (list);
}
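
/*
 * Illustration only -- not part of the original file.  A minimal sketch of
 * how the variable-length structure built above can be walked: each bank's
 * hibernate_bitmap_t header is followed in memory by its bitmap words, so
 * locating the bank that covers a given page uses the same pointer
 * arithmetic as the builder.  The helper name is hypothetical; it relies
 * only on fields already used in hibernate_page_list_allocate().
 */
static hibernate_bitmap_t *
hibernate_bank_for_page(hibernate_page_list_t * list, uint32_t page)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap = &list->bank_bitmap[0];

    for (bank = 0; bank < list->bank_count; bank++) {
        if ((page >= bitmap->first_page) && (page <= bitmap->last_page))
            return (bitmap);
        /* Advance past this bank's bitmap words to the next bank header. */
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
    return (NULL);
}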
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
                                   hibernate_page_list_t * page_list_wired)
{
    KernelBootArgs_t * bootArgs = (KernelBootArgs_t *)PE_state.bootArgs;
    MemoryRange *      mptr;
    uint32_t           bank;
    uint32_t           page, count;

    /* Walk the boot-time memory map and preserve any non-volatile (NVS) ranges. */
    for (bank = 0, mptr = bootArgs->memoryMap; bank < bootArgs->memoryMapCount; bank++, mptr++) {

        if (kMemoryRangeNVS != mptr->type)
            continue;
        kprintf("Base NVS region 0x%x + 0x%x\n", (vm_offset_t)mptr->base, (vm_size_t)mptr->length);
        /* Round to page size. Hopefully this does not overlap any reserved areas. */
        page = atop_32(trunc_page((vm_offset_t)mptr->base));
        count = atop_32(round_page((vm_offset_t)mptr->base + (vm_size_t)mptr->length)) - page;
        kprintf("Rounded NVS region 0x%x size 0x%x\n", page, count);

        hibernate_set_page_state(page_list, page_list_wired, page, count, 1);
    }
}
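
/*
 * Worked example (illustrative, not from the original source), assuming the
 * i386 4 KB page size: an NVS range at base 0x7FEF0123 with length 0x2000
 * gives page  = atop_32(trunc_page(0x7FEF0123)) = atop_32(0x7FEF0000) = 0x7FEF0
 * and   count = atop_32(round_page(0x7FEF2123)) - 0x7FEF0 = 0x7FEF3 - 0x7FEF0 = 3,
 * i.e. the rounded region covers three pages even though the raw length is
 * only two pages' worth of bytes.
 */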
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
    /* Mark this CPU as hibernating; no processor-specific flags are recorded in the image header. */
    current_cpu_datap()->cpu_hibernate = 1;
    header->processorFlags = 0;
    return (KERN_SUCCESS);
}
void
hibernate_vm_lock(void)
{
    if (FALSE /* getPerProc()->hibernate */)
    {
        vm_page_lock_queues();
        mutex_lock(&vm_page_queue_free_lock);
    }
}
void
hibernate_vm_unlock(void)
{
    if (FALSE /* getPerProc()->hibernate */)
    {
        mutex_unlock(&vm_page_queue_free_lock);
        vm_page_unlock_queues();
    }
}
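
/*
 * Note (editorial, not from the original source): when enabled, the pair above
 * takes the page-queues lock before vm_page_queue_free_lock and releases them
 * in the reverse order; both bodies are currently compiled out by the FALSE
 * guard, so on i386 these hooks are effectively no-ops.
 */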