apple/xnu: osfmk/arm64/hibernate_arm64.c
/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*!
 * ARM64-specific functions required to support hibernation entry, and also to
 * support hibernation exit after wired pages have already been restored.
 */

#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/startup.h>
#include <IOKit/IOPlatformExpert.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <san/kasan.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <machine/pal_hibernate.h>

#if HIBERNATE_HMAC_IMAGE
#include <arm64/hibernate_ppl_hmac.h>
#include <arm64/ppl/ppl_hib.h>
#endif /* HIBERNATE_HMAC_IMAGE */

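// The kernel's qsort() (defined elsewhere in the kernel) is declared here and
// used below to order the DRAM ranges by their first page.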
extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

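/*
 * These pal hooks have no work to do on ARM64, so they are left as empty
 * stubs.
 */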
void
pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end)
{
}

void
pal_hib_rebuild_pmap_structs(void)
{
}

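/*
 * Describe the physical range [start_addr, start_addr + size) as inclusive
 * first/last page numbers in the given hibernate_bitmap_t.
 */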
static void
set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size)
{
    uint64_t first_page = atop_64(start_addr);
    uint64_t page_count = atop_64(size);
    uint64_t last_page = first_page + page_count - 1;

    range->first_page = (uint32_t)first_page;
    assert(range->first_page == first_page); // make sure the truncation wasn't lossy

    range->last_page = (uint32_t)last_page;
    assert(range->last_page == last_page); // make sure the truncation wasn't lossy
}

// Comparison function used to sort the DRAM ranges list.
static int
dram_range_compare(const void *a, const void *b)
{
    return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page;
}

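/*
 * Allocate and populate a hibernate_page_list_t describing every DRAM range
 * that participates in hibernation: one range for kernel-managed memory and,
 * when HIBERNATE_HMAC_IMAGE is enabled, one per PPL-owned I/O range.
 */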
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
    vm_size_t size;
    uint32_t bank;
    uint32_t pages, page_count;
    hibernate_page_list_t * list;
    hibernate_bitmap_t * bitmap;

#if HIBERNATE_HMAC_IMAGE
    // Determine if any PPL-owned I/O ranges need to be hibernated, and if so,
    // allocate bitmaps to represent those pages.
    const ppl_hib_io_range *io_ranges = NULL;
    uint16_t num_io_ranges = 0;
    hibernate_bitmap_t * dram_ranges = NULL;
    uint32_t num_banks = 1;

    ppl_hmac_get_io_ranges(&io_ranges, &num_io_ranges);

    // Allocate a single DRAM range to cover kernel-managed memory and one range
    // per PPL-owned I/O range that needs to be hibernated.
    if (io_ranges != NULL && num_io_ranges > 0) {
        num_banks += num_io_ranges;
    }

    dram_ranges = kheap_alloc(KHEAP_TEMP,
        num_banks * sizeof(hibernate_bitmap_t), Z_WAITOK);
    if (!dram_ranges) {
        return NULL;
    }

    // The 0th dram range is used to represent kernel-managed memory, so skip it
    // when adding I/O ranges.
    for (unsigned int i = 1; i < num_banks; ++i) {
        dram_ranges[i].first_page = io_ranges[i - 1].first_page;
        dram_ranges[i].last_page = (io_ranges[i - 1].first_page + io_ranges[i - 1].page_count) - 1;
    }
#else
    // Allocate a single DRAM range to cover the kernel-managed memory.
    hibernate_bitmap_t dram_ranges[1];
    uint32_t num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);
#endif /* HIBERNATE_HMAC_IMAGE */

    // All of kernel-managed memory can be described by one DRAM range
    set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);

    // Sort the DRAM ranges based on the first page. Other parts of the hibernation
    // flow expect these ranges to be in order.
    qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare);

    // size the hibernation bitmap
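    // Each bank contributes a hibernate_bitmap_t header plus one 32-bit word
    // per 32 pages; (pages + 31) >> 5 rounds the page count up to whole words.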

    size = sizeof(hibernate_page_list_t);
    page_count = 0;
    for (bank = 0; bank < num_banks; bank++) {
        pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
        page_count += pages;
        size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
    }

    list = (hibernate_page_list_t *)kalloc(size);
    if (!list) {
        goto out;
    }

    list->list_size = (uint32_t)size;
    list->page_count = page_count;
    list->bank_count = num_banks;

    // convert to hibernation bitmap.

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < num_banks; bank++) {
        bitmap->first_page = dram_ranges[bank].first_page;
        bitmap->last_page = dram_ranges[bank].last_page;
        bitmap->bitmapwords = (bitmap->last_page + 1
            - bitmap->first_page + 31) >> 5;
        if (log) {
            HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n",
                bank,
                ptoa_64(bitmap->first_page), bitmap->first_page,
                ptoa_64(bitmap->last_page), bitmap->last_page);
        }
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }

out:
#if HIBERNATE_HMAC_IMAGE
    kheap_free(KHEAP_TEMP, dram_ranges,
        num_banks * sizeof(hibernate_bitmap_t));
#endif /* HIBERNATE_HMAC_IMAGE */

    return list;
}

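/*
 * Report the boot CPU's interrupt stack as a physical page range; hibernation
 * restore runs on this stack (see hibernate_page_list_set_volatile below).
 */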
void
pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count)
{
    vm_offset_t stack_end = BootCpuData.intstack_top;
    vm_offset_t stack_begin = stack_end - INTSTACK_SIZE;
    *first_page = atop_64(kvtophys(stack_begin));
    *page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin));
}

// mark pages not to be saved, but available for scratch usage during restore
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    boolean_t preflight,
    uint32_t * pagesOut)
{
    vm_offset_t stack_first_page, stack_page_count;
    pal_hib_get_stack_pages(&stack_first_page, &stack_page_count);

    extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa;
    vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa);

    if (!preflight) {
        // mark the stack as unavailable for clobbering during restore;
        // we won't actually save it because we mark these pages as free
        // in hibernate_page_list_set_volatile
        hibernate_set_page_state(page_list, page_list_wired,
            stack_first_page, stack_page_count,
            kIOHibernatePageStateWiredSave);

        // Mark the PPL stack as not needing to be saved. Any PPL memory that is
        // excluded from the image will need to be explicitly checked for in
        // pmap_check_ppl_hashed_flag_all(). That function ensures that all
        // PPL pages are contained within the image (so any memory explicitly
        // not being saved needs to be removed from the check).
        hibernate_set_page_state(page_list, page_list_wired,
            atop_64(pmap_stacks_start_pa), pmap_stack_page_count,
            kIOHibernatePageStateFree);
    }
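    // Update the caller's running page count to match the transitions above:
    // the interrupt stack pages were marked WiredSave (+), the PPL stack pages
    // were marked Free (-).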
    *pagesOut += stack_page_count;
    *pagesOut -= pmap_stack_page_count;
}

// mark pages not to be saved and not for scratch usage during restore
void
hibernate_page_list_set_volatile(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    uint32_t * pagesOut)
{
    vm_offset_t page, count;

    // hibernation restore runs on the interrupt stack,
    // so we need to make sure we don't save it
    pal_hib_get_stack_pages(&page, &count);
    hibernate_set_page_state(page_list, page_list_wired,
        page, count,
        kIOHibernatePageStateFree);
    *pagesOut -= count;
}

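// Record on the master CPU that it is hibernating and clear the image
// header's processor flags.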
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
    cpu_datap(master_cpu)->cpu_hibernate = 1;
    header->processorFlags = 0;
    return KERN_SUCCESS;
}

static boolean_t hibernate_vm_locks_safe;

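// Take the VM page-queue locks on the way into hibernation; see the locking
// note following hibernate_vm_unlock() below.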
void
hibernate_vm_lock(void)
{
    if (kIOHibernateStateHibernating == gIOHibernateState) {
        hibernate_vm_lock_queues();
        hibernate_vm_locks_safe = TRUE;
    }
}

void
hibernate_vm_unlock(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    if (kIOHibernateStateHibernating == gIOHibernateState) {
        hibernate_vm_unlock_queues();
    }
    ml_set_is_quiescing(TRUE);
}

// processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled.
// ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled.
// VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().

void
hibernate_vm_lock_end(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    hibernate_vm_locks_safe = FALSE;
}

boolean_t
hibernate_vm_locks_are_safe(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    return hibernate_vm_locks_safe;
}

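// One-time hibernation setup: when the image is HMAC-protected, record the
// PPL HMAC block's register base in gHibernateGlobals.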
void
pal_hib_init(void)
{
#if HIBERNATE_HMAC_IMAGE
    gHibernateGlobals.hmacRegBase = ppl_hmac_get_reg_base();
#endif /* HIBERNATE_HMAC_IMAGE */
}

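// Platform hook for the hibernation image write path; there is no
// ARM64-specific work to do here, so this is an empty stub.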
void
pal_hib_write_hook(void)
{
}