/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*!
 * ARM64-specific functions required to support hibernation entry, and also to
 * support hibernation exit after wired pages have already been restored.
 */

#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/startup.h>
#include <IOKit/IOPlatformExpert.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <san/kasan.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <machine/pal_hibernate.h>


extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

void
pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end)
{
}

void
pal_hib_rebuild_pmap_structs(void)
{
}

static void
set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size)
{
	uint64_t first_page = atop_64(start_addr);
	uint64_t page_count = atop_64(size);
	uint64_t last_page  = first_page + page_count - 1;

	range->first_page = (uint32_t)first_page;
	assert(range->first_page == first_page); // make sure the truncation wasn't lossy

	range->last_page = (uint32_t)last_page;
	assert(range->last_page == last_page); // make sure the truncation wasn't lossy
}
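
/*
 * Worked example of the arithmetic above (illustrative only; it assumes
 * 16 KiB kernel pages, i.e. PAGE_SHIFT == 14, which is an assumption and
 * not something this file states): atop_64() shifts a byte address/size
 * down to a page number/count, so a range starting at 0x800000000 with
 * size 0x100000000 yields first_page = 0x200000 and page_count = 0x40000,
 * both of which fit in the 32-bit fields checked by the asserts above.
 */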

// Comparison function used to sort the DRAM ranges list.
static int
dram_range_compare(const void *a, const void *b)
{
	return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page;
}

hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
	vm_size_t               size;
	uint32_t                bank;
	uint32_t                pages, page_count;
	hibernate_page_list_t * list;
	hibernate_bitmap_t    * bitmap;

	// Allocate a single DRAM range to cover the kernel-managed memory.
	hibernate_bitmap_t dram_ranges[1];
	uint32_t num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);

	// All of kernel-managed memory can be described by one DRAM range
	set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);

	// Sort the DRAM ranges based on the first page. Other parts of the hibernation
	// flow expect these ranges to be in order.
	qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare);

	// size the hibernation bitmap

	size = sizeof(hibernate_page_list_t);
	page_count = 0;
	for (bank = 0; bank < num_banks; bank++) {
		pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
		page_count += pages;
		size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
	}
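	/*
	 * Each uint32_t bitmap word tracks 32 pages, so ((pages + 31) >> 5)
	 * is ceil(pages / 32): the number of words needed to cover a bank.
	 * For example, a bank of 100 pages needs (100 + 31) >> 5 = 4 words,
	 * i.e. 16 bytes of bitmap appended after that bank's header.
	 */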

	list = (hibernate_page_list_t *)kalloc(size);
	if (!list) {
		goto out;
	}

	list->list_size  = (uint32_t)size;
	list->page_count = page_count;
	list->bank_count = num_banks;

	// convert to hibernation bitmap.

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < num_banks; bank++) {
		bitmap->first_page = dram_ranges[bank].first_page;
		bitmap->last_page  = dram_ranges[bank].last_page;
		bitmap->bitmapwords = (bitmap->last_page + 1
		    - bitmap->first_page + 31) >> 5;
		if (log) {
			HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n",
			    bank,
			    ptoa_64(bitmap->first_page), bitmap->first_page,
			    ptoa_64(bitmap->last_page), bitmap->last_page);
		}
		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}

out:

	return list;
}
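
/*
 * The allocation built above is a variable-length structure (this is a
 * layout sketch derived from the loop, not a literal type definition):
 * a hibernate_page_list_t header, followed by bank_count hibernate_bitmap_t
 * records, each immediately followed inline by its bitmapwords-long uint32_t
 * bitmap. That inline tail is why the loop advances `bitmap` past
 * bitmap->bitmap[bitmap->bitmapwords] to reach the next bank:
 *
 *   +---------------------------+
 *   | hibernate_page_list_t     |  list_size, page_count, bank_count
 *   +---------------------------+
 *   | hibernate_bitmap_t [0]    |  first_page, last_page, bitmapwords
 *   | uint32_t bitmap[words_0]  |  one bit per page in bank 0
 *   +---------------------------+
 *   | ... one pair per bank ... |
 *   +---------------------------+
 */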

void
pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count)
{
	vm_offset_t stack_end = BootCpuData.intstack_top;
	vm_offset_t stack_begin = stack_end - INTSTACK_SIZE;
	*first_page = atop_64(kvtophys(stack_begin));
	*page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin));
}
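
/*
 * Note: round_page()/trunc_page() widen [stack_begin, stack_end) to whole-page
 * boundaries before counting, so even a stack whose ends are not page-aligned
 * is fully covered by the *page_count pages starting at *first_page.
 */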

// mark pages not to be saved, but available for scratch usage during restore
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    boolean_t preflight,
    uint32_t * pagesOut)
{
	vm_offset_t stack_first_page, stack_page_count;
	pal_hib_get_stack_pages(&stack_first_page, &stack_page_count);

	extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa;
	vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa);

	if (!preflight) {
		// mark the stack as unavailable for clobbering during restore;
		// we won't actually save it because we mark these pages as free
		// in hibernate_page_list_set_volatile
		hibernate_set_page_state(page_list, page_list_wired,
		    stack_first_page, stack_page_count,
		    kIOHibernatePageStateWiredSave);

		// Mark the PPL stack as not needing to be saved. Any PPL memory that is
		// excluded from the image will need to be explicitly checked for in
		// pmap_check_ppl_hashed_flag_all(). That function ensures that all
		// PPL pages are contained within the image (so any memory explicitly
		// not being saved needs to be removed from the check).
		hibernate_set_page_state(page_list, page_list_wired,
		    atop_64(pmap_stacks_start_pa), pmap_stack_page_count,
		    kIOHibernatePageStateFree);
	}
	*pagesOut += stack_page_count;
	*pagesOut -= pmap_stack_page_count;
}

// mark pages not to be saved and not for scratch usage during restore
void
hibernate_page_list_set_volatile(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    uint32_t * pagesOut)
{
	vm_offset_t page, count;

	// hibernation restore runs on the interrupt stack,
	// so we need to make sure we don't save it
	pal_hib_get_stack_pages(&page, &count);
	hibernate_set_page_state(page_list, page_list_wired,
	    page, count,
	    kIOHibernatePageStateFree);
	*pagesOut -= count;
}

kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
	cpu_datap(master_cpu)->cpu_hibernate = 1;
	header->processorFlags = 0;
	return KERN_SUCCESS;
}

static boolean_t hibernate_vm_locks_safe;

void
hibernate_vm_lock(void)
{
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_lock_queues();
		hibernate_vm_locks_safe = TRUE;
	}
}

void
hibernate_vm_unlock(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_unlock_queues();
	}
	ml_set_is_quiescing(TRUE);
}

// processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled.
// ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled.
// VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().
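//
// A sketch of that ordering across one hibernation cycle (the caller names
// are taken from the comment above; the timeline itself is an illustration):
//
//   sleep:  interrupts disabled
//           -> hibernate_vm_lock()      locks the VM queues, locks_safe = TRUE
//           -> hibernate_vm_unlock()    unlocks the queues, marks quiescing
//           ... image written, power removed ...
//   wake:   wired pages restored, interrupts still disabled
//           -> hibernate_vm_lock_end()  locks_safe = FALSE
//           interrupts re-enabled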

void
hibernate_vm_lock_end(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	hibernate_vm_locks_safe = FALSE;
}

boolean_t
hibernate_vm_locks_are_safe(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return hibernate_vm_locks_safe;
}

void
pal_hib_init(void)
{
	gHibernateGlobals.kernelSlide = gVirtBase - gPhysBase;
}
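
/*
 * Note: gVirtBase - gPhysBase is the offset between the kernel's virtual
 * mapping of managed memory and the underlying physical addresses. Recording
 * it in gHibernateGlobals makes the translation available to hibernation-exit
 * code; illustratively (an assumption about how consumers use this field,
 * not something this file shows), phys = virt - kernelSlide.
 */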

void
pal_hib_write_hook(void)
{
}