/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*!
 * ARM64-specific functions required to support hibernation entry, and also to
 * support hibernation exit after wired pages have already been restored.
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/startup.h>
#include <IOKit/IOPlatformExpert.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <san/kasan.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <machine/pal_hibernate.h>

#if HIBERNATE_HMAC_IMAGE
#include <arm64/hibernate_ppl_hmac.h>
#include <arm64/ppl/ppl_hib.h>
#endif /* HIBERNATE_HMAC_IMAGE */
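
// Forward declaration of the kernel's qsort(), used below to sort the DRAM
// range list by first page.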
extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

void
pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end)
{
}

void
pal_hib_rebuild_pmap_structs(void)
{
}
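
// Record a physical address range in a hibernate_bitmap_t, converting the
// byte-addressed range into first/last page numbers.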
static void
set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size)
{
	uint64_t first_page = atop_64(start_addr);
	uint64_t page_count = atop_64(size);
	uint64_t last_page = first_page + page_count - 1;

	range->first_page = (uint32_t)first_page;
	assert(range->first_page == first_page); // make sure the truncation wasn't lossy

	range->last_page = (uint32_t)last_page;
	assert(range->last_page == last_page); // make sure the truncation wasn't lossy
}

// Comparison function used to sort the DRAM ranges list.
static int
dram_range_compare(const void *a, const void *b)
{
	return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page;
}
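
// Build the hibernate_page_list_t describing every DRAM bank the hibernation
// image may need to cover: one bank for kernel-managed memory and, when
// HIBERNATE_HMAC_IMAGE is enabled, one per PPL-owned I/O range.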
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
	vm_size_t               size;
	uint32_t                bank;
	uint32_t                pages, page_count;
	hibernate_page_list_t * list;
	hibernate_bitmap_t    * bitmap;

#if HIBERNATE_HMAC_IMAGE
	// Determine if any PPL-owned I/O ranges need to be hibernated, and if so,
	// allocate bitmaps to represent those pages.
	const ppl_hib_io_range *io_ranges = NULL;
	uint16_t num_io_ranges = 0;
	hibernate_bitmap_t * dram_ranges = NULL;
	uint32_t num_banks = 1;

	ppl_hmac_get_io_ranges(&io_ranges, &num_io_ranges);

	// Allocate a single DRAM range to cover kernel-managed memory and one range
	// per PPL-owned I/O range that needs to be hibernated.
	if (io_ranges != NULL && num_io_ranges > 0) {
		num_banks += num_io_ranges;
	}

	dram_ranges = kheap_alloc(KHEAP_TEMP,
	    num_banks * sizeof(hibernate_bitmap_t), Z_WAITOK);
	if (!dram_ranges) {
		return NULL;
	}

	// The 0th dram range is used to represent kernel-managed memory, so skip it
	// when adding I/O ranges.
	for (unsigned int i = 1; i < num_banks; ++i) {
		dram_ranges[i].first_page = io_ranges[i - 1].first_page;
		dram_ranges[i].last_page = (io_ranges[i - 1].first_page + io_ranges[i - 1].page_count) - 1;
	}
#else
	// Allocate a single DRAM range to cover the kernel-managed memory.
	hibernate_bitmap_t dram_ranges[1];
	uint32_t num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);
#endif /* HIBERNATE_HMAC_IMAGE */

	// All of kernel-managed memory can be described by one DRAM range
	set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);

	// Sort the DRAM ranges based on the first page. Other parts of the hibernation
	// flow expect these ranges to be in order.
	qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare);

	// size the hibernation bitmap

	size = sizeof(hibernate_page_list_t);
	page_count = 0;
	for (bank = 0; bank < num_banks; bank++) {
		pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
		page_count += pages;
		size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
	}

	list = (hibernate_page_list_t *)kalloc(size);
	if (!list) {
		goto out;
	}

	list->list_size  = (uint32_t)size;
	list->page_count = page_count;
	list->bank_count = num_banks;

	// convert to hibernation bitmap.

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < num_banks; bank++) {
		bitmap->first_page = dram_ranges[bank].first_page;
		bitmap->last_page  = dram_ranges[bank].last_page;
		bitmap->bitmapwords = (bitmap->last_page + 1
		    - bitmap->first_page + 31) >> 5;
		if (log) {
			HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n",
			    bank,
			    ptoa_64(bitmap->first_page), bitmap->first_page,
			    ptoa_64(bitmap->last_page), bitmap->last_page);
		}
		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}

out:
#if HIBERNATE_HMAC_IMAGE
	kheap_free(KHEAP_TEMP, dram_ranges,
	    num_banks * sizeof(hibernate_bitmap_t));
#endif /* HIBERNATE_HMAC_IMAGE */

	return list;
}
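
// Return the physical page range backing the boot CPU's interrupt stack.
// Hibernation restore runs on this stack, so callers use this range to keep
// those pages out of the set that gets overwritten during restore.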
void
pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count)
{
	vm_offset_t stack_end = BootCpuData.intstack_top;
	vm_offset_t stack_begin = stack_end - INTSTACK_SIZE;
	*first_page = atop_64(kvtophys(stack_begin));
	*page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin));
}

// mark pages not to be saved, but available for scratch usage during restore
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    boolean_t preflight,
    uint32_t * pagesOut)
{
	vm_offset_t stack_first_page, stack_page_count;
	pal_hib_get_stack_pages(&stack_first_page, &stack_page_count);

	extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa;
	vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa);

	if (!preflight) {
		// mark the stack as unavailable for clobbering during restore;
		// we won't actually save it because we mark these pages as free
		// in hibernate_page_list_set_volatile
		hibernate_set_page_state(page_list, page_list_wired,
		    stack_first_page, stack_page_count,
		    kIOHibernatePageStateWiredSave);

		// Mark the PPL stack as not needing to be saved. Any PPL memory that is
		// excluded from the image will need to be explicitly checked for in
		// pmap_check_ppl_hashed_flag_all(). That function ensures that all
		// PPL pages are contained within the image (so any memory explicitly
		// not being saved, needs to be removed from the check).
		hibernate_set_page_state(page_list, page_list_wired,
		    atop_64(pmap_stacks_start_pa), pmap_stack_page_count,
		    kIOHibernatePageStateFree);
	}
	*pagesOut += stack_page_count;
	*pagesOut -= pmap_stack_page_count;
}

// mark pages not to be saved and not for scratch usage during restore
void
hibernate_page_list_set_volatile(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    uint32_t * pagesOut)
{
	vm_offset_t page, count;

	// hibernation restore runs on the interrupt stack,
	// so we need to make sure we don't save it
	pal_hib_get_stack_pages(&page, &count);
	hibernate_set_page_state(page_list, page_list_wired,
	    page, count,
	    kIOHibernatePageStateFree);
	*pagesOut -= count;
}

kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
	cpu_datap(master_cpu)->cpu_hibernate = 1;
	header->processorFlags = 0;
	return KERN_SUCCESS;
}
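
// Set between hibernate_vm_lock() and hibernate_vm_lock_end(), the window in
// which the VM locks are effectively single threaded (see the note above
// hibernate_vm_lock_end() below).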
static boolean_t hibernate_vm_locks_safe;

void
hibernate_vm_lock(void)
{
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_lock_queues();
		hibernate_vm_locks_safe = TRUE;
	}
}

void
hibernate_vm_unlock(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_unlock_queues();
	}
	ml_set_is_quiescing(TRUE);
}

// processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled.
// ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled.
// VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().

void
hibernate_vm_lock_end(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	hibernate_vm_locks_safe = FALSE;
}

boolean_t
hibernate_vm_locks_are_safe(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return hibernate_vm_locks_safe;
}

void
pal_hib_init(void)
{
#if HIBERNATE_HMAC_IMAGE
	gHibernateGlobals.hmacRegBase = ppl_hmac_get_reg_base();
#endif /* HIBERNATE_HMAC_IMAGE */
}
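
// A PAL hibernate hook; no ARM64-specific work is needed here, so it is left
// empty.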
void
pal_hib_write_hook(void)
{
}