/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * ARM64-specific functions required to support hibernation entry, and also to
 * support hibernation exit after wired pages have already been restored.
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/startup.h>
#include <IOKit/IOPlatformExpert.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <san/kasan.h>
#include <arm/cpu_internal.h>
#include <arm/cpu_data_internal.h>
#include <machine/pal_hibernate.h>
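// Forward declaration of the in-kernel qsort(), used below to order the DRAM
// ranges by their first page before the hibernation bitmap is sized.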
extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
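// On this configuration there are no pmap structures to tear down before the
// image is written or to rebuild afterwards, so these two hooks are empty.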
void
pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end)
{
}
void
pal_hib_rebuild_pmap_structs(void)
{
}
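/**
 * Describe one DRAM range by its first and last physical page number, derived
 * from a physical base address and size. The fields are 32 bits wide, so the
 * asserts below guard against lossy truncation of the 64-bit page numbers.
 */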
static void
set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size)
{
	uint64_t first_page = atop_64(start_addr);
	uint64_t page_count = atop_64(size);
	uint64_t last_page  = first_page + page_count - 1;

	range->first_page = (uint32_t)first_page;
	assert(range->first_page == first_page); // make sure the truncation wasn't lossy

	range->last_page = (uint32_t)last_page;
	assert(range->last_page == last_page); // make sure the truncation wasn't lossy
}
// Comparison function used to sort the DRAM ranges list.
static int
dram_range_compare(const void *a, const void *b)
{
	return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page;
}
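/**
 * Build the hibernation page list: a variable-length structure consisting of a
 * hibernate_page_list_t header followed, for each DRAM bank, by a
 * hibernate_bitmap_t header and its bitmap words (one bit per page, packed
 * into 32-bit words). A bank of N pages therefore needs (N + 31) >> 5 bitmap
 * words; for example, a bank of 1,000 pages needs 32 words.
 */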
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
	vm_size_t               size;
	uint32_t                bank;
	uint32_t                pages, page_count;
	hibernate_page_list_t * list;
	hibernate_bitmap_t *    bitmap;

	// Allocate a single DRAM range to cover the kernel-managed memory.
	hibernate_bitmap_t dram_ranges[1];
	uint32_t num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);

	// All of kernel-managed memory can be described by one DRAM range
	set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);

	// Sort the DRAM ranges based on the first page. Other parts of the hibernation
	// flow expect these ranges to be in order.
	qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare);

	// size the hibernation bitmap

	size = sizeof(hibernate_page_list_t);
	page_count = 0;
	for (bank = 0; bank < num_banks; bank++) {
		pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
		page_count += pages;
		size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
	}

	list = (hibernate_page_list_t *)kalloc(size);
	if (!list) {
		return NULL;
	}

	list->list_size  = (uint32_t)size;
	list->page_count = page_count;
	list->bank_count = num_banks;

	// convert to hibernation bitmap.

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < num_banks; bank++) {
		bitmap->first_page = dram_ranges[bank].first_page;
		bitmap->last_page  = dram_ranges[bank].last_page;
		bitmap->bitmapwords = (bitmap->last_page + 1
		    - bitmap->first_page + 31) >> 5;
		if (log) {
			HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n",
			    bank,
			    ptoa_64(bitmap->first_page), bitmap->first_page,
			    ptoa_64(bitmap->last_page), bitmap->last_page);
		}
		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}

	return list;
}
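/**
 * Return the boot CPU's interrupt stack as a physical page range. Hibernation
 * restore runs on this stack, so callers use the range to keep these pages out
 * of the save and scratch sets.
 */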
void
pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count)
{
	vm_offset_t stack_end = BootCpuData.intstack_top;
	vm_offset_t stack_begin = stack_end - INTSTACK_SIZE;
	*first_page = atop_64(kvtophys(stack_begin));
	*page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin));
}
// mark pages not to be saved, but available for scratch usage during restore
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    boolean_t preflight,
    uint32_t * pagesOut)
{
	vm_offset_t stack_first_page, stack_page_count;
	pal_hib_get_stack_pages(&stack_first_page, &stack_page_count);

	extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa;
	vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa);

	if (!preflight) {
		// mark the stack as unavailable for clobbering during restore;
		// we won't actually save it because we mark these pages as free
		// in hibernate_page_list_set_volatile
		hibernate_set_page_state(page_list, page_list_wired,
		    stack_first_page, stack_page_count,
		    kIOHibernatePageStateWiredSave);

		// Mark the PPL stack as not needing to be saved. Any PPL memory that is
		// excluded from the image will need to be explicitly checked for in
		// pmap_check_ppl_hashed_flag_all(). That function ensures that all
		// PPL pages are contained within the image (so any memory explicitly
		// not being saved, needs to be removed from the check).
		hibernate_set_page_state(page_list, page_list_wired,
		    atop_64(pmap_stacks_start_pa), pmap_stack_page_count,
		    kIOHibernatePageStateFree);
	}

	*pagesOut += stack_page_count;
	*pagesOut -= pmap_stack_page_count;
}
// mark pages not to be saved and not for scratch usage during restore
void
hibernate_page_list_set_volatile(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    uint32_t * pagesOut)
{
	vm_offset_t page, count;

	// hibernation restore runs on the interrupt stack,
	// so we need to make sure we don't save it
	pal_hib_get_stack_pages(&page, &count);
	hibernate_set_page_state(page_list, page_list_wired,
	    page, count,
	    kIOHibernatePageStateFree);
	*pagesOut -= count;
}
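// Flag the boot processor as hibernating and initialize the processor flags
// that are recorded in the hibernation image header.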
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
	cpu_datap(master_cpu)->cpu_hibernate = 1;
	header->processorFlags = 0;
	return KERN_SUCCESS;
}
static boolean_t hibernate_vm_locks_safe;

void
hibernate_vm_lock(void)
{
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_lock_queues();
		hibernate_vm_locks_safe = TRUE;
	}
}

void
hibernate_vm_unlock(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	if (kIOHibernateStateHibernating == gIOHibernateState) {
		hibernate_vm_unlock_queues();
	}
	ml_set_is_quiescing(TRUE);
}
// processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled.
// ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled.
// VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().

void
hibernate_vm_lock_end(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	hibernate_vm_locks_safe = FALSE;
}

boolean_t
hibernate_vm_locks_are_safe(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return hibernate_vm_locks_safe;
}
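// A sketch of the calling sequence described above (the actual call sites live
// outside this file):
//
//   hibernate_vm_lock();       // sleep path, interrupts disabled
//   hibernate_vm_unlock();     // sleep path, interrupts disabled
//   /* ... write image, power down, wake ... */
//   hibernate_vm_lock_end();   // wake path, before interrupts are re-enabled
//
// hibernate_vm_locks_are_safe() reports TRUE only between hibernate_vm_lock()
// and hibernate_vm_lock_end().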
void
pal_hib_init(void)
{
	// Stash the kernel slide (virtual base minus physical base) where the
	// hibernation code can find it.
	gHibernateGlobals.kernelSlide = gVirtBase - gPhysBase;
}
void
pal_hib_write_hook(void)
{
	// Empty hook; nothing to do before the image is written on this configuration.
}