/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/**
 * ARM64-specific functions required to support hibernation exit.
 */

#include <mach/mach_types.h>
#include <kern/misc_protos.h>
#include <IOKit/IOHibernatePrivate.h>
#include <machine/pal_hibernate.h>
#include <pexpert/arm/dockchannel.h>

#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#if HIBERNATE_HMAC_IMAGE
#include <arm64/ppl/ppl_hib.h>
#include <corecrypto/ccsha2_internal.h>
#include <corecrypto/ccdigest_internal.h>
#endif /* HIBERNATE_HMAC_IMAGE */

pal_hib_tramp_result_t gHibTramp;
pal_hib_globals_t gHibernateGlobals;
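
// Note (added commentary): gHibTramp is filled in by pal_hib_resume_tramp() below with the
// temporary translation tables (ttbr0/ttbr1), the kernel slide, and the scratch memory slide
// used while the image is being restored; gHibernateGlobals carries the physical register
// bases (dockchannel UART, hibernation UART, HMAC block) that the resume path maps and touches
// before the kernel proper is running again.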

uintptr_t
hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, __unused uint32_t procFlags)
{
    void *d = (void*)pal_hib_map(DEST_COPY_AREA, dst);
    __nosan_memcpy(d, (void*)src, len);
    return (uintptr_t)d;
}

uintptr_t
pal_hib_map(pal_hib_map_type_t virt, uint64_t phys)
{
    switch (virt) {
    case DEST_COPY_AREA:
    case COPY_PAGE_AREA:
    case SCRATCH_AREA:
    case WKDM_AREA:
        return phys + gHibTramp.memSlide;
    case BITMAP_AREA:
    case IMAGE_AREA:
    case IMAGE2_AREA:
        return phys;
    default:
        HIB_ASSERT(0);
    }
}
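
// Note (added commentary): gHibTramp.memSlide stays zero until pal_hib_resume_tramp() has
// finished building the temporary page tables (see the assignment at the end of that
// function), so early callers of pal_hib_map() effectively get the physical address back
// unchanged.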

void
pal_hib_restore_pal_state(__unused uint32_t *arg)
{
}

void
pal_hib_resume_init(pal_hib_ctx_t *ctx, hibernate_page_list_t *map, uint32_t *nextFree)
{
#if HIBERNATE_HMAC_IMAGE
    extern void AccelerateCrypto_SHA256_compress(ccdigest_state_t state, size_t numBlocks, const void *data);
    ctx->di = (struct ccdigest_info){
        .output_size = CCSHA256_OUTPUT_SIZE,
        .state_size = CCSHA256_STATE_SIZE,
        .block_size = CCSHA256_BLOCK_SIZE,
        .oid_size = ccoid_sha256_len,
        .oid = CC_DIGEST_OID_SHA256,
        .initial_state = ccsha256_initial_state,
        .compress = AccelerateCrypto_SHA256_compress,
        .final = ccdigest_final_64be,
    };

    SHA256_CTX shaCtx;

    // validate signature of handoff
    uint32_t handoffPages = gIOHibernateCurrentHeader->handoffPages;
    uint32_t handoffPageCount = gIOHibernateCurrentHeader->handoffPageCount;

    void *handoffSrc = (void *)pal_hib_map(IMAGE_AREA, ptoa_64(handoffPages));
    ppl_hib_init_context(&ctx->di, &shaCtx, 'HOFF');
    ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(handoffPages), &handoffPages);
    ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(handoffPageCount), &handoffPageCount);
    ccdigest_update(&ctx->di, shaCtx.ctx, ptoa_64(handoffPageCount), handoffSrc);
    uint8_t handoffHMAC[CCSHA384_OUTPUT_SIZE];
    ppl_hib_compute_hmac(&ctx->di, &shaCtx, gHibernateGlobals.hmacRegBase, handoffHMAC);
    HIB_ASSERT(__nosan_memcmp(handoffHMAC, gIOHibernateCurrentHeader->handoffHMAC, sizeof(handoffHMAC)) == 0);

    // construct a hibernate_scratch_t for storing all of the pages we restored
    hibernate_scratch_init(&ctx->pagesRestored, map, nextFree);
#endif /* HIBERNATE_HMAC_IMAGE */
}

void
pal_hib_restored_page(pal_hib_ctx_t *ctx, pal_hib_restore_stage_t stage, ppnum_t ppnum)
{
#if HIBERNATE_HMAC_IMAGE
    if (stage != pal_hib_restore_stage_handoff_data) {
        // remember that we restored this page
        hibernate_scratch_write(&ctx->pagesRestored, &ppnum, sizeof(ppnum));
    }
#endif /* HIBERNATE_HMAC_IMAGE */
}

void
pal_hib_patchup(pal_hib_ctx_t *ctx)
{
#if HIBERNATE_HMAC_IMAGE
    // compute and validate the HMAC for the wired pages (image1)
    SHA256_CTX shaCtx;

    hibernate_scratch_start_read(&ctx->pagesRestored);
    uint64_t pageCount = ctx->pagesRestored.totalLength / sizeof(ppnum_t);
    ppl_hib_init_context(&ctx->di, &shaCtx, 'PAG1');
    for (uint64_t i = 0; i < pageCount; i++) {
        ppnum_t ppnum;
        hibernate_scratch_read(&ctx->pagesRestored, &ppnum, sizeof(ppnum));
        vm_offset_t virtAddr = pal_hib_map(DEST_COPY_AREA, ptoa_64(ppnum));
        ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(ppnum), &ppnum);
        ccdigest_update(&ctx->di, shaCtx.ctx, PAGE_SIZE, (void *)virtAddr);
    }
    uint8_t image1PagesHMAC[CCSHA384_OUTPUT_SIZE];
    ppl_hib_compute_hmac(&ctx->di, &shaCtx, gHibernateGlobals.hmacRegBase, image1PagesHMAC);
    HIB_ASSERT(__nosan_memcmp(image1PagesHMAC, gIOHibernateCurrentHeader->image1PagesHMAC, sizeof(image1PagesHMAC)) == 0);
#endif /* HIBERNATE_HMAC_IMAGE */

    // DRAM pages are captured from a PPL context, so here we restore all cpu_data structures to a non-PPL context
    extern struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS];
    for (int i = 0; i < MAX_CPUS; i++) {
        pmap_cpu_data_array[i].cpu_data.ppl_state = PPL_STATE_KERNEL;
        pmap_cpu_data_array[i].cpu_data.ppl_kern_saved_sp = 0;
    }

    // cluster CTRR state needs to be reconfigured
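    // (added commentary) CTRR locks down the kernel text region per cluster; that register
    // state does not survive the power-off portion of hibernation, so it has to be applied
    // again on the resume path.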
    init_ctrr_cluster_states();

    // Calls into the pmap that could potentially modify pmap data structures
    // during image copying were explicitly blocked on hibernation entry.
    // Resetting this variable to false allows those calls to be made again.
    extern bool hib_entry_pmap_lockdown;
    hib_entry_pmap_lockdown = false;
}

void
pal_hib_decompress_page(void *src, void *dst, void *scratch, unsigned int compressedSize)
{
    const void *wkdmSrc;
    if (((uint64_t)src) & 63) {
        // the wkdm instruction requires that our source buffer be aligned, so copy into an aligned buffer if necessary
        __nosan_memcpy(scratch, src, compressedSize);
        wkdmSrc = scratch;
    } else {
        wkdmSrc = src;
    }
    HIB_ASSERT((((uint64_t)wkdmSrc) & 63) == 0);
    HIB_ASSERT((((uint64_t)dst) & PAGE_MASK) == 0);
    struct {
        uint32_t reserved:12;
        uint32_t status:3;
        uint32_t reserved2:17;
        uint32_t popcnt:18;
        uint32_t reserved3:14;
    } result = { .status = ~0u };
    __asm__ volatile ("wkdmd %0, %1" : "=r"(result) : "r"(dst), "0"(wkdmSrc));
    HIB_ASSERT(result.status == 0);
}
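
// Note (added commentary): "wkdmd" is the Apple-specific instruction that decompresses one
// WKdm-compressed page into dst; a nonzero status field in the returned descriptor means the
// hardware rejected the input, which the assert above treats as fatal during resume.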

// proc_reg's ARM_TTE_TABLE_NS has both NSTABLE and NS set
#define ARM_LPAE_NSTABLE        0x8000000000000000ULL

#define TOP_LEVEL               1
#define LAST_TABLE_LEVEL        3
#define PAGE_GRANULE_SHIFT      14
#define PAGE_GRANULE_SIZE       ((size_t)1<<PAGE_GRANULE_SHIFT)
#define PAGE_GRANULE_MASK       (PAGE_GRANULE_SIZE-1)
#define LEVEL_SHIFT(level)      (47 - (level * 11))

#define PTE_EMPTY(ent)          ((ent) == 0)
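
// Note (added commentary): with 16KB granules each table holds 2048 (2^11) 8-byte entries, so
// every level resolves 11 bits of VA; LEVEL_SHIFT(1..3) = 36/25/14, i.e. an L1 block spans
// 64GB, an L2 block 32MB, and an L3 page 16KB.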

typedef struct {
    hibernate_page_list_t *bitmap;
    uint32_t nextFree;
    uint64_t page_table_base;
} map_ctx;

static void
hib_bzero(volatile void *s, size_t n)
{
    // can't use __nosan_bzero while the MMU is off, so do it manually
    while (n > sizeof(uint64_t)) {
        *(volatile uint64_t *)s = 0;
        s += sizeof(uint64_t);
        n -= sizeof(uint64_t);
    }
    while (n > sizeof(uint32_t)) {
        *(volatile uint32_t *)s = 0;
        s += sizeof(uint32_t);
        n -= sizeof(uint32_t);
    }
    while (n) {
        *(volatile char *)s = 0;
        s++;
        n--;
    }
}

static uint64_t
allocate_page(map_ctx *ctx)
{
    // pages that were unnecessary for preservation when we entered hibernation are
    // marked as free in ctx->bitmap, so they are available for scratch usage during
    // resume; here, we "borrow" one of these free pages to use as part of our temporary
    // page tables
    ppnum_t ppnum = hibernate_page_list_grab(ctx->bitmap, &ctx->nextFree);
    hibernate_page_bitset(ctx->bitmap, FALSE, ppnum);
    uint64_t result = ptoa_64(ppnum);
    hib_bzero((void *)result, PAGE_SIZE);
    return result;
}

static void
create_map_entries(map_ctx *ctx, uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t map_flags)
{
    // if we've set gHibTramp.memSlide, we should already be running with the MMU on;
    // in this case, we don't permit further modification to the page table
    HIB_ASSERT(!gHibTramp.memSlide);

    int level = TOP_LEVEL;
    volatile uint64_t *table_base = (uint64_t *)ctx->page_table_base;
    if (map_flags == 0) {
        paddr = 0; // no physical address for none mappings
    }

    while (size) {
        HIB_ASSERT(level >= 1);
        HIB_ASSERT(level <= LAST_TABLE_LEVEL);

        size_t level_shift = LEVEL_SHIFT(level);
        size_t level_entries = PAGE_GRANULE_SIZE / sizeof(uint64_t);
        size_t level_size = 1ull << level_shift;
        size_t level_mask = level_size - 1;
        size_t index = (vaddr >> level_shift) & (level_entries - 1);
        // Can we make block entries here? Must be permitted at this
        // level, have enough bytes remaining, and both virtual and
        // physical addresses aligned to a block.
        if ((level >= 2) &&
            size >= level_size &&
            ((vaddr | paddr) & level_mask) == 0) {
            // Map contiguous blocks.
            size_t num_entries = MIN(size / level_size, level_entries - index);
            if (map_flags) {
                uint64_t entry = map_flags | ((level < LAST_TABLE_LEVEL) ? ARM_TTE_TYPE_BLOCK : ARM_TTE_TYPE_L3BLOCK);
                for (size_t i = 0; i < num_entries; i++) {
                    HIB_ASSERT(PTE_EMPTY(table_base[index + i]));
                    table_base[index + i] = entry | paddr;
                    paddr += level_size;
                }
            } else {
                // make sure all the corresponding entries are empty
                for (size_t i = 0; i < num_entries; i++) {
                    HIB_ASSERT(PTE_EMPTY(table_base[index + i]));
                }
            }
            size_t mapped = num_entries * level_size;
            size -= mapped;
            if (size) {
                // map the remaining at the top level
                level = TOP_LEVEL;
                table_base = (uint64_t *)ctx->page_table_base;
                vaddr += mapped;
                // paddr already incremented above if necessary
            }
        } else {
            // Sub-divide into a next level table.
            HIB_ASSERT(level < LAST_TABLE_LEVEL);
            uint64_t entry = table_base[index];
            HIB_ASSERT((entry & (ARM_TTE_VALID | ARM_TTE_TYPE_MASK)) != (ARM_TTE_VALID | ARM_TTE_TYPE_BLOCK)); // Breaking down blocks not implemented
            uint64_t sub_base = entry & ARM_TTE_TABLE_MASK;
            if (!sub_base) {
                sub_base = allocate_page(ctx);
                HIB_ASSERT((sub_base & PAGE_GRANULE_MASK) == 0);
                table_base[index] = sub_base | ARM_LPAE_NSTABLE | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
            }

            // map into the sub table
            level++;
            table_base = (uint64_t *)sub_base;
        }
    }
}

static void
map_range_start_end(map_ctx *ctx, uint64_t start, uint64_t end, uint64_t slide, uint64_t flags)
{
    HIB_ASSERT(end >= start);
    create_map_entries(ctx, start + slide, start, end - start, flags);
}

#define MAP_FLAGS_COMMON (ARM_PTE_AF | ARM_PTE_NS | ARM_TTE_VALID | ARM_PTE_SH(SH_OUTER_MEMORY) | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK))
#define MAP_DEVICE       (ARM_PTE_AF | ARM_TTE_VALID | ARM_PTE_PNX | ARM_PTE_NX | ARM_PTE_SH(SH_NONE) | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE))
#define MAP_RO           (MAP_FLAGS_COMMON | ARM_PTE_PNX | ARM_PTE_NX | ARM_PTE_AP(AP_RONA))
#define MAP_RW           (MAP_FLAGS_COMMON | ARM_PTE_PNX | ARM_PTE_NX)
#define MAP_RX           (MAP_FLAGS_COMMON | ARM_PTE_AP(AP_RONA))
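
// Note (added commentary): MAP_RO/MAP_RW/MAP_RX are write-back cacheable normal-memory
// mappings (only MAP_RX leaves execute permission enabled), while MAP_DEVICE is an uncached,
// non-executable mapping used for the register pages mapped at the end of pal_hib_resume_tramp().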

static void
map_register_page(map_ctx *ctx, vm_address_t regPage)
{
    uint64_t regBase = trunc_page(regPage);
    if (regBase) {
        map_range_start_end(ctx, regBase, regBase + PAGE_SIZE, 0, MAP_DEVICE);
    }
}

static void
iterate_bitmaps(const map_ctx *ctx, bool (^callback)(const hibernate_bitmap_t *bank_bitmap))
{
    hibernate_bitmap_t *bank_bitmap = &ctx->bitmap->bank_bitmap[0];
    for (uint32_t bank = 0; bank < ctx->bitmap->bank_count; bank++) {
        if (!callback(bank_bitmap)) {
            return;
        }
        bank_bitmap = (hibernate_bitmap_t *)&bank_bitmap->bitmap[bank_bitmap->bitmapwords];
    }
}

// during hibernation resume, we can't use the original kernel page table (because we don't know what it was), so we instead
// create a temporary page table to use during hibernation resume; since the original kernel page table was part of DRAM,
// it will be restored by the time we're done with hibernation resume, at which point we can jump through the reset vector
// to reload the original page table
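// (added commentary) The function below builds two translation tables: a TTBR1 table that maps
// kernel memory at slid addresses (segments at kernelSlide, everything else at mem_slide), and
// a TTBR0 table that maps the hib text, the image, the handoff pages, and a few device register
// pages physical==virtual so the restore code keeps running across the MMU switch.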
void
pal_hib_resume_tramp(uint32_t headerPpnum)
{
    uint64_t header_phys = ptoa_64(headerPpnum);
    IOHibernateImageHeader *header = (IOHibernateImageHeader *)header_phys;
    IOHibernateHibSegInfo *seg_info = &header->hibSegInfo;
    uint64_t hib_text_start = ptoa_64(header->restore1CodePhysPage);

    __block map_ctx ctx = {};
    uint64_t map_phys = header_phys
        + (offsetof(IOHibernateImageHeader, fileExtentMap)
        + header->fileExtentMapSize
        + ptoa_32(header->restore1PageCount)
        + header->previewSize);
    ctx.bitmap = (hibernate_page_list_t *)map_phys;

    // find the bank describing xnu's map
    __block uint64_t phys_start = 0, phys_end = 0;
    iterate_bitmaps(&ctx, ^bool (const hibernate_bitmap_t *bank_bitmap) {
        if ((bank_bitmap->first_page <= header->restore1CodePhysPage) &&
            (bank_bitmap->last_page >= header->restore1CodePhysPage)) {
            phys_start = ptoa_64(bank_bitmap->first_page);
            phys_end = ptoa_64(bank_bitmap->last_page) + PAGE_SIZE;
            return false;
        }
        return true;
    });

    HIB_ASSERT(phys_start != 0);
    HIB_ASSERT(phys_end != 0);

    hib_bzero(&gHibTramp, sizeof(gHibTramp));
    gHibTramp.kernelSlide = header->restore1CodeVirt - hib_text_start;

    // During hibernation resume, we create temporary mappings that do not collide with where any of the kernel mappings were originally.
    // Technically, non-collision isn't a requirement, but doing this means that if some code accidentally jumps to a VA in the original
    // kernel map, it won't be present in our temporary map and we'll get an exception when jumping to an unmapped address.
    // The base address of our temporary mappings is adjusted by a random amount as a "poor-man's ASLR". We don't have a good source of
    // random numbers in this context, so we just use some of the bits from imageHeaderHMAC, which should be random enough.
    uint16_t rand = (uint16_t)(((header->imageHeaderHMAC[0]) << 8) | header->imageHeaderHMAC[1]);
    uint64_t mem_slide = gHibTramp.kernelSlide - (phys_end - phys_start) * 4 - rand * 256 * PAGE_SIZE;

    // make sure we don't clobber any of the pages we need for restore
    hibernate_reserve_restore_pages(header_phys, header, ctx.bitmap);

    // initialize the nextFree hint used by allocate_page()
    hibernate_page_list_grab(ctx.bitmap, &ctx.nextFree);

    // allocate the root of the temporary TTBR1 (kernel VA) translation table
    ctx.page_table_base = allocate_page(&ctx);
    gHibTramp.ttbr1 = ctx.page_table_base;

    uint64_t first_seg_start = 0, last_seg_end = 0, hib_text_end = 0;
    for (size_t i = 0; i < NUM_HIBSEGINFO_SEGMENTS; i++) {
        uint64_t size = ptoa_64(seg_info->segments[i].pageCount);
        if (size) {
            uint64_t seg_start = ptoa_64(seg_info->segments[i].physPage);
            uint64_t seg_end = seg_start + size;
            uint32_t protection = seg_info->segments[i].protection;
            if (protection != VM_PROT_NONE) {
                // make sure the segment is in bounds
                HIB_ASSERT(seg_start >= phys_start);
                HIB_ASSERT(seg_end <= phys_end);

                if (!first_seg_start) {
                    first_seg_start = seg_start;
                }
                if (last_seg_end) {
                    // map the "hole" as RW
                    map_range_start_end(&ctx, last_seg_end, seg_start, mem_slide, MAP_RW);
                }
                // map the segments described in machine_header at their original locations
                bool executable = (protection & VM_PROT_EXECUTE);
                bool writeable = (protection & VM_PROT_WRITE);
                uint64_t map_flags = executable ? MAP_RX : writeable ? MAP_RW : MAP_RO;
                map_range_start_end(&ctx, seg_start, seg_end, gHibTramp.kernelSlide, map_flags);
                last_seg_end = seg_end;
            }
            if (seg_info->segments[i].physPage == header->restore1CodePhysPage) {
                // this is the hibtext segment, so remember where it ends
                hib_text_end = seg_end;
            }
        }
    }

    // map the rest of kernel memory (the pages that come before and after our segments) as RW
    map_range_start_end(&ctx, phys_start, first_seg_start, mem_slide, MAP_RW);
    map_range_start_end(&ctx, last_seg_end, phys_end, mem_slide, MAP_RW);

    // map all of the remaining banks that we didn't already deal with
    iterate_bitmaps(&ctx, ^bool (const hibernate_bitmap_t *bank_bitmap) {
        uint64_t bank_start = ptoa_64(bank_bitmap->first_page);
        uint64_t bank_end = ptoa_64(bank_bitmap->last_page) + PAGE_SIZE;
        if (bank_start == phys_start) {
            // skip this bank since we already covered it above
        } else {
            // map the bank RW
            map_range_start_end(&ctx, bank_start, bank_end, mem_slide, MAP_RW);
        }
        return true;
    });

    // allocate the root of the temporary TTBR0 (physical==virtual) translation table
    ctx.page_table_base = allocate_page(&ctx);
    gHibTramp.ttbr0 = ctx.page_table_base;

    // map hib text P=V so that we can still execute at its physical address
    map_range_start_end(&ctx, hib_text_start, hib_text_end, 0, MAP_RX);

    // map the hib image P=V, RW
    uint64_t image_start = trunc_page(header_phys);
    uint64_t image_end = round_page(header_phys + header->image1Size);
    map_range_start_end(&ctx, image_start, image_end, 0, MAP_RW);

    // map the handoff pages P=V, RO
    image_start = ptoa_64(header->handoffPages);
    image_end = image_start + ptoa_64(header->handoffPageCount);
    map_range_start_end(&ctx, image_start, image_end, 0, MAP_RO);

    // map some device register pages
    if (gHibernateGlobals.dockChannelRegBase) {
#define dockchannel_uart_base gHibernateGlobals.dockChannelRegBase
        vm_address_t dockChannelRegBase = trunc_page(&rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL));
        map_register_page(&ctx, dockChannelRegBase);
    }
    map_register_page(&ctx, gHibernateGlobals.hibUartRegBase);
    map_register_page(&ctx, gHibernateGlobals.hmacRegBase);

    gHibTramp.memSlide = mem_slide;
}