2 * Copyright (c) 2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * These functions are wrappers around the PPL HIB extension. They provide a
30 * higher level interface to the PPL HIB ioctl interface, and include logic for
31 * turning the HMAC block on when necessary. Refer to the comments in the PPL HIB
32 * extension for more details.
34 #include "hibernate_ppl_hmac.h"
36 #include <mach/vm_param.h>
37 #include <pexpert/arm64/board_config.h>
39 #include <arm64/amcc_rorgn.h>
40 #include <arm64/ppl/ppl_hib.h>
41 #include "pal_hibernate.h"
44 #if XNU_MONITOR_PPL_HIB
47 #error New SoC defined in board_config.h that supports PPL HIB but no \
48 embedded headers included in hibernate_ppl_hmac.c for that SoC.
51 #include <soc/module/address_map.h>
52 #include <soc/module/pmgr_soc.h>
/* Opaque handle to the PPL HIB IOMMU state object; created once by ppl_hmac_init(). */
static ppl_iommu_state *pplHmacState;
/*
 * Scratch page shared with the PPL HMAC driver: pages are copied here by the PPL
 * so the kernel can compress them. Mapped in ppl_hmac_hibernate_begin() and
 * cleared in ppl_hmac_hibernate_end().
 */
static void *pplHmacScratchPage;
58 ppl_hmac_enable_aes_ps(void)
60 static vm_address_t aes_ps_reg_base
;
61 if (!aes_ps_reg_base
) {
62 /* map the AES PS registers */
63 aes_ps_reg_base
= ml_io_map(PMGR_REG_BASE
, PAGE_SIZE
);
65 volatile uint32_t *psreg
= (volatile uint32_t *)(aes_ps_reg_base
+ PMGR_AES_OFFSET
);
66 // set PS_MANUAL to on
68 while ((*psreg
& 0xf) != ((*psreg
>> 4) & 0xf)) {
69 // poll until the block's PS_ACTUAL matches PS_MANUAL
/*
 * Compress one page using the WKdm hardware compressor (`wkdmc` instruction).
 *
 * NOTE(review): this copy of the file appears truncated — the function's
 * return type, the opening of the bit-field `result` struct (including the
 * `status` and `count` fields referenced below), and the checks that produce
 * the 0 ("single repeated value") and -1 ("failed to compress") results
 * described at the call site are missing. Restore them from the upstream
 * source before building; do not hand-reconstruct the bit-field layout.
 *
 * @param src source page — must be page-aligned (asserted).
 * @param dst destination buffer — must be 64-byte aligned (asserted).
 * @result compressed size in bytes, computed as (result.count + 1) * 64.
 */
hibernate_compress_page(const void *src, void *dst)
	assert((((uint64_t)src) & PAGE_MASK) == 0);
	assert((((uint64_t)dst) & 63) == 0);
	uint32_t reserved2:17; /* unused hardware result bits — TODO confirm layout */
	uint32_t reserved3:14; /* unused hardware result bits — TODO confirm layout */
	} result = { .status = ~0u }; /* presumably primes status so a silent HW no-op is detectable */
	/* issue the compress; the packed result comes back in a GPR */
	__asm__ volatile ("wkdmc %0, %1" : "=r"(result): "r"(dst), "0"(src));
	/* hardware reports the size as (count + 1) 64-byte units */
	return (result.count + 1) * 64;
97 /* initialize context needed for ppl computations */
101 // don't initialize ppl_hib if hibernation isn't supported
102 if (!ppl_hib_hibernation_supported()) {
107 /* construct context needed to talk to PPL */
109 ppl_iommu_state
*pplState
= NULL
;
110 vm_address_t hmac_reg_base
= 0;
113 ppl_hmac_enable_aes_ps();
115 // set up the hmac engine
116 hmac_reg_base
= ml_io_map(HMAC_REG_BASE
, PAGE_SIZE
);
117 ppl_hib_init_data init_data
= { .version
= PPL_HIB_VERSION
, .hmac_reg_base
= hmac_reg_base
};
118 kern_return_t kr
= pmap_iommu_init(ppl_hib_get_desc(), "HMAC", &init_data
, sizeof(init_data
), &pplState
);
119 if (kr
!= KERN_SUCCESS
) {
120 printf("ppl_hmac_init: failed to initialize PPL state object: 0x%x\n", kr
);
122 ml_io_unmap(hmac_reg_base
, PAGE_SIZE
);
127 pplHmacState
= pplState
;
134 * Reset state for a new signature.
136 * @param wired_pages True if this context will be used to hash wired pages (image1),
137 * false otherwise (image2).
140 ppl_hmac_reset(bool wired_pages
)
142 // make sure AES_PS is on
143 ppl_hmac_enable_aes_ps();
145 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_RESET
,
146 &wired_pages
, sizeof(wired_pages
), NULL
, 0);
147 if (kr
!= KERN_SUCCESS
) {
148 panic("ppl_hmac_reset: PPL ioctl PPL_HIB_IOCTL_RESET failed: 0x%x\n", kr
);
153 * Inform HMAC driver that we're going to hibernate.
156 ppl_hmac_hibernate_begin(void)
158 uintptr_t scratchPage
= 0;
159 kern_return_t kr
= pmap_iommu_map(pplHmacState
, NULL
, 0, 0, &scratchPage
);
160 if (kr
!= KERN_SUCCESS
) {
161 panic("ppl_register_scratch_page: pmap_iommu_map failed: 0x%x\n", kr
);
163 pplHmacScratchPage
= (void *)scratchPage
;
167 * Inform HMAC driver that we're done hibernating.
170 ppl_hmac_hibernate_end(void)
172 pmap_iommu_unmap(pplHmacState
, NULL
, 0, 0, NULL
);
173 pplHmacScratchPage
= NULL
;
176 /* get the hmac register base */
178 ppl_hmac_get_reg_base(void)
180 return HMAC_REG_BASE
;
184 * Update the PPL HMAC hash computation with the given page.
186 * @param pageNumber Page to add into the hash.
187 * @param uncompressed Out parameter that receives a pointer to the uncompressed data of the given page.
188 * @param compressed Buffer that will receive the compressed content of the given page
189 * @result The compressed size, 0 if the page was a single repeated value, or -1 if the page failed to compress.
192 ppl_hmac_update_and_compress_page(ppnum_t pageNumber
, void **uncompressed
, void *compressed
)
194 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_UPDATE_AND_COPY_PAGE
,
195 &pageNumber
, sizeof(pageNumber
), NULL
, 0);
196 if (kr
!= KERN_SUCCESS
) {
197 panic("ppl_hmac_update_and_compress_page: PPL ioctl PPL_HIB_IOCTL_UPDATE_PAGE failed: 0x%x\n", kr
);
199 // page was copied to scratch, so compress it into compressed
202 *uncompressed
= pplHmacScratchPage
;
205 result
= hibernate_compress_page(pplHmacScratchPage
, compressed
);
212 /* finalize HMAC calculation */
214 ppl_hmac_final(uint8_t *output
, size_t outputLen
)
216 if (outputLen
!= HMAC_HASH_SIZE
) {
217 panic("ppl_hmac_final: outputLen should be %d but is %zu\n", HMAC_HASH_SIZE
, outputLen
);
219 uint8_t hashOutput
[HMAC_HASH_SIZE
];
220 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_FINAL
, NULL
, 0, hashOutput
, sizeof(hashOutput
));
221 if (kr
!= KERN_SUCCESS
) {
222 panic("ppl_hmac_final: PPL ioctl PPL_HIB_IOCTL_FINAL failed: 0x%x\n", kr
);
224 memcpy(output
, hashOutput
, HMAC_HASH_SIZE
);
227 /* HMAC the hibseg and get metadata */
229 ppl_hmac_fetch_hibseg_and_info(void *buffer
,
231 IOHibernateHibSegInfo
*info
)
233 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_FETCH_HIBSEG
, NULL
, 0, buffer
, bufferLen
);
234 if (kr
!= KERN_SUCCESS
) {
235 panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG failed: 0x%x\n", kr
);
237 IOHibernateHibSegInfo segInfo
;
238 kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_FETCH_HIBSEG_INFO
, NULL
, 0, &segInfo
, sizeof(segInfo
));
239 if (kr
!= KERN_SUCCESS
) {
240 panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG_INFO failed: 0x%x\n", kr
);
242 memcpy(info
, &segInfo
, sizeof(segInfo
));
245 /* HMAC the entire read-only region, or compare to previous HMAC */
247 ppl_hmac_compute_rorgn_hmac(void)
249 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC
, NULL
, 0, NULL
, 0);
250 if (kr
!= KERN_SUCCESS
) {
251 panic("ppl_hmac_compute_rorgn_hmac: PPL ioctl PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC failed: 0x%x\n", kr
);
256 * Finish hashing the hibernation image and return out the signed hash. This also
257 * hashes the hibernation header.
260 ppl_hmac_finalize_image(const void *header
, size_t headerLen
, uint8_t *hmac
, size_t hmacLen
)
262 if (hmacLen
!= HMAC_HASH_SIZE
) {
263 panic("ppl_hmac_finalize_image: hmacLen should be %d but is %zu\n", HMAC_HASH_SIZE
, hmacLen
);
265 uint8_t hashOutput
[HMAC_HASH_SIZE
];
266 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_FINALIZE_IMAGE
, header
, headerLen
, hashOutput
, sizeof(hashOutput
));
267 if (kr
!= KERN_SUCCESS
) {
268 panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_FINALIZE_IMAGE failed: 0x%x\n", kr
);
270 memcpy(hmac
, hashOutput
, HMAC_HASH_SIZE
);
275 * Return back an array of I/O ranges that need to be included within the hibernation
276 * image. If there are no I/O ranges that need hashing, then `*io_ranges` will be
277 * NULL and `*num_io_ranges` will be zero.
280 ppl_hmac_get_io_ranges(const ppl_hib_io_range
**io_ranges
, uint16_t *num_io_ranges
)
282 assert((io_ranges
!= NULL
) && (num_io_ranges
!= NULL
));
284 ppl_hib_get_io_ranges_data io
;
285 kern_return_t kr
= pmap_iommu_ioctl(pplHmacState
, PPL_HIB_IOCTL_GET_IO_RANGES
, NULL
, 0, &io
, sizeof(io
));
286 if (kr
!= KERN_SUCCESS
) {
287 panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_GET_IO_RANGES failed: 0x%x\n", kr
);
291 * This returns back pointers to PPL-owned data but this is fine since the
292 * caller only needs read-only access to this data (and the kernel has RO
293 * access to PPL-owned memory).
295 *io_ranges
= io
.ranges
;
296 *num_io_ranges
= io
.num_io_ranges
;
299 #endif /* XNU_MONITOR_PPL_HIB */