]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/hibernate_ppl_hmac.c
dcd22884e96a1541eecc232550a595437e12e649
[apple/xnu.git] / osfmk / arm64 / hibernate_ppl_hmac.c
1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /**
29 * These functions are wrappers around the PPL HIB extension. They provide a
30 * higher level interface to the PPL HIB ioctl interface, and include logic for
31 * turning the HMAC block on when necessary. Refer to the comments in the PPL HIB
32 * extension for more details.
33 */
34 #include "hibernate_ppl_hmac.h"
35
36 #include <mach/vm_param.h>
37 #include <pexpert/arm64/board_config.h>
38 #include <vm/pmap.h>
39 #include <arm64/amcc_rorgn.h>
40 #include <arm64/ppl/ppl_hib.h>
41 #include "pal_hibernate.h"
42 #include <stdbool.h>
43
44 #if XNU_MONITOR_PPL_HIB
45
46
47 #error New SoC defined in board_config.h that supports PPL HIB but no \
48 embedded headers included in hibernate_ppl_hmac.c for that SoC.
49
50
51 #include <soc/module/address_map.h>
52 #include <soc/module/pmgr_soc.h>
53
/* Opaque PPL IOMMU state handle created by ppl_hmac_init(); NULL until then. */
static ppl_iommu_state *pplHmacState;
/* Kernel mapping of the PPL-owned scratch page; valid only between
 * ppl_hmac_hibernate_begin() and ppl_hmac_hibernate_end(). */
static void *pplHmacScratchPage;
56
/**
 * Power on the AES block by writing its PMGR power-state register and
 * spinning until the hardware reports the requested state. The register
 * mapping is created once and cached for subsequent calls.
 */
static void
ppl_hmac_enable_aes_ps(void)
{
	static vm_address_t aes_ps_reg_base;
	if (!aes_ps_reg_base) {
		/* map the AES PS registers */
		aes_ps_reg_base = ml_io_map(PMGR_REG_BASE, PAGE_SIZE);
	}
	volatile uint32_t *psreg = (volatile uint32_t *)(aes_ps_reg_base + PMGR_AES_OFFSET);
	// set PS_MANUAL to on
	*psreg |= 0xf;
	while ((*psreg & 0xf) != ((*psreg >> 4) & 0xf)) {
		// poll until the block's PS_ACTUAL matches PS_MANUAL
	}
}
72
/**
 * Compress one page into dst using the wkdmc (WKdm compress) instruction.
 *
 * @param src Source page; must be page-aligned.
 * @param dst Destination buffer; must be 64-byte aligned.
 * @return -1 if the page failed to compress (status non-zero),
 *         0 if the page was a single repeated value (svp set),
 *         otherwise the compressed size in bytes, (count + 1) * 64.
 */
static int
hibernate_compress_page(const void *src, void *dst)
{
	assert((((uint64_t)src) & PAGE_MASK) == 0);
	assert((((uint64_t)dst) & 63) == 0);
	/*
	 * Bitfield view of the wkdmc result register.
	 * NOTE(review): field layout appears to match the instruction's
	 * result encoding — confirm against the ISA documentation.
	 */
	struct {
		uint32_t count:8;
		uint32_t svp:1;
		uint32_t reserved:3;
		uint32_t status:3;
		uint32_t reserved2:17;
		uint32_t popcnt:18;
		uint32_t reserved3:14;
	} result = { .status = ~0u };
	/*
	 * "0"(src) is a matching constraint: src is passed in the same
	 * register that receives the result. status is primed to all-ones
	 * above so an untouched result reads as a failure.
	 */
	__asm__ volatile ("wkdmc %0, %1" : "=r"(result): "r"(dst), "0"(src));
	if (result.status) {
		return -1;
	}
	if (result.svp) {
		return 0;
	}
	return (result.count + 1) * 64;
}
96
97 /* initialize context needed for ppl computations */
98 kern_return_t
99 ppl_hmac_init(void)
100 {
101 // don't initialize ppl_hib if hibernation isn't supported
102 if (!ppl_hib_hibernation_supported()) {
103 return KERN_FAILURE;
104 }
105
106 if (!pplHmacState) {
107 /* construct context needed to talk to PPL */
108
109 ppl_iommu_state *pplState = NULL;
110 vm_address_t hmac_reg_base = 0;
111
112 // turn on AES_PS
113 ppl_hmac_enable_aes_ps();
114
115 // set up the hmac engine
116 hmac_reg_base = ml_io_map(HMAC_REG_BASE, PAGE_SIZE);
117 ppl_hib_init_data init_data = { .version = PPL_HIB_VERSION, .hmac_reg_base = hmac_reg_base };
118 kern_return_t kr = pmap_iommu_init(ppl_hib_get_desc(), "HMAC", &init_data, sizeof(init_data), &pplState);
119 if (kr != KERN_SUCCESS) {
120 printf("ppl_hmac_init: failed to initialize PPL state object: 0x%x\n", kr);
121 if (hmac_reg_base) {
122 ml_io_unmap(hmac_reg_base, PAGE_SIZE);
123 }
124 return kr;
125 }
126
127 pplHmacState = pplState;
128 }
129
130 return KERN_SUCCESS;
131 }
132
133 /**
134 * Reset state for a new signature.
135 *
136 * @param wired_pages True if this context will be used to hash wired pages (image1),
137 * false otherwise (image2).
138 */
139 void
140 ppl_hmac_reset(bool wired_pages)
141 {
142 // make sure AES_PS is on
143 ppl_hmac_enable_aes_ps();
144
145 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_RESET,
146 &wired_pages, sizeof(wired_pages), NULL, 0);
147 if (kr != KERN_SUCCESS) {
148 panic("ppl_hmac_reset: PPL ioctl PPL_HIB_IOCTL_RESET failed: 0x%x\n", kr);
149 }
150 }
151
152 /**
153 * Inform HMAC driver that we're going to hibernate.
154 */
155 void
156 ppl_hmac_hibernate_begin(void)
157 {
158 uintptr_t scratchPage = 0;
159 kern_return_t kr = pmap_iommu_map(pplHmacState, NULL, 0, 0, &scratchPage);
160 if (kr != KERN_SUCCESS) {
161 panic("ppl_register_scratch_page: pmap_iommu_map failed: 0x%x\n", kr);
162 }
163 pplHmacScratchPage = (void *)scratchPage;
164 }
165
166 /**
167 * Inform HMAC driver that we're done hibernating.
168 */
169 void
170 ppl_hmac_hibernate_end(void)
171 {
172 pmap_iommu_unmap(pplHmacState, NULL, 0, 0, NULL);
173 pplHmacScratchPage = NULL;
174 }
175
176 /* get the hmac register base */
177 vm_address_t
178 ppl_hmac_get_reg_base(void)
179 {
180 return HMAC_REG_BASE;
181 }
182
183 /**
184 * Update the PPL HMAC hash computation with the given page.
185 *
186 * @param pageNumber Page to add into the hash.
187 * @param uncompressed Out parameter that receives a pointer to the uncompressed data of the given page.
188 * @param compressed Buffer that will receive the compressed content of the given page
189 * @result The compressed size, 0 if the page was a single repeated value, or -1 if the page failed to compress.
190 */
191 int
192 ppl_hmac_update_and_compress_page(ppnum_t pageNumber, void **uncompressed, void *compressed)
193 {
194 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_UPDATE_AND_COPY_PAGE,
195 &pageNumber, sizeof(pageNumber), NULL, 0);
196 if (kr != KERN_SUCCESS) {
197 panic("ppl_hmac_update_and_compress_page: PPL ioctl PPL_HIB_IOCTL_UPDATE_PAGE failed: 0x%x\n", kr);
198 }
199 // page was copied to scratch, so compress it into compressed
200 int result;
201 if (uncompressed) {
202 *uncompressed = pplHmacScratchPage;
203 }
204 if (compressed) {
205 result = hibernate_compress_page(pplHmacScratchPage, compressed);
206 } else {
207 result = 0;
208 }
209 return result;
210 }
211
212 /* finalize HMAC calculation */
213 void
214 ppl_hmac_final(uint8_t *output, size_t outputLen)
215 {
216 if (outputLen != HMAC_HASH_SIZE) {
217 panic("ppl_hmac_final: outputLen should be %d but is %zu\n", HMAC_HASH_SIZE, outputLen);
218 }
219 uint8_t hashOutput[HMAC_HASH_SIZE];
220 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FINAL, NULL, 0, hashOutput, sizeof(hashOutput));
221 if (kr != KERN_SUCCESS) {
222 panic("ppl_hmac_final: PPL ioctl PPL_HIB_IOCTL_FINAL failed: 0x%x\n", kr);
223 }
224 memcpy(output, hashOutput, HMAC_HASH_SIZE);
225 }
226
227 /* HMAC the hibseg and get metadata */
228 void
229 ppl_hmac_fetch_hibseg_and_info(void *buffer,
230 uint64_t bufferLen,
231 IOHibernateHibSegInfo *info)
232 {
233 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FETCH_HIBSEG, NULL, 0, buffer, bufferLen);
234 if (kr != KERN_SUCCESS) {
235 panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG failed: 0x%x\n", kr);
236 }
237 IOHibernateHibSegInfo segInfo;
238 kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FETCH_HIBSEG_INFO, NULL, 0, &segInfo, sizeof(segInfo));
239 if (kr != KERN_SUCCESS) {
240 panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG_INFO failed: 0x%x\n", kr);
241 }
242 memcpy(info, &segInfo, sizeof(segInfo));
243 }
244
245 /* HMAC the entire read-only region, or compare to previous HMAC */
246 void
247 ppl_hmac_compute_rorgn_hmac(void)
248 {
249 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC, NULL, 0, NULL, 0);
250 if (kr != KERN_SUCCESS) {
251 panic("ppl_hmac_compute_rorgn_hmac: PPL ioctl PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC failed: 0x%x\n", kr);
252 }
253 }
254
255 /**
256 * Finish hashing the hibernation image and return out the signed hash. This also
257 * hashes the hibernation header.
258 */
259 void
260 ppl_hmac_finalize_image(const void *header, size_t headerLen, uint8_t *hmac, size_t hmacLen)
261 {
262 if (hmacLen != HMAC_HASH_SIZE) {
263 panic("ppl_hmac_finalize_image: hmacLen should be %d but is %zu\n", HMAC_HASH_SIZE, hmacLen);
264 }
265 uint8_t hashOutput[HMAC_HASH_SIZE];
266 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FINALIZE_IMAGE, header, headerLen, hashOutput, sizeof(hashOutput));
267 if (kr != KERN_SUCCESS) {
268 panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_FINALIZE_IMAGE failed: 0x%x\n", kr);
269 }
270 memcpy(hmac, hashOutput, HMAC_HASH_SIZE);
271 }
272
273
274 /**
275 * Return back an array of I/O ranges that need to be included within the hibernation
276 * image. If there are no I/O ranges that need hashing, then `*io_ranges` will be
277 * NULL and `*num_io_ranges` will be zero.
278 */
279 void
280 ppl_hmac_get_io_ranges(const ppl_hib_io_range **io_ranges, uint16_t *num_io_ranges)
281 {
282 assert((io_ranges != NULL) && (num_io_ranges != NULL));
283
284 ppl_hib_get_io_ranges_data io;
285 kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_GET_IO_RANGES, NULL, 0, &io, sizeof(io));
286 if (kr != KERN_SUCCESS) {
287 panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_GET_IO_RANGES failed: 0x%x\n", kr);
288 }
289
290 /**
291 * This returns back pointers to PPL-owned data but this is fine since the
292 * caller only needs read-only access to this data (and the kernel has RO
293 * access to PPL-owned memory).
294 */
295 *io_ranges = io.ranges;
296 *num_io_ranges = io.num_io_ranges;
297 }
298
299 #endif /* XNU_MONITOR_PPL_HIB */