/*
 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cprotect.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/filio.h>
#include <sys/content_protection.h>
#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>
#include <vm/vm_kern.h>		/* kernel_map, used by cpx_writeprotect()/cpx_free() */
#include <vm/vm_map.h>		/* vm_map_protect() */

#define PTR_ADD(type, base, offset)	(type)((uintptr_t)(base) + (offset))

// -- struct cpx --

/*
 * This structure contains the unwrapped key and is passed to the lower layers.
 * It is private, so users must use the accessors declared in sys/cprotect.h
 * to read/write it.
 */

// cpx_flags
typedef uint32_t cpx_flags_t;
enum {
	CPX_SEP_WRAPPEDKEY          = 0x01,
	CPX_IV_AES_CTX_INITIALIZED  = 0x02,
	CPX_USE_OFFSET_FOR_IV       = 0x04,

	// Using AES IV context generated from key
	CPX_IV_AES_CTX_VFS          = 0x08,
	CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10,
	CPX_COMPOSITEKEY            = 0x20
};

struct cpx {
#if DEBUG
	uint32_t        cpx_magic1;
#endif
	aes_encrypt_ctx cpx_iv_aes_ctx;   // Context used for generating the IV
	cpx_flags_t     cpx_flags;
	uint16_t        cpx_max_key_len;
	uint16_t        cpx_key_len;
	uint8_t         cpx_cached_key[];
};

// -- cpx_t accessors --

size_t cpx_size(size_t key_size)
{
	size_t size = sizeof(struct cpx) + key_size;

#if DEBUG
	size += 4; // Extra for magic
#endif

	return size;
}
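
/*
 * For example, cpx_size(32) for a 32-byte key is sizeof(struct cpx) + 32,
 * plus 4 trailing bytes for a magic canary on DEBUG builds. The key is
 * stored inline via the cpx_cached_key[] flexible array member above.
 */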

size_t cpx_sizex(const struct cpx *cpx)
{
	return cpx_size(cpx->cpx_max_key_len);
}

cpx_t cpx_alloc(size_t key_len)
{
	cpx_t cpx;

#if TARGET_OS_OSX
	/*
	 * Macs only use 1 key per volume, so force it into its own page.
	 * This way, we can write-protect as needed.
	 */
	size_t cpsize = cpx_size(key_len);
	if (cpsize < PAGE_SIZE) {
		MALLOC(cpx, cpx_t, PAGE_SIZE, M_TEMP, M_WAITOK);
	} else {
		panic("cpx_size too large! (%lu)", cpsize);
	}
#else
	MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
#endif
	cpx_init(cpx, key_len);

	return cpx;
}
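
/*
 * Illustrative lifecycle (not part of the original source); assumes the
 * caller already holds an unwrapped raw_key buffer of key_len bytes:
 *
 *	cpx_t cpx = cpx_alloc(key_len);
 *	cpx_set_key_len(cpx, key_len);
 *	memcpy(cpx_key(cpx), raw_key, key_len);
 *	... use via the accessors below ...
 *	cpx_free(cpx);	// zeroes the cached key before freeing
 */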

/* Write-protect the page backing this cpx; a no-op on non-macOS targets. */
void cpx_writeprotect(cpx_t cpx)
{
#if TARGET_OS_OSX
	void *cpxstart = (void *)cpx;
	void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
	vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, VM_PROT_READ, FALSE);
#else
	(void)cpx;
#endif
	return;
}
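
/*
 * Note: passing FALSE for vm_map_protect()'s set_maximum argument changes
 * only the current protection and leaves the maximum protection intact, so
 * cpx_free() below can restore VM_PROT_DEFAULT before zeroing the key.
 */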

#if DEBUG
static const uint32_t cpx_magic1 = 0x7b787063;		// cpx{
static const uint32_t cpx_magic2 = 0x7870637d;		// }cpx
#endif

void cpx_free(cpx_t cpx)
{
#if DEBUG
	assert(cpx->cpx_magic1 == cpx_magic1);
	assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif

#if TARGET_OS_OSX
	/* unprotect the page before bzeroing */
	void *cpxstart = (void *)cpx;
	void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
	vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, VM_PROT_DEFAULT, FALSE);
#endif

	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	FREE(cpx, M_TEMP);
}

void cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
	cpx->cpx_magic1 = cpx_magic1;
	*PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
	cpx->cpx_max_key_len = (uint16_t)key_len;
}

bool cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}

void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
	else
		CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}

bool cpx_is_composite_key(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
}

void cpx_set_is_composite_key(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
	else
		CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
}

bool cpx_use_offset_for_iv(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}

void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
	else
		CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}

bool cpx_synthetic_offset_for_iv(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}

void cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
{
	if (v)
		SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
	else
		CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}

uint16_t cpx_max_key_len(const struct cpx *cpx)
{
	return cpx->cpx_max_key_len;
}

uint16_t cpx_key_len(const struct cpx *cpx)
{
	return cpx->cpx_key_len;
}

void cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
	cpx->cpx_key_len = key_len;

	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
		/*
		 * We assume that if the key length is being modified, the key
		 * has changed. As a result, un-set any bits related to the
		 * AES context, if needed. They should be re-generated
		 * on-demand.
		 */
		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
	}
}
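
/*
 * Because cpx_set_key_len() clears CPX_IV_AES_CTX_INITIALIZED for
 * VFS-generated contexts, the next call to cpx_iv_aes_ctx() below will
 * lazily re-derive the IV key from the new cached key.
 */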

bool cpx_has_key(const struct cpx *cpx)
{
	return cpx->cpx_key_len > 0;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
void *cpx_key(const struct cpx *cpx)
{
	return (void *)cpx->cpx_cached_key;
}
#pragma clang diagnostic pop

void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
	aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
	CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
}

aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx)
{
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
		return &cpx->cpx_iv_aes_ctx;

	SHA1_CTX sha1ctxt;
	uint8_t digest[SHA_DIGEST_LENGTH];	/* Kiv */

	/* First init the cp_cache_iv_key[] */
	SHA1Init(&sha1ctxt);

	/*
	 * We can only use this when the keys are generated in the AP; as a
	 * result, we only use the first 32 bytes of key length in the cache key.
	 */
	SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
	SHA1Final(digest, &sha1ctxt);

	cpx_set_aes_iv_key(cpx, digest);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

	return &cpx->cpx_iv_aes_ctx;
}
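
/*
 * In other words, the lazily derived IV key (Kiv) is the SHA-1 digest of
 * the cached file key; cpx_set_aes_iv_key() then expands it with
 * aes_encrypt_key128(), which consumes only the first 16 of the digest's
 * 20 bytes.
 */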

void cpx_flush(cpx_t cpx)
{
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx));
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
}

bool cpx_can_copy(const struct cpx *src, const struct cpx *dst)
{
	return src->cpx_key_len <= dst->cpx_max_key_len;
}

void cpx_copy(const struct cpx *src, cpx_t dst)
{
	uint16_t key_len = cpx_key_len(src);
	cpx_set_key_len(dst, key_len);
	memcpy(cpx_key(dst), cpx_key(src), key_len);
	dst->cpx_flags = src->cpx_flags;
	if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
		dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx;
}
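
/*
 * Illustrative caller pattern (not in the original source): cpx_copy()
 * does not bounds-check the destination, so pair it with cpx_can_copy():
 *
 *	if (cpx_can_copy(src, dst))
 *		cpx_copy(src, dst);
 */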

typedef struct {
	cp_lock_state_t state;
	int             valid_uuid;
	uuid_t          volume_uuid;
} cp_lock_vfs_callback_arg;

static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
	cp_lock_vfs_callback_arg *callback_arg = (cp_lock_vfs_callback_arg *)arg;

	if (callback_arg->valid_uuid) {
		struct vfs_attr va;
		VFSATTR_INIT(&va);
		VFSATTR_WANTED(&va, f_uuid);

		if (vfs_getattr(mp, &va, vfs_context_current()))
			return 0;

		if (!VFSATTR_IS_SUPPORTED(&va, f_uuid))
			return 0;

		if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t)))
			return 0;
	}

	VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->state, 0, vfs_context_kernel());
	return 0;
}

int
cp_key_store_action(cp_key_store_action_t action)
{
	cp_lock_vfs_callback_arg callback_arg;

	switch (action) {
	case CP_ACTION_LOCKED:
	case CP_ACTION_UNLOCKED:
		callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
		memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
		callback_arg.valid_uuid = 0;
		return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
	default:
		return -1;
	}
}

int
cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
{
	cp_lock_vfs_callback_arg callback_arg;

	switch (action) {
	case CP_ACTION_LOCKED:
	case CP_ACTION_UNLOCKED:
		callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
		memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
		callback_arg.valid_uuid = 1;
		return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
	default:
		return -1;
	}
}
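
/*
 * Illustrative use (not in the original source): when a volume's keys
 * become unavailable, a caller might notify just that volume, which
 * forwards FIODEVICELOCKED to its filesystem via the callback above:
 *
 *	uuid_t vuuid;
 *	... obtain the volume UUID ...
 *	cp_key_store_action_for_volume(vuuid, CP_ACTION_LOCKED);
 *
 * cp_key_store_action() performs the same broadcast to every mount.
 */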

int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
	/*
	 * The valid protection classes are from 0 -> N.
	 * We use a signed argument to detect unassigned values from
	 * directory entry creation time in HFS.
	 */
	if (isdir) {
		/* Directories are not allowed to have F, but they can have "NONE" */
		return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
				(protectionclass <= PROTECTION_CLASS_D));
	} else {
		return ((protectionclass >= PROTECTION_CLASS_A) &&
				(protectionclass <= PROTECTION_CLASS_F));
	}
}
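
/*
 * For example (assuming the class constants from sys/cprotect.h, where
 * DIR_NONE is 0 and classes A-F are 1-6): cp_is_valid_class(1,
 * PROTECTION_CLASS_F) returns 0, since class F keys are never persisted
 * and therefore make no sense as a directory default.
 */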

/*
 * Parses versions of the form 12A316, i.e. <major><minor><revision>, and
 * returns a uint32_t in the form 0xaabbcccc, where aa = <major>,
 * bb = the ASCII code of the <minor> letter, and cccc = <revision>.
 */
static cp_key_os_version_t
parse_os_version(const char *vers)
{
	const char *p = vers;

	int a = 0;
	while (*p >= '0' && *p <= '9') {
		a = a * 10 + *p - '0';
		++p;
	}

	if (!a)
		return 0;

	int b = *p++;
	if (!b)
		return 0;

	int c = 0;
	while (*p >= '0' && *p <= '9') {
		c = c * 10 + *p - '0';
		++p;
	}

	if (!c)
		return 0;

	return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
}
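
/*
 * Worked example: "12A316" parses as a = 12 (0x0c), b = 'A' (0x41) and
 * c = 316 (0x013c), so the packed result is 0x0c41013c.
 */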

cp_key_os_version_t
cp_os_version(void)
{
	static cp_key_os_version_t cp_os_version;

	if (cp_os_version)
		return cp_os_version;

	if (!osversion[0])
		return 0;

	cp_os_version = parse_os_version(osversion);
	if (!cp_os_version) {
		printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
		cp_os_version = 1;
	}

	return cp_os_version;
}