// apple/xnu (tag xnu-6153.141.1): bsd/vfs/vfs_cprotect.c
/*
 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cprotect.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/filio.h>
#include <sys/content_protection.h>
#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>
// For write protection
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#define PTR_ADD(type, base, offset) (type)((uintptr_t)(base) + (offset))

// -- struct cpx --

/*
 * This structure contains the unwrapped key and is passed to the lower layers.
 * It is private, so users must use the accessors declared in sys/cprotect.h
 * to read/write it.
 */

// cpx_flags
typedef uint32_t cpx_flags_t;
enum {
	CPX_SEP_WRAPPEDKEY = 0x01,
	CPX_IV_AES_CTX_INITIALIZED = 0x02,
	CPX_USE_OFFSET_FOR_IV = 0x04,

	// Using AES IV context generated from key
	CPX_IV_AES_CTX_VFS = 0x08,
	CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10,
	CPX_COMPOSITEKEY = 0x20,

	// Write page protection
	CPX_WRITE_PROTECTABLE = 0x40
};

struct cpx {
#if DEBUG
	uint32_t cpx_magic1;
#endif
	aes_encrypt_ctx cpx_iv_aes_ctx; // Context used for generating the IV
	cpx_flags_t cpx_flags;
	uint16_t cpx_max_key_len;
	uint16_t cpx_key_len;
	uint8_t cpx_cached_key[];
};

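/*
 * In DEBUG kernels the struct is bracketed by guard words: cpx_magic1 is a
 * named field at the front, and cpx_size reserves four extra bytes past the
 * end of cpx_cached_key where cpx_init writes cpx_magic2 (reached via the
 * PTR_ADD macro). cpx_free asserts on both guards, which catches overruns
 * of the variable-length key area.
 */
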
// -- cpx_t accessors --

size_t
cpx_size(size_t key_size)
{
	size_t size = sizeof(struct cpx) + key_size;

#if DEBUG
	size += 4; // Extra for magic
#endif

	return size;
}

size_t
cpx_sizex(const struct cpx *cpx)
{
	return cpx_size(cpx->cpx_max_key_len);
}

cpx_t
cpx_alloc(size_t key_len)
{
	cpx_t cpx = NULL;

#if CONFIG_KEYPAGE_WP
	/*
	 * Macs only use 1 key per volume, so force it into its own page.
	 * This way, we can write-protect as needed.
	 */
	size_t cpsize = cpx_size(key_len);
	if (cpsize < PAGE_SIZE) {
		/*
		 * Don't use MALLOC to allocate the page-sized structure. Instead,
		 * use kmem_alloc to bypass KASAN, since we are supplying our own
		 * unilateral write protection on this page. Note that kmem_alloc
		 * can block.
		 */
		if (kmem_alloc(kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE, VM_KERN_MEMORY_FILE)) {
			/*
			 * Returning NULL at this point (due to the failed allocation)
			 * would just result in a panic. Fall back to attempting a normal
			 * MALLOC, and don't let the cpx get marked PROTECTABLE.
			 */
			MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
		} else {
			// Mark the page as protectable, since kmem_alloc succeeded.
			cpx->cpx_flags |= CPX_WRITE_PROTECTABLE;
		}
	} else {
		panic("cpx_size too large! (%lu)", cpsize);
	}
#else
	/* If key page write protection is disabled, just use kernel MALLOC. */
	MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
#endif
	cpx_init(cpx, key_len);

	return cpx;
}

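/*
 * Illustrative lifecycle (a sketch, not from the original source; the key
 * length and the unwrapped_key buffer below are hypothetical):
 *
 *	cpx_t cpx = cpx_alloc(32);                // sized for a 256-bit key
 *	cpx_set_key_len(cpx, 32);
 *	memcpy(cpx_key(cpx), unwrapped_key, 32);  // caller-supplied key bytes
 *	cpx_writeprotect(cpx);                    // no-op unless page-backed
 *	...
 *	cpx_free(cpx);                            // unprotects, zeroes, frees
 */
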
/*
 * Best-effort: the result of vm_map_protect below is not checked, so this
 * really is a void function even if the page cannot be protected.
 */
void
cpx_writeprotect(cpx_t cpx)
{
#if CONFIG_KEYPAGE_WP
	void *cpxstart = (void*)cpx;
	void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE);
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_READ), FALSE);
	}
#else
	(void) cpx;
#endif
	return;
}

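/*
 * The final (set_max) argument to vm_map_protect above is FALSE, so only
 * the current protection on the page changes; cpx_free can later restore
 * VM_PROT_DEFAULT on the same range before zeroing and releasing it.
 */
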
#if DEBUG
static const uint32_t cpx_magic1 = 0x7b787063; // cpx{
static const uint32_t cpx_magic2 = 0x7870637d; // }cpx
#endif

void
cpx_free(cpx_t cpx)
{
#if DEBUG
	assert(cpx->cpx_magic1 == cpx_magic1);
	assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif

#if CONFIG_KEYPAGE_WP
	/* Unprotect the page before bzeroing. */
	void *cpxstart = (void*)cpx;
	void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE);
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_DEFAULT), FALSE);

		// Now zero the memory after un-protecting it.
		bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);

		// If we are here, we used kmem_alloc to get the page. Must use kmem_free to drop it.
		kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE);
		return;
	}
#endif
	/* MALLOCed cpx (including the CONFIG_KEYPAGE_WP fallback path). */
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	FREE(cpx, M_TEMP);
	return;
}

void
cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
	cpx->cpx_magic1 = cpx_magic1;
	*PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
	cpx->cpx_max_key_len = key_len;
}

bool
cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}

void
cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
	if (v) {
		SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
	} else {
		CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
	}
}

bool
cpx_is_composite_key(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
}

void
cpx_set_is_composite_key(struct cpx *cpx, bool v)
{
	if (v) {
		SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
	} else {
		CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
	}
}

bool
cpx_use_offset_for_iv(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}

void
cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
	if (v) {
		SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
	} else {
		CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
	}
}

bool
cpx_synthetic_offset_for_iv(const struct cpx *cpx)
{
	return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}

void
cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
{
	if (v) {
		SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
	} else {
		CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
	}
}

uint16_t
cpx_max_key_len(const struct cpx *cpx)
{
	return cpx->cpx_max_key_len;
}

uint16_t
cpx_key_len(const struct cpx *cpx)
{
	return cpx->cpx_key_len;
}

void
cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
	cpx->cpx_key_len = key_len;

	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
		/*
		 * We assume that if the key length is being modified, the key
		 * has changed. As a result, un-set any bits related to the
		 * AES context, if needed. They should be re-generated
		 * on-demand.
		 */
		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
	}
}

bool
cpx_has_key(const struct cpx *cpx)
{
	return cpx->cpx_key_len > 0;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
void *
cpx_key(const struct cpx *cpx)
{
	return (void *)cpx->cpx_cached_key;
}
#pragma clang diagnostic pop

void
cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
	aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
	CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
}

aes_encrypt_ctx *
cpx_iv_aes_ctx(struct cpx *cpx)
{
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
		return &cpx->cpx_iv_aes_ctx;
	}

	SHA1_CTX sha1ctxt;
	uint8_t digest[SHA_DIGEST_LENGTH]; /* Kiv */

	/* First init the cp_cache_iv_key[] */
	SHA1Init(&sha1ctxt);

	/*
	 * We can only use this when the keys are generated in the AP; as a
	 * result, we only use the first 32 bytes of the key in the cache key.
	 */
	SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
	SHA1Final(digest, &sha1ctxt);

	cpx_set_aes_iv_key(cpx, digest);
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

	return &cpx->cpx_iv_aes_ctx;
}

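/*
 * Derivation note: SHA1Final yields a 20-byte digest, while
 * aes_encrypt_key128 reads only the first 16 bytes of the buffer passed to
 * it, so Kiv is effectively the truncated SHA-1 of the cached key.
 * CPX_IV_AES_CTX_VFS marks the context as derived here (rather than set via
 * cpx_set_aes_iv_key), which lets cpx_set_key_len invalidate it when the
 * key changes.
 */
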
void
cpx_flush(cpx_t cpx)
{
	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
	bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx));
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
}

bool
cpx_can_copy(const struct cpx *src, const struct cpx *dst)
{
	return src->cpx_key_len <= dst->cpx_max_key_len;
}

void
cpx_copy(const struct cpx *src, cpx_t dst)
{
	uint16_t key_len = cpx_key_len(src);
	cpx_set_key_len(dst, key_len);
	memcpy(cpx_key(dst), cpx_key(src), key_len);
	dst->cpx_flags = src->cpx_flags;
	if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
		dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx;
	}
}

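/*
 * Illustrative pairing (a sketch, not from the original source): cpx_copy
 * does not bounds-check on its own, so callers are expected to gate it:
 *
 *	if (cpx_can_copy(src, dst)) {
 *		cpx_copy(src, dst);
 *	}
 */
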
typedef struct {
	cp_lock_state_t state;
	int valid_uuid;
	uuid_t volume_uuid;
} cp_lock_vfs_callback_arg;

static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
	cp_lock_vfs_callback_arg *callback_arg = (cp_lock_vfs_callback_arg *)arg;

	if (callback_arg->valid_uuid) {
		struct vfs_attr va;
		VFSATTR_INIT(&va);
		VFSATTR_WANTED(&va, f_uuid);

		if (vfs_getattr(mp, &va, vfs_context_current())) {
			return 0;
		}

		if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) {
			return 0;
		}

		if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t))) {
			return 0;
		}
	}

	VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->state, 0, vfs_context_kernel());
	return 0;
}

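/*
 * The callback above runs once per mount via vfs_iterate. When a volume
 * UUID was supplied, mounts that fail to report f_uuid, or whose f_uuid
 * does not match, are skipped; otherwise the new lock state is pushed down
 * to the filesystem through the FIODEVICELOCKED ioctl so it can react to
 * the device lock-state change.
 */
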
int
cp_key_store_action(cp_key_store_action_t action)
{
	cp_lock_vfs_callback_arg callback_arg;

	switch (action) {
	case CP_ACTION_LOCKED:
	case CP_ACTION_UNLOCKED:
		callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
		memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
		callback_arg.valid_uuid = 0;
		return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
	default:
		return -1;
	}
}

int
cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
{
	cp_lock_vfs_callback_arg callback_arg;

	switch (action) {
	case CP_ACTION_LOCKED:
	case CP_ACTION_UNLOCKED:
		callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
		memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
		callback_arg.valid_uuid = 1;
		return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
	default:
		return -1;
	}
}

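/*
 * Illustrative call (a sketch, not from the original source; vol_uuid is a
 * hypothetical caller-provided value): a keybag handler that just unlocked
 * one volume would broadcast the new state with:
 *
 *	cp_key_store_action_for_volume(vol_uuid, CP_ACTION_UNLOCKED);
 */
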
int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
	/*
	 * The valid protection classes are from 0 -> N.
	 * We use a signed argument to detect unassigned values from
	 * directory entry creation time in HFS.
	 */
	if (isdir) {
		/* Directories are not allowed to have F, but they can have "NONE". */
		return (protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
		       (protectionclass <= PROTECTION_CLASS_D);
	} else {
		return (protectionclass >= PROTECTION_CLASS_A) &&
		       (protectionclass <= PROTECTION_CLASS_F);
	}
}

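/*
 * For reference, assuming the values in sys/content_protection.h
 * (PROTECTION_CLASS_DIR_NONE = 0, classes A..F = 1..6): directories accept
 * 0..4 (NONE through D) and regular files accept 1..6 (A through F).
 */
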
/*
 * Parses versions of the form 12A316, i.e. <major><minor><revision>, and
 * returns a uint32_t in the form 0xaabbcccc, where aa = <major>,
 * bb = <ASCII char>, cccc = <revision>.
 */
static cp_key_os_version_t
parse_os_version(const char *vers)
{
	const char *p = vers;

	int a = 0;
	while (*p >= '0' && *p <= '9') {
		a = a * 10 + *p - '0';
		++p;
	}

	if (!a) {
		return 0;
	}

	int b = *p++;
	if (!b) {
		return 0;
	}

	int c = 0;
	while (*p >= '0' && *p <= '9') {
		c = c * 10 + *p - '0';
		++p;
	}

	if (!c) {
		return 0;
	}

	return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
}

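/*
 * Worked example: "12A316" parses as a = 12 (0x0C), b = 'A' (0x41) and
 * c = 316 (0x013C), giving 0x0C41013C. A version string missing any of the
 * three parts yields 0, which cp_os_version treats as a parse failure.
 */
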
cp_key_os_version_t
cp_os_version(void)
{
	static cp_key_os_version_t cp_os_version;

	if (cp_os_version) {
		return cp_os_version;
	}

	if (!osversion[0]) {
		return 0;
	}

	cp_os_version = parse_os_version(osversion);
	if (!cp_os_version) {
		printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
		cp_os_version = 1;
	}

	return cp_os_version;
}