]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_cprotect.c
xnu-2782.40.9.tar.gz
[apple/xnu.git] / bsd / hfs / hfs_cprotect.c
1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <sys/cprotect.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/random.h>
32 #include <sys/xattr.h>
33 #include <sys/uio_internal.h>
34 #include <sys/ubc_internal.h>
35 #include <sys/vnode_if.h>
36 #include <sys/vnode_internal.h>
37 #include <sys/fcntl.h>
38 #include <libkern/OSByteOrder.h>
39 #include <sys/proc.h>
40 #include <sys/kauth.h>
41
42 #include "hfs.h"
43 #include "hfs_cnode.h"
44 #include "hfs_fsctl.h"
45
#if CONFIG_PROTECT
/*
 * The wrap function pointers and the variable to indicate if they
 * are initialized are system-wide, and hence are defined globally.
 * They are populated by cp_register_wraps() when AppleKeyStore loads;
 * are_wraps_initialized gates all key operations until that happens.
 */
static struct cp_wrap_func g_cp_wrap_func = {};
static int are_wraps_initialized = false;

/* HFS vnode operation vector, defined elsewhere in the HFS layer. */
extern int (**hfs_vnodeop_p) (void *);
55
/*
 * CP private functions
 * (forward declarations for the static helpers defined later in this file)
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static struct cprotect *cp_entry_alloc(size_t);
static void cp_entry_dealloc(struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vfs_callback(mount_t, void *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_new(int newclass, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode,
		uint32_t flags, struct cprotect **output_entry);
static int cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static int cp_setup_aes_ctx(struct cprotect *entry);
static void cp_init_access(cp_cred_t access, struct cnode *cp);
75 static inline int cp_get_crypto_generation (uint32_t protclass) {
76 if (protclass & CP_CRYPTO_G1) {
77 return 1;
78 }
79 else return 0;
80 }
81
82
#if DEVELOPMENT || DEBUG
/*
 * CP_ASSERT: panic on a violated invariant in development/debug kernels.
 * Compiles away to nothing on release builds.
 */
#define CP_ASSERT(x)		\
	if ((x) == 0) {			\
		panic("Content Protection: failed assertion in %s", __FUNCTION__); 	\
	}
#else
#define CP_ASSERT(x)
#endif
91
92 int
93 cp_key_store_action(int action)
94 {
95
96 if (action < 0 || action > CP_MAX_STATE) {
97 return -1;
98 }
99
100 /*
101 * The lock state is kept locally to each data protected filesystem to
102 * avoid using globals. Pass along the lock request to each filesystem
103 * we iterate through.
104 */
105
106 /*
107 * Upcast the value in 'action' to be a pointer-width unsigned integer.
108 * This avoids issues relating to pointer-width.
109 */
110 unsigned long action_arg = (unsigned long) action;
111 return vfs_iterate(0, cp_lock_vfs_callback, (void*)action_arg);
112 }
113
114
115 int
116 cp_register_wraps(cp_wrap_func_t key_store_func)
117 {
118 g_cp_wrap_func.new_key = key_store_func->new_key;
119 g_cp_wrap_func.unwrapper = key_store_func->unwrapper;
120 g_cp_wrap_func.rewrapper = key_store_func->rewrapper;
121 /* do not use invalidater until rdar://12170050 goes in ! */
122 g_cp_wrap_func.invalidater = key_store_func->invalidater;
123 g_cp_wrap_func.backup_key = key_store_func->backup_key;
124
125 /* Mark the functions as initialized in the function pointer container */
126 are_wraps_initialized = true;
127
128 return 0;
129 }
130
/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 *
 * Returns 0 on success (cp->c_cpentry populated, possibly keyless),
 * ENXIO if the AKS callbacks are not registered yet, or an error from
 * cp_getxattr/cp_new/cp_setxattr.  On any error cp->c_cpentry is NULL.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
	struct cprotect *entry = NULL;
	int error = 0;
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/*
	 * The cnode should be locked at this point, regardless of whether or not
	 * we are creating a new item in the namespace or vending a vnode on behalf
	 * of lookup. The only time we tell getnewvnode to skip the lock is when
	 * constructing a resource fork vnode. But a resource fork vnode must come
	 * after the regular data fork cnode has already been constructed.
	 */
	if (!cp_fs_protected (mp)) {
		/* Mount is not content-protected: nothing to attach. */
		cp->c_cpentry = NULL;
		return 0;
	}

	if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
		/* Only files and directories carry protection EAs. */
		cp->c_cpentry = NULL;
		return 0;
	}

	if (are_wraps_initialized == false) {
		/* AppleKeyStore has not registered its callbacks yet. */
		printf("hfs: cp_update_entry: wrap functions not yet set\n");
		return ENXIO;
	}

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("hfs cp: no running mount point version! ");
	}

	CP_ASSERT (cp->c_cpentry == NULL);

	error = cp_getxattr(cp, hfsmp, &entry);
	if (error == 0) {
		/*
		 * Success; attribute was found, though it may not have keys.
		 * If the entry is returned without keys, we will delay generating
		 * keys until the first I/O.
		 */
		if (S_ISREG(cp->c_mode)) {
			if (entry->cp_flags & CP_NEEDS_KEYS) {
				/* No keys exist yet, so nothing can be "flushed". */
				entry->cp_flags &= ~CP_KEY_FLUSHED;
			}
			else {
				/* Keys exist on disk but are not yet unwrapped in core. */
				entry->cp_flags |= CP_KEY_FLUSHED;
			}
		}
	}
	else if (error == ENOATTR) {
		/*
		 * Normally, we should always have a CP EA for a file or directory that
		 * we are initializing here. However, there are some extenuating circumstances,
		 * such as the root directory immediately following a newfs_hfs.
		 *
		 * As a result, we leave code here to deal with an ENOATTR which will always
		 * default to a 'D/NONE' key, though we don't expect to use it much.
		 */
		int target_class = PROTECTION_CLASS_D;

		if (S_ISDIR(cp->c_mode)) {
			target_class = PROTECTION_CLASS_DIR_NONE;
		}
		/* allow keybag to override our class preferences */
		uint32_t keyflags = CP_KEYWRAP_DIFFCLASS;
		error = cp_new (target_class, hfsmp, cp, cp->c_mode, keyflags, &entry);
		if (error == 0) {
			/* Persist the newly generated blob as the cnode's CP EA. */
			error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
		}
	}

	/*
	 * Bail out if:
	 * a) error was not ENOATTR (we got something bad from the getxattr call)
	 * b) we encountered an error setting the xattr above.
	 * c) we failed to generate a new cprotect data structure.
	 */
	if (error) {
		goto out;
	}

	cp->c_cpentry = entry;

out:
	if (error == 0) {
		/* Success: link the blob back to its owning cnode. */
		entry->cp_backing_cnode = cp;
	}
	else {
		/* Failure: the blob (if any) is ours to free; leave the cnode clean. */
		if (entry) {
			cp_entry_destroy(entry);
		}
		cp->c_cpentry = NULL;
	}

	return error;
}
236
237 /*
238 * cp_setup_newentry
239 *
240 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
241 * Since the kext is now responsible for vending us both wrapped/unwrapped keys
242 * we need to create a keyless xattr upon file / directory creation. When we have the inode value
243 * and the file/directory is established, then we can ask it to generate keys. Note that
244 * this introduces a potential race; If the device is locked and the wrapping
245 * keys are purged between the time we call this function and the time we ask it to generate
246 * keys for us, we could have to fail the open(2) call and back out the entry.
247 */
248
249 int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp, int32_t suppliedclass,
250 mode_t cmode, struct cprotect **tmpentry)
251 {
252 int isdir = 0;
253 struct cprotect *entry = NULL;
254 uint32_t target_class = hfsmp->default_cp_class;
255 suppliedclass = CP_CLASS(suppliedclass);
256
257 if (hfsmp->hfs_running_cp_major_vers == 0) {
258 panic ("CP: major vers not set in mount!");
259 }
260
261 if (S_ISDIR (cmode)) {
262 isdir = 1;
263 }
264
265 /* Decide the target class. Input argument takes priority. */
266 if (cp_is_valid_class (isdir, suppliedclass)) {
267 /* caller supplies -1 if it was not specified so we will default to the mount point value */
268 target_class = suppliedclass;
269 /*
270 * One exception, F is never valid for a directory
271 * because its children may inherit and userland will be
272 * unable to read/write to the files.
273 */
274 if (isdir) {
275 if (target_class == PROTECTION_CLASS_F) {
276 *tmpentry = NULL;
277 return EINVAL;
278 }
279 }
280 }
281 else {
282 /*
283 * If no valid class was supplied, behave differently depending on whether or not
284 * the item being created is a file or directory.
285 *
286 * for FILE:
287 * If parent directory has a non-zero class, use that.
288 * If parent directory has a zero class (not set), then attempt to
289 * apply the mount point default.
290 *
291 * for DIRECTORY:
292 * Directories always inherit from the parent; if the parent
293 * has a NONE class set, then we can continue to use that.
294 */
295 if ((dcp) && (dcp->c_cpentry)) {
296 uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
297 /* If the parent class is not valid, default to the mount point value */
298 if (cp_is_valid_class(1, parentclass)) {
299 if (isdir) {
300 target_class = parentclass;
301 }
302 else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
303 /* files can inherit so long as it's not NONE */
304 target_class = parentclass;
305 }
306 }
307 /* Otherwise, we already defaulted to the mount point's default */
308 }
309 }
310
311 /* Generate the cprotect to vend out */
312 entry = cp_entry_alloc (0);
313 if (entry == NULL) {
314 *tmpentry = NULL;
315 return ENOMEM;
316 }
317
318 /*
319 * We don't have keys yet, so fill in what we can. At this point
320 * this blob has no keys and it has no backing xattr. We just know the
321 * target class.
322 */
323 entry->cp_flags = (CP_NEEDS_KEYS | CP_NO_XATTR);
324 /* Note this is only the effective class */
325 entry->cp_pclass = target_class;
326 *tmpentry = entry;
327
328 return 0;
329 }
330
331
332 /*
333 * cp_needs_tempkeys
334 *
335 * Relay to caller whether or not the filesystem should generate temporary keys
336 * during resize operations.
337 */
338
339 int cp_needs_tempkeys (struct hfsmount *hfsmp, int *needs)
340 {
341
342 if (hfsmp->hfs_running_cp_major_vers < CP_PREV_MAJOR_VERS ||
343 hfsmp->hfs_running_cp_major_vers > CP_NEW_MAJOR_VERS) {
344 return -1;
345 }
346
347 /* CP_NEW_MAJOR_VERS implies CP_OFF_IV_ENABLED */
348 if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
349 *needs = 0;
350 }
351 else {
352 *needs = 1;
353 }
354
355 return 0;
356 }
357
358
/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk. We use class F for this since it provides the exact
 * semantics that are needed here. Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 *
 * Returns EPERM on pre-CP_NEW_MAJOR_VERS mounts, ENOMEM on allocation
 * failure, else 0 with *entry_ptr set.
 */
int cp_entry_gentempkeys(struct cprotect **entry_ptr, struct hfsmount *hfsmp)
{

	struct cprotect *entry = NULL;

	if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
		/* Transient class-F keys require the new CP version. */
		return EPERM;
	}

	/*
	 * This should only be used for files and won't be written out.
	 * We don't need a persistent key.
	 */
	entry = cp_entry_alloc (0);
	if (entry == NULL) {
		*entry_ptr = NULL;
		return ENOMEM;
	}
	/* This is generated in-kernel so we leave it at the max key*/
	entry->cp_cache_key_len = CP_MAX_KEYSIZE;

	/* This pclass is only the effective class */
	entry->cp_pclass = PROTECTION_CLASS_F;
	entry->cp_persistent_key_len = 0;

	/* Generate the class F key */
	read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);

	/* Generate the IV key */
	/* NOTE(review): cp_setup_aes_ctx's return value is ignored here — confirm it cannot fail on this path. */
	cp_setup_aes_ctx(entry);
	entry->cp_flags |= CP_OFF_IV_ENABLED;

	*entry_ptr = entry;
	return 0;

}
406
407 /*
408 * Tear down and clear a cprotect blob for a closing file.
409 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
410 */
411 void
412 cp_entry_destroy(struct cprotect *entry_ptr)
413 {
414 if (entry_ptr == NULL) {
415 /* nothing to clean up */
416 return;
417 }
418 cp_entry_dealloc(entry_ptr);
419 }
420
421
422 int
423 cp_fs_protected (mount_t mnt)
424 {
425 return (vfs_flags(mnt) & MNT_CPROTECT);
426 }
427
428
429 /*
430 * Return a pointer to underlying cnode if there is one for this vnode.
431 * Done without taking cnode lock, inspecting only vnode state.
432 */
433 struct cnode *
434 cp_get_protected_cnode(struct vnode *vp)
435 {
436 if (!cp_vnode_is_eligible(vp)) {
437 return NULL;
438 }
439
440 if (!cp_fs_protected(VTOVFS(vp))) {
441 /* mount point doesn't support it */
442 return NULL;
443 }
444
445 return (struct cnode*) vp->v_data;
446 }
447
448
/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 *
 * Returns EBADF for ineligible vnodes, ENOTSUP on unprotected mounts,
 * or a locking error.  Takes the truncate lock shared and the cnode
 * lock shared for the duration; both are dropped before returning.
 */
int
cp_vnode_getclass(struct vnode *vp, int *class)
{
	struct cprotect *entry;
	int error = 0;
	struct cnode *cp;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible (vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the truncate lock up-front in shared mode because we may need
	 * to manipulate the CP blob. Pend lock events until we're done here.
	 */
	hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * We take only the shared cnode lock up-front. If it turns out that
	 * we need to manipulate the CP blob to write a key out, drop the
	 * shared cnode lock and acquire an exclusive lock.
	 */
	error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	if (error) {
		/* Could not take the cnode lock: drop the truncate lock and bail. */
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return error;
	}

	/* pull the class from the live entry */
	entry = cp->c_cpentry;

	if (entry == NULL) {
		/* Every cnode on a protected mount must carry a cprotect blob here. */
		panic("Content Protection: uninitialized cnode %p", cp);
	}

	/* Note that we may not have keys yet, but we know the target class. */

	if (error == 0) {
		/* Report only the effective class, stripping generation bits. */
		*class = CP_CLASS(entry->cp_pclass);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	return error;
}
513
514
515 /*
516 * Sets persistent class for this file or directory.
517 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
518 * If the new class can't be accessed now, EPERM.
519 * Otherwise, record class and re-wrap key if the mount point is content-protected.
520 */
521 int
522 cp_vnode_setclass(struct vnode *vp, uint32_t newclass)
523 {
524 struct cnode *cp;
525 struct cprotect *entry = 0;
526 int error = 0;
527 int took_truncate_lock = 0;
528 struct hfsmount *hfsmp = NULL;
529 int isdir = 0;
530
531 if (vnode_isdir (vp)) {
532 isdir = 1;
533 }
534
535 /* Ensure we only use the effective class here */
536 newclass = CP_CLASS(newclass);
537
538 if (!cp_is_valid_class(isdir, newclass)) {
539 printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
540 return EINVAL;
541 }
542
543 /* Is this an interesting vp? */
544 if (!cp_vnode_is_eligible(vp)) {
545 return EBADF;
546 }
547
548 /* Is the mount point formatted for content protection? */
549 if (!cp_fs_protected(VTOVFS(vp))) {
550 return ENOTSUP;
551 }
552
553 hfsmp = VTOHFS(vp);
554 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
555 return EROFS;
556 }
557
558 /*
559 * Take the cnode truncate lock exclusive because we want to manipulate the
560 * CP blob. The lock-event handling code is doing the same. This also forces
561 * all pending IOs to drain before we can re-write the persistent and cache keys.
562 */
563 cp = VTOC(vp);
564 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
565 took_truncate_lock = 1;
566
567 /*
568 * The truncate lock is not sufficient to guarantee the CP blob
569 * isn't being used. We must wait for existing writes to finish.
570 */
571 vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");
572
573 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
574 return EINVAL;
575 }
576
577 entry = cp->c_cpentry;
578 if (entry == NULL) {
579 error = EINVAL;
580 goto out;
581 }
582
583 /*
584 * re-wrap per-file key with new class.
585 * Generate an entirely new key if switching to F.
586 */
587 if (vnode_isreg(vp)) {
588 /*
589 * The vnode is a file. Before proceeding with the re-wrap, we need
590 * to unwrap the keys before proceeding. This is to ensure that
591 * the destination class's properties still work appropriately for the
592 * target class (since B allows I/O but an unwrap prior to the next unlock
593 * will not be allowed).
594 */
595 if (entry->cp_flags & CP_KEY_FLUSHED) {
596 error = cp_restore_keys (entry, hfsmp, cp);
597 if (error) {
598 goto out;
599 }
600 }
601 if (newclass == PROTECTION_CLASS_F) {
602 /* Verify that file is blockless if switching to class F */
603 if (cp->c_datafork->ff_size > 0) {
604 error = EINVAL;
605 goto out;
606 }
607
608 /* newclass is only the effective class */
609 entry->cp_pclass = newclass;
610
611 /* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */
612 entry->cp_cache_key_len = CP_MAX_KEYSIZE;
613 read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);
614 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
615 cp_setup_aes_ctx (entry);
616 entry->cp_flags |= CP_OFF_IV_ENABLED;
617 }
618 bzero(entry->cp_persistent_key, entry->cp_persistent_key_len);
619 entry->cp_persistent_key_len = 0;
620 } else {
621 /* Deny the setclass if file is to be moved from F to something else */
622 if (entry->cp_pclass == PROTECTION_CLASS_F) {
623 error = EPERM;
624 goto out;
625 }
626 /* We cannot call cp_rewrap unless the keys were already in existence. */
627 if (entry->cp_flags & CP_NEEDS_KEYS) {
628 struct cprotect *newentry = NULL;
629 /*
630 * We want to fail if we can't wrap to the target class. By not setting
631 * CP_KEYWRAP_DIFFCLASS, we tell keygeneration that if it can't wrap
632 * to 'newclass' then error out.
633 */
634 uint32_t flags = 0;
635 error = cp_generate_keys (hfsmp, cp, newclass, flags, &newentry);
636 if (error == 0) {
637 cp_replace_entry (cp, newentry);
638 }
639 /* Bypass the setxattr code below since generate_keys does it for us */
640 goto out;
641 }
642 else {
643 error = cp_rewrap(cp, hfsmp, newclass);
644 }
645 }
646 if (error) {
647 /* we didn't have perms to set this class. leave file as-is and error out */
648 goto out;
649 }
650 }
651 else if (vnode_isdir(vp)) {
652 /* For directories, just update the pclass. newclass is only effective class */
653 entry->cp_pclass = newclass;
654 error = 0;
655 }
656 else {
657 /* anything else, just error out */
658 error = EINVAL;
659 goto out;
660 }
661
662 /*
663 * We get here if the new class was F, or if we were re-wrapping a cprotect that already
664 * existed. If the keys were never generated, then they'll skip the setxattr calls.
665 */
666
667 error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
668 if (error == ENOATTR) {
669 error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
670 }
671
672 out:
673
674 if (took_truncate_lock) {
675 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
676 }
677 hfs_unlock(cp);
678 return error;
679 }
680
681
682 int cp_vnode_transcode(vnode_t vp, void *key, unsigned *len)
683 {
684 struct cnode *cp;
685 struct cprotect *entry = 0;
686 int error = 0;
687 int took_truncate_lock = 0;
688 struct hfsmount *hfsmp = NULL;
689
690 /* Structures passed between HFS and AKS */
691 cp_cred_s access_in;
692 cp_wrapped_key_s wrapped_key_in, wrapped_key_out;
693
694 /* Is this an interesting vp? */
695 if (!cp_vnode_is_eligible(vp)) {
696 return EBADF;
697 }
698
699 /* Is the mount point formatted for content protection? */
700 if (!cp_fs_protected(VTOVFS(vp))) {
701 return ENOTSUP;
702 }
703
704 cp = VTOC(vp);
705 hfsmp = VTOHFS(vp);
706
707 /*
708 * Take the cnode truncate lock exclusive because we want to manipulate the
709 * CP blob. The lock-event handling code is doing the same. This also forces
710 * all pending IOs to drain before we can re-write the persistent and cache keys.
711 */
712 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
713 took_truncate_lock = 1;
714
715 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
716 return EINVAL;
717 }
718
719 entry = cp->c_cpentry;
720 if (entry == NULL) {
721 error = EINVAL;
722 goto out;
723 }
724
725 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
726 /*
727 * If we are transcoding keys for AKB, then we should have already established
728 * a set of keys for this vnode. IF we don't have keys yet, then something bad
729 * happened.
730 */
731 error = EINVAL;
732 goto out;
733 }
734
735 /* Send the per-file key in wrapped form for re-wrap with the current class information
736 * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
737 * Don't need to process any outputs, so just clear the locks and pass along the error. */
738 if (vnode_isreg(vp)) {
739
740 /* Picked up the following from cp_wrap().
741 * If needed, more comments available there. */
742
743 if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
744 error = EINVAL;
745 goto out;
746 }
747
748 cp_init_access(&access_in, cp);
749
750 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
751 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
752 wrapped_key_in.key = entry->cp_persistent_key;
753 wrapped_key_in.key_len = entry->cp_persistent_key_len;
754 /* Use the actual persistent class when talking to AKS */
755 wrapped_key_in.dp_class = entry->cp_pclass;
756 wrapped_key_out.key = key;
757 wrapped_key_out.key_len = *len;
758
759 error = g_cp_wrap_func.backup_key(&access_in,
760 &wrapped_key_in,
761 &wrapped_key_out);
762
763 if(error)
764 error = EPERM;
765 else
766 *len = wrapped_key_out.key_len;
767 }
768
769 out:
770 if (took_truncate_lock) {
771 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
772 }
773 hfs_unlock(cp);
774 return error;
775 }
776
777
/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock. This is because
 * the thread calling us may already have the truncate lock. It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys. We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
	struct cprotect *entry;
	int error = 0;
	struct hfsmount *hfsmp = NULL;
	struct cnode *cp = NULL;

	/*
	 * First, do validation against the vnode before proceeding any further:
	 * Is this vnode originating from a valid content-protected filesystem ?
	 */
	if (cp_vnode_is_eligible(vp) == 0) {
		/*
		 * It is either not HFS or not a file/dir. Just return success. This is a valid
		 * case if servicing i/o against another filesystem type from VFS
		 */
		return 0;
	}

	if (cp_fs_protected (VTOVFS(vp)) == 0) {
		/*
		 * The underlying filesystem does not support content protection. This is also
		 * a valid case. Simply return success.
		 */
		return 0;
	}

	/*
	 * At this point, we know we have a HFS vnode that backs a file or directory on a
	 * filesystem that supports content protection
	 */
	cp = VTOC(vp);

	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;

	if (entry == NULL) {
		/*
		 * If this cnode is not content protected, simply return success.
		 * Note that this function is called by all I/O-based call sites
		 * when CONFIG_PROTECT is enabled during XNU building.
		 */

		/*
		 * All files should have cprotect structs. It's possible to encounter
		 * a directory from a V2.0 CP system but all files should have protection
		 * EAs
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}

		goto out;
	}

	/* Re-derive a usable vnode from the cnode: data fork first, then rsrc. */
	vp = CTOV(cp, 0);
	if (vp == NULL) {
		/* is it a rsrc */
		vp = CTOV(cp,1);
		if (vp == NULL) {
			error = EINVAL;
			goto out;
		}
	}
	hfsmp = VTOHFS(vp);

	if ((error = cp_check_access(cp, hfsmp, vnop))) {
		/* check for raw encrypted access before bailing out */
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/*
			 * read access only + asking for the raw encrypted bytes
			 * is legitimate, so reset the error value to 0
			 */
			error = 0;
		}
		else {
			goto out;
		}
	}

	if (entry->cp_flags == 0) {
		/* no more work to do */
		goto out;
	}

	/* upgrade to exclusive lock */
	if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
		/*
		 * Upgrade failed: the shared lock was dropped, so re-acquire
		 * exclusive from scratch.  If that fails we hold no cnode lock,
		 * which is why this path returns directly instead of 'goto out'
		 * (out would hfs_unlock a lock we do not hold).
		 */
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
			return error;
		}
	} else {
		/* Upgrade succeeded in place; record ourselves as the owner. */
		cp->c_lockowner = current_thread();
	}

	/* generate new keys if none have ever been saved */
	if ((entry->cp_flags & CP_NEEDS_KEYS)) {
		struct cprotect *newentry = NULL;
		/*
		 * It's ok if this ends up being wrapped in a different class than 'pclass'.
		 * class modification is OK here.
		 */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;

		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/* unwrap keys if needed */
	if (entry->cp_flags & CP_KEY_FLUSHED) {
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/* no need to try to restore keys; they are not going to be used */
			error = 0;
		}
		else {
			error = cp_restore_keys(entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
	}

	/* write out the xattr if it's new */
	if (entry->cp_flags & CP_NO_XATTR)
		error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:

	hfs_unlock(cp);
	return error;
}
934
935
/*
 * cp_handle_open:
 * Gate open(2) on content-protection state.  Generates keys on first open
 * if needed, then — depending on the effective class — verifies that the
 * file's key can actually be unwrapped right now, so open fails cleanly on
 * a locked device.  Returns 0 on success, EPERM/other errno on denial.
 */
int
cp_handle_open(struct vnode *vp, int mode)
{
	struct cnode *cp = NULL ;
	struct cprotect *entry = NULL;
	struct hfsmount *hfsmp;
	int error = 0;

	/* If vnode not eligible, just return success */
	if (!cp_vnode_is_eligible(vp)) {
		return 0;
	}

	/* If mount point not properly set up, then also return success */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return 0;
	}

	/* We know the vnode is in a valid state. Acquire cnode and validate */
	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/*
		 * If the mount is protected and we couldn't get a cprotect for this vnode,
		 * then it's not valid for opening.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	/* Directories and other non-files need no key checks. */
	if (!S_ISREG(cp->c_mode))
		goto out;

	/*
	 * Does the cnode have keys yet? If not, then generate them.
	 */
	if (entry->cp_flags & CP_NEEDS_KEYS) {
		struct cprotect *newentry = NULL;
		/* Allow the keybag to override our class preferences */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;
		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/*
	 * We want to minimize the number of unwraps that we'll have to do since
	 * the cost can vary, depending on the platform we're running.
	 */
	switch (CP_CLASS(entry->cp_pclass)) {
		case PROTECTION_CLASS_B:
			if (mode & O_CREAT) {
				/*
				 * Class B always allows creation. Since O_CREAT was passed through
				 * we infer that this was a newly created vnode/cnode. Even though a potential
				 * race exists when multiple threads attempt to create/open a particular
				 * file, only one can "win" and actually create it. VFS will unset the
				 * O_CREAT bit on the loser.
				 *
				 * Note that skipping the unwrap check here is not a security issue --
				 * we have to unwrap the key permanently upon the first I/O.
				 */
				break;
			}

			if ((entry->cp_flags & CP_KEY_FLUSHED) == 0) {
				/*
				 * For a class B file, attempt the unwrap if we have the key in
				 * core already.
				 * The device could have just transitioned into the lock state, and
				 * this vnode may not yet have been purged from the vnode cache (which would
				 * remove the keys).
				 */
				cp_cred_s access_in;
				cp_wrapped_key_s wrapped_key_in;

				cp_init_access(&access_in, cp);
				bzero(&wrapped_key_in, sizeof(wrapped_key_in));
				wrapped_key_in.key = entry->cp_persistent_key;
				wrapped_key_in.key_len = entry->cp_persistent_key_len;
				/* Use the persistent class when talking to AKS */
				wrapped_key_in.dp_class = entry->cp_pclass;
				/* NULL output: we only test that AKS *can* unwrap, discarding the result. */
				error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, NULL);
				if (error) {
					error = EPERM;
				}
				break;
			}
			/* otherwise, fall through to attempt the unwrap/restore */
		case PROTECTION_CLASS_A:
		case PROTECTION_CLASS_C:
			/*
			 * At this point, we know that we need to attempt an unwrap if needed; we want
			 * to makes sure that open(2) fails properly if the device is either just-locked
			 * or never made it past first unlock. Since the keybag serializes access to the
			 * unwrapping keys for us and only calls our VFS callback once they've been purged,
			 * we will get here in two cases:
			 *
			 * A) we're in a window before the wrapping keys are purged; this is OK since when they get
			 * purged, the vnode will get flushed if needed.
			 *
			 * B) The keys are already gone. In this case, the restore_keys call below will fail.
			 *
			 * Since this function is bypassed entirely if we're opening a raw encrypted file,
			 * we can always attempt the restore.
			 */
			if (entry->cp_flags & CP_KEY_FLUSHED) {
				error = cp_restore_keys(entry, hfsmp, cp);
			}

			if (error) {
				error = EPERM;
			}

			break;

		case PROTECTION_CLASS_D:
		default:
			/* Class D (and anything unrecognized) opens unconditionally. */
			break;
	}

out:
	hfs_unlock(cp);
	return error;
}
1074
1075
1076 /*
1077 * During hfs resize operations, we have slightly different constraints than during
1078 * normal VNOPS that read/write data to files. Specifically, we already have the cnode
1079 * locked (so nobody else can modify it), and we are doing the IO with root privileges, since
1080 * we are moving the data behind the user's back. So, we skip access checks here (for unlock
1081 * vs. lock), and don't worry about non-existing keys. If the file exists on-disk with valid
1082 * payload, then it must have keys set up already by definition.
1083 */
1084 int
1085 cp_handle_relocate (struct cnode *cp, struct hfsmount *hfsmp)
1086 {
1087 struct cprotect *entry;
1088 int error = -1;
1089
1090 /* cp is already locked */
1091 entry = cp->c_cpentry;
1092 if (!entry)
1093 goto out;
1094
1095 /*
1096 * Still need to validate whether to permit access to the file or not
1097 * based on lock status
1098 */
1099 if ((error = cp_check_access(cp, hfsmp, CP_READ_ACCESS | CP_WRITE_ACCESS))) {
1100 goto out;
1101 }
1102
1103 if (entry->cp_flags == 0) {
1104 /* no more work to do */
1105 error = 0;
1106 goto out;
1107 }
1108
1109 /* it must have keys since it is an existing file with actual payload */
1110
1111 /* unwrap keys if needed */
1112 if (entry->cp_flags & CP_KEY_FLUSHED) {
1113 error = cp_restore_keys(entry, hfsmp, cp);
1114 }
1115
1116 /*
1117 * Don't need to write out the EA since if the file has actual extents,
1118 * it must have an EA
1119 */
1120 out:
1121
1122 /* return the cp still locked */
1123 return error;
1124 }
1125
1126 /*
1127 * cp_getrootxattr:
1128 * Gets the EA we set on the root folder (fileid 1) to get information about the
1129 * version of Content Protection that was used to write to this filesystem.
1130 * Note that all multi-byte fields are written to disk little endian so they must be
1131 * converted to native endian-ness as needed.
1132 */
1133 int
1134 cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
1135 {
1136 uio_t auio;
1137 char uio_buf[UIO_SIZEOF(1)];
1138 size_t attrsize = sizeof(struct cp_root_xattr);
1139 int error = 0;
1140 struct vnop_getxattr_args args;
1141
1142 if (!outxattr) {
1143 panic("Content Protection: cp_xattr called with xattr == NULL");
1144 }
1145
1146 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
1147 uio_addiov(auio, CAST_USER_ADDR_T(outxattr), attrsize);
1148
1149 args.a_desc = NULL; // unused
1150 args.a_vp = NULL; //unused since we're writing EA to root folder.
1151 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1152 args.a_uio = auio;
1153 args.a_size = &attrsize;
1154 args.a_options = XATTR_REPLACE;
1155 args.a_context = NULL; // unused
1156
1157 error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);
1158
1159 /* Now convert the multi-byte fields to native endianness */
1160 outxattr->major_version = OSSwapLittleToHostInt16(outxattr->major_version);
1161 outxattr->minor_version = OSSwapLittleToHostInt16(outxattr->minor_version);
1162 outxattr->flags = OSSwapLittleToHostInt64(outxattr->flags);
1163
1164 if (error != 0) {
1165 goto out;
1166 }
1167
1168 out:
1169 uio_free(auio);
1170 return error;
1171 }
1172
1173 /*
1174 * cp_setrootxattr:
1175 * Sets the EA we set on the root folder (fileid 1) to get information about the
1176 * version of Content Protection that was used to write to this filesystem.
1177 * Note that all multi-byte fields are written to disk little endian so they must be
1178 * converted to little endian as needed.
1179 *
1180 * This will be written to the disk when it detects the EA is not there, or when we need
1181 * to make a modification to the on-disk version that can be done in-place.
1182 */
1183 int
1184 cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
1185 {
1186 int error = 0;
1187 struct vnop_setxattr_args args;
1188
1189 args.a_desc = NULL;
1190 args.a_vp = NULL;
1191 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1192 args.a_uio = NULL; //pass data ptr instead
1193 args.a_options = 0;
1194 args.a_context = NULL; //no context needed, only done from mount.
1195
1196 /* Now convert the multi-byte fields to little endian before writing to disk. */
1197 newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
1198 newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);
1199 newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);
1200
1201 error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
1202 sizeof(struct cp_root_xattr), &args, hfsmp, 1);
1203 return error;
1204 }
1205
1206
1207 /*
1208 * Stores new xattr data on the cnode.
1209 * cnode lock held exclusive (if available).
1210 *
1211 * This function is also invoked during file creation.
1212 */
1213 int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp, uint32_t fileid, int options)
1214 {
1215 int error = 0;
1216 size_t attrsize;
1217 struct vnop_setxattr_args args;
1218 uint32_t target_fileid;
1219 struct cnode *arg_cp = NULL;
1220 uint32_t tempflags = 0;
1221
1222 args.a_desc = NULL;
1223
1224 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
1225 return EROFS;
1226 }
1227
1228 if (cp) {
1229 args.a_vp = cp->c_vp;
1230 target_fileid = 0;
1231 arg_cp = cp;
1232 }
1233 else {
1234 /*
1235 * When we set the EA in the same txn as the file creation,
1236 * we do not have a vnode/cnode yet. Use the specified fileid.
1237 */
1238 args.a_vp = NULL;
1239 target_fileid = fileid;
1240 }
1241 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1242 args.a_uio = NULL; //pass data ptr instead
1243 args.a_options = options;
1244 args.a_context = vfs_context_current();
1245
1246 /* Note that it's OK to write out an XATTR without keys. */
1247 /* Disable flags that will be invalid as we're writing the EA out at this point. */
1248 tempflags = entry->cp_flags;
1249
1250 /* we're writing the EA; CP_NO_XATTR is invalid */
1251 tempflags &= ~CP_NO_XATTR;
1252
1253 /* CP_SEP_WRAPPEDKEY is informational/runtime only. */
1254 tempflags &= ~CP_SEP_WRAPPEDKEY;
1255
1256 switch(hfsmp->hfs_running_cp_major_vers) {
1257 case CP_NEW_MAJOR_VERS: {
1258 struct cp_xattr_v4 *newxattr = NULL; // 70+ bytes; don't alloc on stack.
1259 MALLOC (newxattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
1260 if (newxattr == NULL) {
1261 error = ENOMEM;
1262 break;
1263 }
1264 bzero (newxattr, sizeof(struct cp_xattr_v4));
1265
1266 attrsize = sizeof(*newxattr) - CP_MAX_WRAPPEDKEYSIZE + entry->cp_persistent_key_len;
1267
1268 /* Endian swap the multi-byte fields into L.E from host. */
1269 newxattr->xattr_major_version = OSSwapHostToLittleInt16 (hfsmp->hfs_running_cp_major_vers);
1270 newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
1271 newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
1272 newxattr->flags = OSSwapHostToLittleInt32(tempflags);
1273 newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
1274 bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);
1275
1276 error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);
1277
1278 FREE(newxattr, M_TEMP);
1279 break;
1280 }
1281 case CP_PREV_MAJOR_VERS: {
1282 struct cp_xattr_v2 *newxattr = NULL;
1283 MALLOC (newxattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
1284 if (newxattr == NULL) {
1285 error = ENOMEM;
1286 break;
1287 }
1288 bzero (newxattr, sizeof(struct cp_xattr_v2));
1289
1290 attrsize = sizeof(*newxattr);
1291
1292 /* Endian swap the multi-byte fields into L.E from host. */
1293 newxattr->xattr_major_version = OSSwapHostToLittleInt16(hfsmp->hfs_running_cp_major_vers);
1294 newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
1295 newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
1296 newxattr->flags = OSSwapHostToLittleInt32(tempflags);
1297 newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
1298 bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);
1299
1300 error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);
1301
1302 FREE (newxattr, M_TEMP);
1303 break;
1304 }
1305 default:
1306 printf("hfs: cp_setxattr: Unknown CP version running \n");
1307 break;
1308 }
1309
1310 if (error == 0 ) {
1311 entry->cp_flags &= ~CP_NO_XATTR;
1312 }
1313
1314 return error;
1315
1316
1317 }
1318
1319 /*
1320 * Used by an fcntl to query the underlying FS for its content protection version #
1321 */
1322
1323 int
1324 cp_get_root_major_vers(vnode_t vp, uint32_t *level)
1325 {
1326 int err = 0;
1327 struct hfsmount *hfsmp = NULL;
1328 struct mount *mp = NULL;
1329
1330 mp = VTOVFS(vp);
1331
1332 /* check if it supports content protection */
1333 if (cp_fs_protected(mp) == 0) {
1334 return ENOTSUP;
1335 }
1336
1337 hfsmp = VFSTOHFS(mp);
1338 /* figure out the level */
1339
1340 err = cp_root_major_vers(mp);
1341
1342 if (err == 0) {
1343 *level = hfsmp->hfs_running_cp_major_vers;
1344 }
1345 /* in error case, cp_root_major_vers will just return EINVAL. Use that */
1346
1347 return err;
1348 }
1349
1350 /* Used by fcntl to query default protection level of FS */
1351 int cp_get_default_level (struct vnode *vp, uint32_t *level) {
1352 int err = 0;
1353 struct hfsmount *hfsmp = NULL;
1354 struct mount *mp = NULL;
1355
1356 mp = VTOVFS(vp);
1357
1358 /* check if it supports content protection */
1359 if (cp_fs_protected(mp) == 0) {
1360 return ENOTSUP;
1361 }
1362
1363 hfsmp = VFSTOHFS(mp);
1364 /* figure out the default */
1365
1366 *level = hfsmp->default_cp_class;
1367 return err;
1368 }
1369
1370 /********************
1371 * Private Functions
1372 *******************/
1373
1374 static int
1375 cp_root_major_vers(mount_t mp)
1376 {
1377 int err = 0;
1378 struct cp_root_xattr xattr;
1379 struct hfsmount *hfsmp = NULL;
1380
1381 hfsmp = vfs_fsprivate(mp);
1382 err = cp_getrootxattr (hfsmp, &xattr);
1383
1384 if (err == 0) {
1385 hfsmp->hfs_running_cp_major_vers = xattr.major_version;
1386 }
1387 else {
1388 return EINVAL;
1389 }
1390
1391 return 0;
1392 }
1393
1394 static int
1395 cp_vnode_is_eligible(struct vnode *vp)
1396 {
1397 return ((vp->v_op == hfs_vnodeop_p) &&
1398 (!vnode_issystem(vp)) &&
1399 (vnode_isreg(vp) || vnode_isdir(vp)));
1400 }
1401
1402
1403
1404 int
1405 cp_is_valid_class(int isdir, int32_t protectionclass)
1406 {
1407 /*
1408 * The valid protection classes are from 0 -> N
1409 * We use a signed argument to detect unassigned values from
1410 * directory entry creation time in HFS.
1411 */
1412 if (isdir) {
1413 /* Directories are not allowed to have F, but they can have "NONE" */
1414 return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
1415 (protectionclass <= PROTECTION_CLASS_D));
1416 }
1417 else {
1418 return ((protectionclass >= PROTECTION_CLASS_A) &&
1419 (protectionclass <= PROTECTION_CLASS_F));
1420 }
1421 }
1422
1423
1424 static struct cprotect *
1425 cp_entry_alloc(size_t keylen)
1426 {
1427 struct cprotect *cp_entry;
1428
1429 if (keylen > CP_MAX_WRAPPEDKEYSIZE)
1430 return (NULL);
1431
1432 MALLOC(cp_entry, struct cprotect *, sizeof(struct cprotect) + keylen,
1433 M_TEMP, M_WAITOK);
1434 if (cp_entry == NULL)
1435 return (NULL);
1436
1437 bzero(cp_entry, sizeof(*cp_entry) + keylen);
1438 cp_entry->cp_persistent_key_len = keylen;
1439 return (cp_entry);
1440 }
1441
1442 static void
1443 cp_entry_dealloc(struct cprotect *entry)
1444 {
1445 uint32_t keylen = entry->cp_persistent_key_len;
1446 bzero(entry, (sizeof(*entry) + keylen));
1447 FREE(entry, M_TEMP);
1448 }
1449
1450
1451 /*
1452 * Initializes a new cprotect entry with xattr data from the cnode.
1453 * cnode lock held shared
1454 */
1455 static int
1456 cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, struct cprotect **outentry)
1457 {
1458 int error = 0;
1459 uio_t auio;
1460 size_t attrsize;
1461 char uio_buf[UIO_SIZEOF(1)];
1462 struct vnop_getxattr_args args;
1463 struct cprotect *entry = NULL;
1464
1465 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
1466 args.a_desc = NULL; // unused
1467 args.a_vp = cp->c_vp;
1468 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1469 args.a_uio = auio;
1470 args.a_options = XATTR_REPLACE;
1471 args.a_context = vfs_context_current(); // unused
1472
1473 switch (hfsmp->hfs_running_cp_major_vers) {
1474 case CP_NEW_MAJOR_VERS: {
1475 struct cp_xattr_v4 *xattr = NULL;
1476 MALLOC (xattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
1477 if (xattr == NULL) {
1478 error = ENOMEM;
1479 break;
1480 }
1481 bzero(xattr, sizeof (struct cp_xattr_v4));
1482 attrsize = sizeof(*xattr);
1483
1484 uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
1485 args.a_size = &attrsize;
1486
1487 error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
1488 if (error != 0) {
1489 FREE (xattr, M_TEMP);
1490 goto out;
1491 }
1492
1493 /* Endian swap the multi-byte fields into host endianness from L.E. */
1494 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1495 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1496 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1497 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1498 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1499
1500 if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers ) {
1501 printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
1502 xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
1503 error = EINVAL;
1504 FREE (xattr, M_TEMP);
1505
1506 goto out;
1507 }
1508 /*
1509 * Prevent a buffer overflow, and validate the key length obtained from the
1510 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1511 * point.
1512 */
1513 if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE) {
1514 error = EINVAL;
1515 FREE (xattr, M_TEMP);
1516
1517 goto out;
1518 }
1519
1520 /*
1521 * Class F files have no backing key; their keylength should be 0,
1522 * though they should have the proper flags set.
1523 *
1524 * A request to instantiate a CP for a class F file should result
1525 * in a bzero'd cp that just says class F, with key_flushed set.
1526 */
1527
1528 /* set up entry with information from xattr */
1529 entry = cp_entry_alloc(xattr->key_size);
1530 if (!entry) {
1531 FREE (xattr, M_TEMP);
1532
1533 return ENOMEM;
1534 }
1535
1536 entry->cp_pclass = xattr->persistent_class;
1537
1538 /*
1539 * Suppress invalid flags that should not be set.
1540 * If we have gotten this far, then CP_NO_XATTR cannot possibly
1541 * be valid; the EA exists.
1542 */
1543 xattr->flags &= ~CP_NO_XATTR;
1544
1545 entry->cp_flags = xattr->flags;
1546 if (xattr->xattr_major_version >= CP_NEW_MAJOR_VERS) {
1547 entry->cp_flags |= CP_OFF_IV_ENABLED;
1548 }
1549
1550 if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_F ) {
1551 bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
1552 }
1553
1554 FREE (xattr, M_TEMP);
1555
1556 break;
1557 }
1558 case CP_PREV_MAJOR_VERS: {
1559 struct cp_xattr_v2 *xattr = NULL;
1560 MALLOC (xattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
1561 if (xattr == NULL) {
1562 error = ENOMEM;
1563 break;
1564 }
1565 bzero (xattr, sizeof (struct cp_xattr_v2));
1566 attrsize = sizeof(*xattr);
1567
1568 uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
1569 args.a_size = &attrsize;
1570
1571 error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
1572 if (error != 0) {
1573 FREE (xattr, M_TEMP);
1574 goto out;
1575 }
1576
1577 /* Endian swap the multi-byte fields into host endianness from L.E. */
1578 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1579 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1580 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1581 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1582 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1583
1584 if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
1585 printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
1586 xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
1587 error = EINVAL;
1588 FREE (xattr, M_TEMP);
1589 goto out;
1590 }
1591
1592 /*
1593 * Prevent a buffer overflow, and validate the key length obtained from the
1594 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1595 * point.
1596 */
1597 if (xattr->key_size > CP_V2_WRAPPEDKEYSIZE) {
1598 error = EINVAL;
1599 FREE (xattr, M_TEMP);
1600 goto out;
1601 }
1602 /* set up entry with information from xattr */
1603 entry = cp_entry_alloc(xattr->key_size);
1604 if (!entry) {
1605 FREE (xattr, M_TEMP);
1606 return ENOMEM;
1607 }
1608
1609 entry->cp_pclass = xattr->persistent_class;
1610
1611 /*
1612 * Suppress invalid flags that should not be set.
1613 * If we have gotten this far, then CP_NO_XATTR cannot possibly
1614 * be valid; the EA exists.
1615 */
1616 xattr->flags &= ~CP_NO_XATTR;
1617
1618 entry->cp_flags = xattr->flags;
1619
1620 if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_F ) {
1621 bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
1622 }
1623
1624 FREE (xattr, M_TEMP);
1625 break;
1626 }
1627 }
1628
1629 out:
1630 uio_free(auio);
1631
1632 *outentry = entry;
1633 return error;
1634 }
1635
1636 /*
1637 * If permitted, restore entry's unwrapped key from the persistent key.
1638 * If not, clear key and set CP_KEY_FLUSHED.
1639 * cnode lock held exclusive
1640 */
1641 static int
1642 cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
1643 {
1644 int error = 0;
1645
1646 error = cp_unwrap(hfsmp, entry, cp);
1647 if (error) {
1648 entry->cp_flags |= CP_KEY_FLUSHED;
1649 bzero(entry->cp_cache_key, entry->cp_cache_key_len);
1650 error = EPERM;
1651 }
1652 else {
1653 /* ready for business */
1654 entry->cp_flags &= ~CP_KEY_FLUSHED;
1655
1656 }
1657 return error;
1658 }
1659
1660 static int
1661 cp_lock_vfs_callback(mount_t mp, void *arg)
1662 {
1663
1664 /* Use a pointer-width integer field for casting */
1665 unsigned long new_state;
1666 struct hfsmount *hfsmp;
1667
1668 /*
1669 * When iterating the various mount points that may
1670 * be present on a content-protected device, we need to skip
1671 * those that do not have it enabled.
1672 */
1673 if (!cp_fs_protected(mp)) {
1674 return 0;
1675 }
1676 new_state = (unsigned long) arg;
1677
1678 hfsmp = VFSTOHFS(mp);
1679
1680 hfs_lock_mount(hfsmp);
1681 /* this loses all of the upper bytes of precision; that's OK */
1682 hfsmp->hfs_cp_lock_state = (uint8_t) new_state;
1683 hfs_unlock_mount(hfsmp);
1684
1685 if (new_state == CP_LOCKED_STATE) {
1686 /*
1687 * We respond only to lock events. Since cprotect structs
1688 * decrypt/restore keys lazily, the unlock events don't
1689 * actually cause anything to happen.
1690 */
1691 return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg);
1692 }
1693 /* Otherwise just return 0. */
1694 return 0;
1695
1696 }
1697
1698
1699 /*
1700 * Deny access to protected files if keys have been locked.
1701 */
1702 static int
1703 cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
1704 {
1705 int error = 0;
1706
1707 /*
1708 * For now it's OK to examine the state variable here without
1709 * holding the HFS lock. This is only a short-circuit; if the state
1710 * transitions (or is in transition) after we examine this field, we'd
1711 * have to handle that anyway.
1712 */
1713 if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
1714 return 0;
1715 }
1716
1717 if (!cp->c_cpentry) {
1718 /* unprotected node */
1719 return 0;
1720 }
1721
1722 if (!S_ISREG(cp->c_mode)) {
1723 return 0;
1724 }
1725
1726 /* Deny all access for class A files */
1727 switch (CP_CLASS(cp->c_cpentry->cp_pclass)) {
1728 case PROTECTION_CLASS_A: {
1729 error = EPERM;
1730 break;
1731 }
1732 default:
1733 error = 0;
1734 break;
1735 }
1736
1737 return error;
1738 }
1739
1740 /*
1741 * Respond to a lock or unlock event.
1742 * On lock: clear out keys from memory, then flush file contents.
1743 * On unlock: nothing (function not called).
1744 */
/*
 * Per-vnode worker for cp_lock_vnode_callback iteration (see
 * cp_lock_vfs_callback).  On a lock event, for class A regular files:
 * zero-fills sparse ranges, pushes dirty pages to disk, scrubs the
 * in-core keys, and invalidates cached pages.  Other vnodes (no cprotect,
 * non-class-A, directories) are left untouched.  Unlock events are no-ops.
 * 'arg' carries the CP_*_STATE code as a pointer-width integer.
 */
static int
cp_lock_vnode_callback(struct vnode *vp, void *arg)
{
	cnode_t *cp = NULL;
	struct cprotect *entry = NULL;
	int error = 0;
	int locked = 1;
	unsigned long action = 0;
	int took_truncate_lock = 0;

	/* Hold an iocount across the whole operation. */
	error = vnode_getwithref (vp);
	if (error) {
		return error;
	}

	cp = VTOC(vp);

	/*
	 * When cleaning cnodes due to a lock event, we must
	 * take the truncate lock AND the cnode lock. By taking
	 * the truncate lock here, we force (nearly) all pending IOs
	 * to drain before we can acquire the truncate lock. All HFS cluster
	 * io calls except for swapfile IO need to acquire the truncate lock
	 * prior to calling into the cluster layer.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

	entry = cp->c_cpentry;
	if (!entry) {
		/* unprotected vnode: not a regular file */
		goto out;
	}

	action = (unsigned long) arg;
	switch (action) {
		case CP_LOCKED_STATE: {
			vfs_context_t ctx;
			if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
				vnode_isdir(vp)) {
				/*
				 * There is no change at lock for other classes than A.
				 * B is kept in memory for writing, and class F (for VM) does
				 * not have a wrapped key, so there is no work needed for
				 * wrapping/unwrapping.
				 *
				 * Note that 'class F' is relevant here because if
				 * hfs_vnop_strategy does not take the cnode lock
				 * to protect the cp blob across IO operations, we rely
				 * implicitly on the truncate lock to be held when doing IO.
				 * The only case where the truncate lock is not held is during
				 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
				 * directly to cluster_pageout.
				 */
				goto out;
			}

			/* Before doing anything else, zero-fill sparse ranges as needed */
			ctx = vfs_context_current();
			(void) hfs_filedone (vp, ctx, 0);

			/* first, sync back dirty pages */
			hfs_unlock (cp);
			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
			hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

			/* flush keys:
			 * There was a concern here(9206856) about flushing keys before nand layer is done using them.
			 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
			 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF.
			 * Which in turn calls buf_biodone() and eventually unblocks ubc_msync()
			 * Also verified that the cached data in IOFS is overwritten by other data, and there
			 * is no key leakage in that layer.
			 */

			entry->cp_flags |= CP_KEY_FLUSHED;
			bzero(&entry->cp_cache_key, entry->cp_cache_key_len);
			bzero(&entry->cp_cache_iv_ctx, sizeof(aes_encrypt_ctx));

			/* some writes may have arrived in the meantime; dump those pages */
			hfs_unlock(cp);
			locked = 0;

			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
			break;
		}
		case CP_UNLOCKED_STATE: {
			/* no-op */
			break;
		}
		default:
			panic("Content Protection: unknown lock action %lu\n", action);
	}

out:
	if (locked) {
		hfs_unlock(cp);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}

	vnode_put (vp);
	return error;
}
1853
1854
1855 /*
1856 * cp_rewrap:
1857 *
1858 * Generate a new wrapped key based on the existing cache key.
1859 */
1860
1861 static int
1862 cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass)
1863 {
1864
1865 struct cprotect *entry = cp->c_cpentry;
1866 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
1867 size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
1868 int error = 0;
1869 newclass = CP_CLASS(newclass);
1870
1871 /* Structures passed between HFS and AKS */
1872 cp_cred_s access_in;
1873 cp_wrapped_key_s wrapped_key_in;
1874 cp_wrapped_key_s wrapped_key_out;
1875
1876 /*
1877 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
1878 * key that is only good as long as the file is open. There is no
1879 * wrapped key, so there isn't anything to wrap.
1880 */
1881 if (newclass == PROTECTION_CLASS_F) {
1882 return EINVAL;
1883 }
1884
1885 cp_init_access(&access_in, cp);
1886
1887 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
1888 wrapped_key_in.key = entry->cp_persistent_key;
1889 wrapped_key_in.key_len = entry->cp_persistent_key_len;
1890 /* Use the persistent class when talking to AKS */
1891 wrapped_key_in.dp_class = entry->cp_pclass;
1892
1893 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
1894 wrapped_key_out.key = new_persistent_key;
1895 wrapped_key_out.key_len = keylen;
1896
1897 /*
1898 * inode is passed here to find the backup bag wrapped blob
1899 * from userspace. This lookup will occur shortly after creation
1900 * and only if the file still exists. Beyond this lookup the
1901 * inode is not used. Technically there is a race, we practically
1902 * don't lose.
1903 */
1904 error = g_cp_wrap_func.rewrapper(&access_in,
1905 newclass, /* new class */
1906 &wrapped_key_in,
1907 &wrapped_key_out);
1908
1909 keylen = wrapped_key_out.key_len;
1910
1911 if (error == 0) {
1912 struct cprotect *newentry = NULL;
1913 /*
1914 * Verify that AKS returned to us a wrapped key of the
1915 * target class requested.
1916 */
1917 /* Get the effective class here */
1918 int effective = CP_CLASS(wrapped_key_out.dp_class);
1919 if (effective != newclass) {
1920 /*
1921 * Fail the operation if defaults or some other enforcement
1922 * dictated that the class be wrapped differently.
1923 */
1924
1925 /* TODO: Invalidate the key when 12170074 unblocked */
1926 return EPERM;
1927 }
1928
1929 /* v2 EA's don't support the larger class B keys */
1930 if ((keylen != CP_V2_WRAPPEDKEYSIZE) &&
1931 (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
1932 return EINVAL;
1933 }
1934
1935 /* Allocate a new cpentry */
1936 newentry = cp_entry_alloc (keylen);
1937 bcopy (entry, newentry, sizeof(struct cprotect));
1938
1939 /* copy the new key into the entry */
1940 bcopy (new_persistent_key, newentry->cp_persistent_key, keylen);
1941 newentry->cp_persistent_key_len = keylen;
1942 newentry->cp_backing_cnode = cp;
1943
1944 /* Actually record/store what AKS reported back, not the effective class stored in newclass */
1945 newentry->cp_pclass = wrapped_key_out.dp_class;
1946
1947 /* Attach the new entry to the cnode */
1948 cp->c_cpentry = newentry;
1949
1950 /* destroy the old entry */
1951 cp_entry_destroy (entry);
1952 }
1953 else {
1954 error = EPERM;
1955 }
1956
1957 return error;
1958 }
1959
1960
/*
 * cp_unwrap
 *
 * Ask AKS to unwrap 'entry's persistent (wrapped) key and fill in the
 * in-core cache key, its length, and (for new-version EAs) the offset-IV
 * AES context.  Returns 0 on success; EPERM on any failure, including
 * class F files, which carry no wrapped key at all.
 */
static int
cp_unwrap(struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
{
	int error = 0;
	uint8_t iv_key[CP_IV_KEYSIZE];

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;
	cp_raw_key_s key_out;

	/*
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open. There is no
	 * wrapped key, so there isn't anything to unwrap.
	 */
	if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
		return EPERM;
	}

	/* Build the credential/cnode info AKS needs to authorize the unwrap. */
	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = entry->cp_persistent_key;
	wrapped_key_in.key_len = entry->cp_persistent_key_len;
	/* Use the persistent class when talking to AKS */
	wrapped_key_in.dp_class = entry->cp_pclass;

	/* Output buffers: unwrapped key goes straight into the cprotect cache. */
	bzero(&key_out, sizeof(key_out));
	key_out.iv_key = iv_key;
	key_out.key = entry->cp_cache_key;
	/*
	 * The unwrapper should validate/set the key length for
	 * the IV key length and the cache key length, however we need
	 * to supply the correct buffer length so that AKS knows how
	 * many bytes it has to work with.
	 */
	key_out.iv_key_len = CP_IV_KEYSIZE;
	key_out.key_len = CP_MAX_CACHEBUFLEN;

	error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, &key_out);
	if (!error) {
		/* Sanity-check what AKS reported against the buffers we supplied. */
		if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
			panic ("cp_unwrap: invalid key length! (%ul)\n", key_out.key_len);
		}

		if (key_out.iv_key_len == 0 || key_out.iv_key_len > CP_IV_KEYSIZE) {
			panic ("cp_unwrap: invalid iv key length! (%ul)\n", key_out.iv_key_len);
		}

		entry->cp_cache_key_len = key_out.key_len;

		/* No need to go here for older EAs */
		if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
			/* Prime the offset-IV AES context from the returned IV key. */
			aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
			entry->cp_flags |= CP_OFF_IV_ENABLED;
		}

		/* Is the key a raw wrapped key? */
		if (key_out.flags & CP_RAW_KEY_WRAPPEDKEY) {
			/* OR in the right bit for the cprotect */
			entry->cp_flags |= CP_SEP_WRAPPEDKEY;
		}

	} else {
		/* Collapse all AKS failures to EPERM for callers. */
		error = EPERM;
	}

	return error;
}
2031
2032 /* Setup AES context */
2033 static int
2034 cp_setup_aes_ctx(struct cprotect *entry)
2035 {
2036 SHA1_CTX sha1ctxt;
2037 uint8_t cp_cache_iv_key[CP_IV_KEYSIZE]; /* Kiv */
2038
2039 /* First init the cp_cache_iv_key[] */
2040 SHA1Init(&sha1ctxt);
2041
2042 /*
2043 * We can only use this when the keys are generated in the AP; As a result
2044 * we only use the first 32 bytes of key length in the cache key
2045 */
2046 SHA1Update(&sha1ctxt, &entry->cp_cache_key[0], CP_MAX_KEYSIZE);
2047 SHA1Final(&cp_cache_iv_key[0], &sha1ctxt);
2048
2049 aes_encrypt_key128(&cp_cache_iv_key[0], &entry->cp_cache_iv_ctx);
2050
2051 return 0;
2052 }
2053
2054 /*
2055 * cp_generate_keys
2056 *
2057 * Take a cnode that has already been initialized and establish persistent and
2058 * cache keys for it at this time. Note that at the time this is called, the
2059 * directory entry has already been created and we are holding the cnode lock
2060 * on 'cp'.
2061 *
2062 */
2063 int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, int targetclass,
2064 uint32_t keyflags, struct cprotect **newentry)
2065 {
2066
2067 int error = 0;
2068 struct cprotect *newcp = NULL;
2069 *newentry = NULL;
2070
2071 /* Target class must be an effective class only */
2072 targetclass = CP_CLASS(targetclass);
2073
2074 /* Validate that it has a cprotect already */
2075 if (cp->c_cpentry == NULL) {
2076 /* We can't do anything if it shouldn't be protected. */
2077 return 0;
2078 }
2079
2080 /* Asserts for the underlying cprotect */
2081 if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
2082 /* should already have an xattr by this point. */
2083 error = EINVAL;
2084 goto out;
2085 }
2086
2087 if (S_ISREG(cp->c_mode)) {
2088 if ((cp->c_cpentry->cp_flags & CP_NEEDS_KEYS) == 0){
2089 error = EINVAL;
2090 goto out;
2091 }
2092 }
2093
2094 error = cp_new (targetclass, hfsmp, cp, cp->c_mode, keyflags, &newcp);
2095 if (error) {
2096 /*
2097 * Key generation failed. This is not necessarily fatal
2098 * since the device could have transitioned into the lock
2099 * state before we called this.
2100 */
2101 error = EPERM;
2102 goto out;
2103 }
2104
2105 /*
2106 * If we got here, then we have a new cprotect.
2107 * Attempt to write the new one out.
2108 */
2109 error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);
2110
2111 if (error) {
2112 /* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
2113 /* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
2114 if (newcp) {
2115 cp_entry_destroy(newcp);
2116 }
2117 goto out;
2118 }
2119
2120 /*
2121 * If we get here then we can assert that:
2122 * 1) generated wrapped/unwrapped keys.
2123 * 2) wrote the new keys to disk.
2124 * 3) cprotect is ready to go.
2125 */
2126
2127 newcp->cp_flags &= ~CP_NEEDS_KEYS;
2128 *newentry = newcp;
2129
2130 out:
2131 return error;
2132
2133 }
2134
2135 void cp_replace_entry (struct cnode *cp, struct cprotect *newentry)
2136 {
2137
2138 if (cp->c_cpentry) {
2139 cp_entry_destroy (cp->c_cpentry);
2140 }
2141 cp->c_cpentry = newentry;
2142 newentry->cp_backing_cnode = cp;
2143
2144 return;
2145 }
2146
2147
2148 /*
2149 * cp_new
2150 *
2151 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
2152 * allocate a cprotect, and vend it back to the caller.
2153 *
2154 * Additionally, decide if keys are even needed -- directories get cprotect data structures
2155 * but they do not have keys.
2156 *
2157 */
2158
2159 static int
2160 cp_new(int newclass_eff, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode,
2161 uint32_t keyflags, struct cprotect **output_entry)
2162 {
2163 struct cprotect *entry = NULL;
2164 int error = 0;
2165 uint8_t new_key[CP_MAX_CACHEBUFLEN];
2166 size_t new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tell us the proper key length, how much of this is used */
2167 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2168 size_t new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
2169 uint8_t iv_key[CP_IV_KEYSIZE];
2170 size_t iv_key_len = CP_IV_KEYSIZE;
2171 int iswrapped = 0;
2172
2173 newclass_eff = CP_CLASS(newclass_eff);
2174
2175 /* Structures passed between HFS and AKS */
2176 cp_cred_s access_in;
2177 cp_wrapped_key_s wrapped_key_out;
2178 cp_raw_key_s key_out;
2179
2180 if (*output_entry != NULL) {
2181 panic ("cp_new with non-null entry!");
2182 }
2183
2184 if (are_wraps_initialized == false) {
2185 printf("hfs: cp_new: wrap/gen functions not yet set\n");
2186 return ENXIO;
2187 }
2188
2189 /* Sanity check that it's a file or directory here */
2190 if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
2191 return EPERM;
2192 }
2193
2194 /*
2195 * Step 1: Generate Keys if needed.
2196 *
2197 * For class F files, the kernel provides the key.
2198 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2199 * key that is only good as long as the file is open. There is no
2200 * wrapped key, so there isn't anything to wrap.
2201 *
2202 * For class A->D files, the key store provides the key
2203 *
2204 * For Directories, we only give them a class ; no keys.
2205 */
2206 if (S_ISDIR (cmode)) {
2207 /* Directories */
2208 new_persistent_len = 0;
2209 new_key_len = 0;
2210
2211 error = 0;
2212 }
2213 else {
2214 /* Must be a file */
2215 if (newclass_eff == PROTECTION_CLASS_F) {
2216 /* class F files are not wrapped; they can still use the max key size */
2217 new_key_len = CP_MAX_KEYSIZE;
2218 read_random (&new_key[0], new_key_len);
2219 new_persistent_len = 0;
2220
2221 error = 0;
2222 }
2223 else {
2224 /*
2225 * The keystore is provided the file ID so that it can associate
2226 * the wrapped backup blob with this key from userspace. This
2227 * lookup occurs after successful file creation. Beyond this, the
2228 * file ID is not used. Note that there is a potential race here if
2229 * the file ID is re-used.
2230 */
2231 cp_init_access(&access_in, cp);
2232
2233 bzero(&key_out, sizeof(key_out));
2234 key_out.key = new_key;
2235 key_out.iv_key = iv_key;
2236 /*
2237 * AKS will override our key length fields, but we need to supply
2238 * the length of the buffer in those length fields so that
2239 * AKS knows hoa many bytes it has to work with.
2240 */
2241 key_out.key_len = new_key_len;
2242 key_out.iv_key_len = iv_key_len;
2243
2244 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2245 wrapped_key_out.key = new_persistent_key;
2246 wrapped_key_out.key_len = new_persistent_len;
2247
2248 error = g_cp_wrap_func.new_key(&access_in,
2249 newclass_eff,
2250 &key_out,
2251 &wrapped_key_out);
2252
2253 if (error) {
2254 /* keybag returned failure */
2255 error = EPERM;
2256 goto cpnew_fail;
2257 }
2258
2259 /* Now sanity-check the output from new_key */
2260 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2261 panic ("cp_new: invalid key length! (%ul) \n", key_out.key_len);
2262 }
2263
2264 if (key_out.iv_key_len == 0 || key_out.iv_key_len > CP_IV_KEYSIZE) {
2265 panic ("cp_new: invalid iv key length! (%ul) \n", key_out.iv_key_len);
2266 }
2267
2268 /*
2269 * AKS is allowed to override our preferences and wrap with a
2270 * different class key for policy reasons. If we were told that
2271 * any class other than the one specified is unacceptable then error out
2272 * if that occurred. Check that the effective class returned by
2273 * AKS is the same as our effective new class
2274 */
2275 if ((int)(CP_CLASS(wrapped_key_out.dp_class)) != newclass_eff) {
2276 if (keyflags & CP_KEYWRAP_DIFFCLASS) {
2277 newclass_eff = CP_CLASS(wrapped_key_out.dp_class);
2278 }
2279 else {
2280 error = EPERM;
2281 /* TODO: When 12170074 fixed, release/invalidate the key! */
2282 goto cpnew_fail;
2283 }
2284 }
2285
2286 new_key_len = key_out.key_len;
2287 iv_key_len = key_out.iv_key_len;
2288 new_persistent_len = wrapped_key_out.key_len;
2289
2290 /* Is the key a SEP wrapped key? */
2291 if (key_out.flags & CP_RAW_KEY_WRAPPEDKEY) {
2292 iswrapped = 1;
2293 }
2294 }
2295 }
2296
2297 /*
2298 * Step 2: allocate cprotect and initialize it.
2299 */
2300
2301
2302 /*
2303 * v2 EA's don't support the larger class B keys
2304 */
2305 if ((new_persistent_len != CP_V2_WRAPPEDKEYSIZE) &&
2306 (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
2307 return EINVAL;
2308 }
2309
2310 entry = cp_entry_alloc (new_persistent_len);
2311 if (entry == NULL) {
2312 return ENOMEM;
2313 }
2314
2315 *output_entry = entry;
2316
2317 /*
2318 * For directories and class F files, just store the effective new class.
2319 * AKS does not interact with us in generating keys for F files, and directories
2320 * don't actually have keys.
2321 */
2322 if ( S_ISDIR (cmode) || (newclass_eff == PROTECTION_CLASS_F)) {
2323 entry->cp_pclass = newclass_eff;
2324 }
2325 else {
2326 /*
2327 * otherwise, store what AKS actually returned back to us.
2328 * wrapped_key_out is only valid if we have round-tripped to AKS
2329 */
2330 entry->cp_pclass = wrapped_key_out.dp_class;
2331 }
2332
2333 /* Copy the cache key & IV keys into place if needed. */
2334 if (new_key_len > 0) {
2335 bcopy (new_key, entry->cp_cache_key, new_key_len);
2336 entry->cp_cache_key_len = new_key_len;
2337
2338
2339 /* Initialize the IV key */
2340 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
2341 if (newclass_eff == PROTECTION_CLASS_F) {
2342 /* class F needs a full IV initialize */
2343 cp_setup_aes_ctx(entry);
2344 }
2345 else {
2346 /* Key store gave us an iv key. Just need to wrap it.*/
2347 aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
2348 }
2349 entry->cp_flags |= CP_OFF_IV_ENABLED;
2350 }
2351 }
2352 if (new_persistent_len > 0) {
2353 bcopy(new_persistent_key, entry->cp_persistent_key, new_persistent_len);
2354 }
2355
2356 /* Mark it as a wrapped key if necessary */
2357 if (iswrapped) {
2358 entry->cp_flags |= CP_SEP_WRAPPEDKEY;
2359 }
2360
2361 cpnew_fail:
2362 return error;
2363 }
2364
2365 /* Initialize the cp_cred_t structure passed to AKS */
2366 static void cp_init_access(cp_cred_t access, struct cnode *cp)
2367 {
2368 vfs_context_t context = vfs_context_current();
2369 kauth_cred_t cred = vfs_context_ucred(context);
2370 proc_t proc = vfs_context_proc(context);
2371
2372 bzero(access, sizeof(*access));
2373
2374 /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
2375 access->inode = cp->c_fileid;
2376 access->pid = proc_pid(proc);
2377 access->uid = kauth_cred_getuid(cred);
2378
2379 return;
2380 }
2381
2382 #else
2383
/*
 * Stub for kernels built without CONFIG_PROTECT: key-store state
 * transitions are not supported, so always report ENOTSUP.
 */
int cp_key_store_action(int action __unused)
{
	return ENOTSUP;
}
2388
2389
/*
 * Stub for kernels built without CONFIG_PROTECT: there is no content
 * protection, so wrap/unwrap function registration always fails.
 */
int cp_register_wraps(cp_wrap_func_t key_store_func __unused)
{
	return ENOTSUP;
}
2394
2395 #endif /* CONFIG_PROTECT */