1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <sys/cprotect.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/random.h>
32 #include <sys/xattr.h>
33 #include <sys/uio_internal.h>
34 #include <sys/ubc_internal.h>
35 #include <sys/vnode_if.h>
36 #include <sys/vnode_internal.h>
37 #include <sys/fcntl.h>
38 #include <libkern/OSByteOrder.h>
39
40 #include "hfs.h"
41 #include "hfs_cnode.h"
42
43 #if CONFIG_PROTECT
44 static struct cp_wrap_func g_cp_wrap_func = {NULL, NULL};
45 static struct cp_global_state g_cp_state = {0, 0, 0};
46
47 extern int (**hfs_vnodeop_p) (void *);
48
49 /*
50 * CP private functions
51 */
52 static int cp_is_valid_class(int);
53 static int cp_root_major_vers(mount_t mp);
54 static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
55 static struct cprotect *cp_entry_alloc(size_t);
56 static void cp_entry_dealloc(struct cprotect *entry);
57 static int cp_setup_aes_ctx(struct cprotect *);
58 static int cp_make_keys (struct cprotect **, struct hfsmount *hfsmp, cnid_t, int);
59 static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp);
60 static int cp_lock_vfs_callback(mount_t, void *);
61 static int cp_lock_vnode_callback(vnode_t, void *);
62 static int cp_vnode_is_eligible (vnode_t);
63 static int cp_check_access (cnode_t *, int);
64 static int cp_wrap(int, struct hfsmount *hfsmp, cnid_t, struct cprotect**);
65 static int cp_unwrap(int, struct cprotect *);
66
67
68
69 #if DEVELOPMENT || DEBUG
70 #define CP_ASSERT(x) \
71 if ((x) == 0) { \
72 panic("Content Protection: failed assertion in %s", __FUNCTION__); \
73 }
74 #else
75 #define CP_ASSERT(x)
76 #endif
77
78 int
79 cp_key_store_action(int action)
80 {
81 g_cp_state.lock_state = action;
82 if (action == CP_LOCKED_STATE) {
83 /*
84 * Note that because we use the void* arg to pass the key store
85 * state into the vfs cp iteration, we pass the int around as a pointer.
86 * The cast through uintptr_t avoids 32/64-bit truncation warnings.
87 */
88 return vfs_iterate(0, cp_lock_vfs_callback, (void*)((uintptr_t)action));
89 }
90
91 return 0;
92
93 }
94
95
96 int
97 cp_register_wraps(cp_wrap_func_t key_store_func)
98 {
99 g_cp_wrap_func.wrapper = key_store_func->wrapper;
100 g_cp_wrap_func.unwrapper = key_store_func->unwrapper;
101
102 g_cp_state.wrap_functions_set = 1;
103
104 return 0;
105 }
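
/*
 * Illustrative sketch (not compiled): how a key-store client might plug into
 * the two entry points above. The handler names below are hypothetical, and
 * their prototypes are only inferred from the call sites in this file; the
 * authoritative typedefs for cp_wrap_func_t live in sys/cprotect.h.
 */
#if 0
static int
example_wrapper(uint32_t pclass, cnid_t fileid, const uint8_t *key, size_t key_len,
        uint8_t *wrapped_out, size_t *wrapped_len)
{
    /* Hand the raw per-file key to the keybag for wrapping in class 'pclass'. */
    return 0;
}

static int
example_unwrapper(uint32_t pclass, const uint8_t *wrapped, size_t wrapped_len,
        uint8_t *key_out, size_t *key_len)
{
    /* Reverse operation: recover the raw per-file key from the wrapped blob. */
    return 0;
}

static void
example_keystore_attach(void)
{
    struct cp_wrap_func funcs = { example_wrapper, example_unwrapper };

    /* Register the wrap/unwrap handlers with HFS content protection. */
    cp_register_wraps(&funcs);

    /* Later, when the device locks, flush class A keys on all CP volumes. */
    cp_key_store_action(CP_LOCKED_STATE);
}
#endif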
106
107 #if 0
108 /*
109 * If necessary, this function can be used to
110 * query the device's lock state.
111 */
112 int
113 cp_isdevice_locked (void) {
114 if (g_cp_state.lock_state == CP_UNLOCKED_STATE) {
115 return 0;
116 }
117 return 1;
118 }
119 #endif
120
121 /*
122 * Allocate and initialize a cprotect blob for a new cnode.
123 * Called from hfs_getnewvnode: cnode is locked exclusive.
124 * Read xattr data off the cnode. Then, if conditions permit,
125 * unwrap the file key and cache it in the cprotect blob.
126 */
127 int
128 cp_entry_init(struct cnode *cp, struct mount *mp)
129 {
130 struct cprotect *entry = NULL;
131 int error = 0;
132 struct hfsmount *hfsmp = VFSTOHFS(mp);
133
134 if (!cp_fs_protected (mp)) {
135 cp->c_cpentry = NULL;
136 return 0;
137 }
138
139 if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
140 cp->c_cpentry = NULL;
141 return 0;
142 }
143
144 if (!g_cp_state.wrap_functions_set) {
145 printf("hfs: cp_update_entry: wrap functions not yet set\n");
146 return ENXIO;
147 }
148
149 if (hfsmp->hfs_running_cp_major_vers == 0) {
150 cp_root_major_vers(mp);
151 }
152
153 CP_ASSERT (cp->c_cpentry == NULL);
154
155 error = cp_getxattr(cp, hfsmp, &entry);
156
157 /*
158 * Normally we should always have a CP EA for a file or directory that
159 * we are initializing here. However, there are a few exceptional cases,
160 * such as the root directory immediately following a newfs_hfs.
161 *
162 * As a result, we keep code here to handle ENOATTR, which always
163 * defaults to a class 'D' key, though we don't expect to use it much.
164 */
165 if (error == ENOATTR) {
166 int sub_error;
167
168 sub_error = cp_entry_create_keys (&entry, NULL, hfsmp, PROTECTION_CLASS_D, cp->c_fileid, cp->c_mode);
169
170 /* Now we have keys. Write them out. */
171 if (sub_error == 0) {
172 sub_error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
173 }
174 error = sub_error;
175 }
176 else if (error == 0) {
177 if (S_ISREG(cp->c_mode)) {
178 entry->cp_flags |= CP_KEY_FLUSHED;
179 }
180 }
181 /*
182 * For errors other than ENOATTR, we don't do anything.
183 * cp_entry_destroy can deal with a NULL argument if cp_getxattr
184 * failed malloc or there was a B-Tree error.
185 */
186
187 cp->c_cpentry = entry;
188
189 if (error) {
190 cp_entry_destroy(&cp->c_cpentry);
191 }
192
193 return error;
194 }
195
196 /*
197 * Set up initial key/class pair on cnode. The cnode does not yet exist,
198 * so we must take a pointer to the cprotect struct.
199 *
200 * NOTE:
201 * We call this function in two places:
202 * 1) hfs_makenode *prior* to taking the journal/b-tree locks.
203 * A successful return from this function is a prerequisite for continuing
204 * with file creation, as a wrap failure should immediately preclude the creation of
205 * the file.
206 *
207 * 2) cp_entry_init if we are trying to establish keys for a file/directory that did not
208 * have them already. (newfs_hfs may create entries in the namespace).
209 *
210 * At this point, we hold the directory cnode lock exclusive if it is available.
211 */
212 int
213 cp_entry_create_keys(struct cprotect **entry_ptr, struct cnode *dcp, struct hfsmount *hfsmp,
214 uint32_t input_class, cnid_t fileid, mode_t cmode)
215 {
216 int error = 0;
217 struct cprotect *entry = NULL;
218 size_t keylen;
219
220 /* Default to class D */
221 uint32_t target_class = PROTECTION_CLASS_D;
222
223 /* Decide the target class. Input argument takes priority. */
224 if (cp_is_valid_class (input_class)) {
225 target_class = input_class;
226 /*
227 * One exception: class F is never valid for a directory,
228 * because its children may inherit it and userland would then be
229 * unable to read/write the files.
230 */
231 if (S_ISDIR(cmode)) {
232 if (target_class == PROTECTION_CLASS_F) {
233 return EINVAL;
234 }
235 }
236 }
237 else {
238 /* If no valid class was supplied, then inherit from parent if possible */
239 if ((dcp) && (dcp->c_cpentry)) {
240 uint32_t parentclass = dcp->c_cpentry->cp_pclass;
241 /* If the parent class is not valid, default back to D */
242 if (cp_is_valid_class(parentclass)) {
243 /* Parent class was good. use it. */
244 target_class = parentclass;
245 }
246 /* Otherwise, we already defaulted to 'D' */
247 }
248 }
249
250 keylen = S_ISDIR(cmode) ? 0 : CP_INITIAL_WRAPPEDKEYSIZE;
251 entry = cp_entry_alloc (keylen);
252 if (!entry) {
253 *entry_ptr = NULL;
254 return ENOMEM;
255 }
256
257 if (S_ISREG(cmode)) {
258 entry->cp_pclass = target_class;
259 entry->cp_flags |= CP_NEEDS_KEYS;
260 /*
261 * The 'fileid' argument to this function will either be
262 * a valid fileid for an existing file/dir, or it will be 0.
263 * If it is 0, that is an indicator to the layer below that the
264 * file does not yet exist, so the cp_wrap call down to the keybag
265 * can be bypassed.
266 *
267 * If we are being invoked on behalf of a file/dir that exists but
268 * does not yet have a key, then it will be a valid fileid and we
269 * need to behave like a setclass.
270 */
271 error = cp_make_keys(&entry, hfsmp, fileid, entry->cp_pclass);
272 }
273 else if (S_ISDIR(cmode)) {
274 /* Directories just get their cp_pclass set */
275 entry->cp_pclass = target_class;
276 }
277 else {
278 /* Unsupported for non-dir and non-file. */
279 error = EINVAL;
280 }
281
282 /*
283 * We only initialize and create the keys here; we cannot
284 * write out the EA until the journal lock and EA b-tree locks
285 * are acquired.
286 */
287
288 if (error) {
289 /* destroy the CP blob */
290 cp_entry_destroy (&entry);
291 *entry_ptr = NULL;
292 }
293 else {
294 /* otherwise, emit the cprotect entry */
295 *entry_ptr = entry;
296 }
297
298 return error;
299 }
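
/*
 * Illustrative sketch (not compiled) of the two-phase pattern described in the
 * NOTE above, roughly as a caller like hfs_makenode would use it: wrap the key
 * *before* the journal/b-tree locks are taken, then persist the EA once the
 * transaction is open. Error handling and the actual catalog work are omitted;
 * names such as 'new_fileid' and 'cmode' are stand-ins for the caller's state.
 */
#if 0
static int
example_create_with_keys(struct cnode *dcp, struct hfsmount *hfsmp,
        cnid_t new_fileid, mode_t cmode)
{
    struct cprotect *entry = NULL;
    int error;

    /* Phase 1: generate and wrap the per-file key (no locks held yet).
     * Passing 0 as the class inherits from 'dcp' where possible. */
    error = cp_entry_create_keys(&entry, dcp, hfsmp, 0, 0, cmode);
    if (error) {
        return error;
    }

    /* ... take the journal / b-tree locks and create the catalog record ... */

    /* Phase 2: write the wrapped key out as the CP EA for the new fileid. */
    error = cp_setxattr(NULL, entry, hfsmp, new_fileid, XATTR_CREATE);

    /* ... end the transaction; attach 'entry' to the new cnode or destroy it ... */
    return error;
}
#endif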
300
301 /*
302 * Set up an initial key/class pair for a disassociated cprotect entry.
303 * This function is used to generate transient keys that will never be
304 * written to disk. We use class F for this since it provides the exact
305 * semantics that are needed here. Because we never attach this blob to
306 * a cnode directly, we take a pointer to the cprotect struct.
307 *
308 * This function is primarily used in the HFS truncation codepath,
309 * where we rely on AES symmetry to relocate encrypted data from
310 * one spot on the disk to another.
311 */
312 int cp_entry_gentempkeys(struct cprotect **entry_ptr, struct hfsmount *hfsmp) {
313 int error = 0;
314 struct cprotect *entry = NULL;
315 size_t keylen;
316
317 /* Default to class F */
318 uint32_t target_class = PROTECTION_CLASS_F;
319
320 /*
321 * This should only be used for files, so we default to the
322 * initial wrapped key size
323 */
324 keylen = CP_INITIAL_WRAPPEDKEYSIZE;
325 entry = cp_entry_alloc (keylen);
326 if (!entry) {
327 *entry_ptr = NULL;
328 return ENOMEM;
329 }
330
331 error = cp_make_keys (&entry, hfsmp, 0, target_class);
332
333 /*
334 * We only initialize the keys here; we don't write anything out
335 */
336
337 if (error) {
338 /* destroy the CP blob */
339 cp_entry_destroy (&entry);
340 *entry_ptr = NULL;
341 }
342 else {
343 /* otherwise, emit the cprotect entry */
344 *entry_ptr = entry;
345 }
346
347 return error;
348
349 }
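
/*
 * Illustrative sketch (not compiled): the intended lifetime of a transient
 * class F entry. Nothing is ever written to disk for it; a hypothetical caller
 * generates it, uses the cached key for the duration of the relocation work,
 * then tears it down again.
 */
#if 0
static int
example_relocation_keys(struct hfsmount *hfsmp)
{
    struct cprotect *tmp_entry = NULL;
    int error;

    error = cp_entry_gentempkeys(&tmp_entry, hfsmp);
    if (error) {
        return error;
    }

    /* ... encrypt/decrypt relocated extents with tmp_entry->cp_cache_key ... */

    /* The transient keys never hit disk; just zero and free the blob. */
    cp_entry_destroy(&tmp_entry);
    return 0;
}
#endif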
350
351 /*
352 * Tear down and clear a cprotect blob for a closing file.
353 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
354 */
355 void
356 cp_entry_destroy(struct cprotect **entry_ptr) {
357 struct cprotect *entry = *entry_ptr;
358 if (!entry) {
359 /* nothing to clean up */
360 return;
361 }
362 *entry_ptr = NULL;
363 cp_entry_dealloc(entry);
364 }
365
366
367 int
368 cp_fs_protected (mount_t mnt) {
369 return (vfs_flags(mnt) & MNT_CPROTECT);
370 }
371
372
373 /*
374 * Return a pointer to underlying cnode if there is one for this vnode.
375 * Done without taking cnode lock, inspecting only vnode state.
376 */
377 struct cnode *
378 cp_get_protected_cnode(struct vnode *vp)
379 {
380 if (!cp_vnode_is_eligible(vp)) {
381 return NULL;
382 }
383
384 if (!cp_fs_protected(VTOVFS(vp))) {
385 /* mount point doesn't support it */
386 return NULL;
387 }
388
389 return (struct cnode*) vp->v_data;
390 }
391
392
393 /*
394 * Sets *class to persistent class associated with vnode,
395 * or returns error.
396 */
397 int
398 cp_vnode_getclass(struct vnode *vp, int *class)
399 {
400 struct cprotect *entry;
401 int error = 0;
402 struct cnode *cp;
403 int took_truncate_lock = 0;
404 struct hfsmount *hfsmp = NULL;
405
406 /* Is this an interesting vp? */
407 if (!cp_vnode_is_eligible (vp)) {
408 return EBADF;
409 }
410
411 /* Is the mount point formatted for content protection? */
412 if (!cp_fs_protected(VTOVFS(vp))) {
413 return EPERM;
414 }
415
416 cp = VTOC(vp);
417 hfsmp = VTOHFS(vp);
418
419 /*
420 * Take the truncate lock up-front in shared mode because we may need
421 * to manipulate the CP blob. Pend lock events until we're done here.
422 */
423 hfs_lock_truncate (cp, HFS_SHARED_LOCK);
424 took_truncate_lock = 1;
425
426 /*
427 * We take only the shared cnode lock up-front. If it turns out that
428 * we need to manipulate the CP blob to write a key out, drop the
429 * shared cnode lock and acquire an exclusive lock.
430 */
431 error = hfs_lock(cp, HFS_SHARED_LOCK);
432 if (error) {
433 hfs_unlock_truncate(cp, 0);
434 return error;
435 }
436
437 /* pull the class from the live entry */
438 entry = cp->c_cpentry;
439
440 if (!entry) {
441 panic("Content Protection: uninitialized cnode %p", cp);
442 }
443
444 /*
445 * Any vnode on a content protected filesystem must have keys
446 * created by the time the vnode is vended out. If we generate
447 * a vnode that does not have keys, something bad happened.
448 */
449 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
450 panic ("cp_vnode_getclass: cp %p has no keys!", cp);
451 }
452
453 if (error == 0) {
454 *class = entry->cp_pclass;
455 }
456
457 if (took_truncate_lock) {
458 hfs_unlock_truncate(cp, 0);
459 }
460
461 hfs_unlock(cp);
462 return error;
463 }
464
465
466 /*
467 * Sets persistent class for this file or directory.
468 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
469 * If the new class can't be accessed now, EPERM.
470 * Otherwise, record class and re-wrap key if the mount point is content-protected.
471 */
472 int
473 cp_vnode_setclass(struct vnode *vp, uint32_t newclass)
474 {
475 struct cnode *cp;
476 struct cprotect *entry = 0;
477 int error = 0;
478 int took_truncate_lock = 0;
479 u_int32_t keylen = 0;
480 struct hfsmount *hfsmp = NULL;
481
482 if (!cp_is_valid_class(newclass)) {
483 printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
484 return EINVAL;
485 }
486
487 if (vnode_isdir(vp)) {
488 if (newclass == PROTECTION_CLASS_F) {
489 /*
490 * Directories are not allowed to set to class F, since the
491 * children may inherit it and then userland will not be able
492 * to read/write to the file.
493 */
494 return EINVAL;
495 }
496 }
497
498 /* Is this an interesting vp? */
499 if (!cp_vnode_is_eligible(vp)) {
500 return EBADF;
501 }
502
503 /* Is the mount point formatted for content protection? */
504 if (!cp_fs_protected(VTOVFS(vp))) {
505 return EPERM;
506 }
507
508 cp = VTOC(vp);
509 hfsmp = VTOHFS(vp);
510
511 /*
512 * Take the cnode truncate lock exclusive because we want to manipulate the
513 * CP blob. The lock-event handling code is doing the same. This also forces
514 * all pending IOs to drain before we can re-write the persistent and cache keys.
515 */
516 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK);
517 took_truncate_lock = 1;
518
519 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK)) {
520 hfs_unlock_truncate (cp, 0);
521 return EINVAL;
522 }
523 entry = cp->c_cpentry;
524 if (entry == NULL) {
525 error = EINVAL;
526 goto out;
527 }
528
529 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
530 /*
531 * We should have created this vnode and its keys atomically during
532 * file/directory creation. If we get here and it doesn't have keys yet,
533 * something bad happened.
534 */
535 panic ("cp_vnode_setclass: cp %p has no keys!\n", cp);
536 }
537
538 if (entry->cp_flags & CP_KEY_FLUSHED) {
539 error = cp_restore_keys(entry, hfsmp);
540 if (error)
541 goto out;
542 }
543
544 /* re-wrap per-file key with new class */
545 if (vnode_isreg(vp)) {
546 error = cp_wrap(newclass, hfsmp, cp->c_fileid, &cp->c_cpentry);
547 if (error) {
548 /* we didn't have perms to set this class. leave file as-is and error out */
549 goto out;
550 }
551 }
552
553 /* cp_wrap() potentially updates c_cpentry because we passed in its ptr */
554 entry = cp->c_cpentry;
555
556 entry->cp_pclass = newclass;
557
558 /* prepare to write the xattr out */
559 keylen = entry->cp_persistent_key_len;
560
561 error = cp_setxattr(cp, entry, VTOHFS(vp), 0, XATTR_REPLACE);
562 if (error == ENOATTR)
563 error = cp_setxattr(cp, entry, VTOHFS(vp), 0, XATTR_CREATE);
564
565 out:
566
567 if (took_truncate_lock) {
568 hfs_unlock_truncate (cp, 0);
569 }
570 hfs_unlock(cp);
571 return error;
572 }
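
/*
 * Illustrative sketch (not compiled): the userspace view of the two functions
 * above. The F_GETPROTECTIONCLASS / F_SETPROTECTIONCLASS fcntls declared in
 * sys/fcntl.h are assumed to be the entry points that ultimately reach
 * cp_vnode_getclass() / cp_vnode_setclass() for an HFS vnode.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

static void
example_query_and_set_class(int fd)
{
    int class = fcntl(fd, F_GETPROTECTIONCLASS);
    if (class < 0) {
        perror("F_GETPROTECTIONCLASS");
        return;
    }
    printf("current protection class: %d\n", class);

    /* Re-wrap the per-file key under class C; fails with EPERM if not permitted. */
    if (fcntl(fd, F_SETPROTECTIONCLASS, 3 /* PROTECTION_CLASS_C */) < 0) {
        perror("F_SETPROTECTIONCLASS");
    }
}
#endif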
573
574
575 int cp_vnode_transcode(vnode_t vp)
576 {
577 struct cnode *cp;
578 struct cprotect *entry = 0;
579 int error = 0;
580 int took_truncate_lock = 0;
581 struct hfsmount *hfsmp = NULL;
582
583 /* Is this an interesting vp? */
584 if (!cp_vnode_is_eligible(vp)) {
585 return EBADF;
586 }
587
588 /* Is the mount point formatted for content protection? */
589 if (!cp_fs_protected(VTOVFS(vp))) {
590 return EPERM;
591 }
592
593 cp = VTOC(vp);
594 hfsmp = VTOHFS(vp);
595
596 /*
597 * Take the cnode truncate lock exclusive because we want to manipulate the
598 * CP blob. The lock-event handling code is doing the same. This also forces
599 * all pending IOs to drain before we can re-write the persistent and cache keys.
600 */
601 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK);
602 took_truncate_lock = 1;
603
604 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK)) {
605 hfs_unlock_truncate (cp, 0);
606 return EINVAL;
607 }
608 entry = cp->c_cpentry;
609 if (entry == NULL) {
610 error = EINVAL;
611 goto out;
612 }
613
614 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
615 /*
616 * If we are transcoding keys for AKB, then we should have already established
617 * a set of keys for this vnode. If we don't have keys yet, then something bad
618 * happened.
619 */
620 panic ("cp_vnode_transcode: cp %p has no keys!", cp);
621 }
622
623 if (entry->cp_flags & CP_KEY_FLUSHED) {
624 error = cp_restore_keys(entry, hfsmp);
625
626 if (error) {
627 goto out;
628 }
629 }
630
631 /* Send the per-file key for re-wrap with the current class information
632 * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
633 * Don't need to process any outputs, so just clear the locks and pass along the error. */
634 if (vnode_isreg(vp)) {
635
636 /* Picked up the following from cp_wrap().
637 * If needed, more comments available there. */
638
639 if (entry->cp_pclass == PROTECTION_CLASS_F) {
640 error = EINVAL;
641 goto out;
642 }
643
644 error = g_cp_wrap_func.wrapper(entry->cp_pclass,
645 cp->c_fileid,
646 entry->cp_cache_key,
647 entry->cp_cache_key_len,
648 NULL,
649 NULL);
650
651 if (error)
652 error = EPERM;
653 }
654
655 out:
656 if (took_truncate_lock) {
657 hfs_unlock_truncate (cp, 0);
658 }
659 hfs_unlock(cp);
660 return error;
661 }
662
663
664 /*
665 * Check permission for the given operation (read, write) on this node.
666 * Additionally, if the node needs work, do it:
667 * - create a new key for the file if one hasn't been set before
668 * - write out the xattr if it hasn't already been saved
669 * - unwrap the key if needed
670 *
671 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
672 *
673 * Note that this function does *NOT* take the cnode truncate lock, because
674 * the calling thread may already hold it. The truncate lock is not needed here:
675 * either we finish this function before the keys are tossed (and any later IO
676 * against flushed keys will fail), or the keys are tossed first and this function fails.
677 * Either way, the cnode lock ultimately guards the keys. We only rely on the
678 * truncate lock to protect against tossing the keys while a cluster call is in flight.
679 */
680 int
681 cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
682 {
683 struct cprotect *entry;
684 int error = 0;
685 struct hfsmount *hfsmp = NULL;
686 struct cnode *cp = NULL;
687
688 /*
689 * First, do validation against the vnode before proceeding any further:
690 * Is this vnode originating from a valid content-protected filesystem ?
691 */
692 if (cp_vnode_is_eligible(vp) == 0) {
693 /*
694 * It is either not HFS or not a file/dir. Just return success. This is a valid
695 * case when servicing I/O against another filesystem type from VFS.
696 */
697 return 0;
698 }
699
700 if (cp_fs_protected (VTOVFS(vp)) == 0) {
701 /*
702 * The underlying filesystem does not support content protection. This is also
703 * a valid case. Simply return success.
704 */
705 return 0;
706 }
707
708 /*
709 * At this point, we know we have a HFS vnode that backs a file or directory on a
710 * filesystem that supports content protection
711 */
712 cp = VTOC(vp);
713
714 if ((error = hfs_lock(cp, HFS_SHARED_LOCK))) {
715 return error;
716 }
717
718 entry = cp->c_cpentry;
719
720 if (!entry) {
721 /*
722 * If this cnode is not content protected, simply return success.
723 * Note that this function is called by all I/O-based call sites
724 * when CONFIG_PROTECT is enabled during XNU building.
725 */
726
727 goto out;
728 }
729
730 vp = CTOV(cp, 0);
731 if (vp == NULL) {
732 /* no data fork vnode; try the resource fork */
733 vp = CTOV(cp, 1);
734 if (vp == NULL) {
735 error = EINVAL;
736 goto out;
737 }
738 }
739 hfsmp = VTOHFS(vp);
740
741 if ((error = cp_check_access(cp, vnop))) {
742 /* check for raw encrypted access before bailing out */
743 if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
744 /*
745 * read access only + asking for the raw encrypted bytes
746 * is legitimate, so reset the error value to 0
747 */
748 error = 0;
749 }
750 else {
751 goto out;
752 }
753 }
754
755 if (entry->cp_flags == 0) {
756 /* no more work to do */
757 goto out;
758 }
759
760 /* upgrade to exclusive lock */
761 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
762 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
763 return error;
764 }
765 } else {
766 cp->c_lockowner = current_thread();
767 }
768
769 /* generate new keys if none have ever been saved */
770 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
771 /*
772 * By the time we're trying to initiate I/O against a content
773 * protected vnode, we should have already created keys for this
774 * file/dir. If we don't have keys, something bad happened.
775 */
776 panic ("cp_handle_vnop: cp %p has no keys!", cp);
777 }
778
779 /* unwrap keys if needed */
780 if (entry->cp_flags & CP_KEY_FLUSHED) {
781 if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
782 /* no need to try to restore keys; they are not going to be used */
783 error = 0;
784 }
785 else {
786 error = cp_restore_keys(entry, hfsmp);
787
788 if (error) {
789 goto out;
790 }
791 }
792 }
793
794 /* write out the xattr if it's new */
795 if (entry->cp_flags & CP_NO_XATTR)
796 error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);
797
798 out:
799
800 hfs_unlock(cp);
801 return error;
802 }
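
/*
 * Illustrative sketch (not compiled): the shape of a read-path caller. The
 * real hfs_vnop_read/hfs_vnop_write entry points are assumed to perform a
 * check along these lines before handing the request to the cluster layer.
 */
#if 0
static int
example_read_gate(struct vnode *vp, int ioflag)
{
    /* Refuse (or permit IO_ENCRYPTED raw reads) based on class and lock state,
     * restoring flushed keys and writing a pending EA if necessary. */
    int error = cp_handle_vnop(vp, CP_READ_ACCESS, ioflag);
    if (error) {
        return error;
    }

    /* ... proceed with cluster reads against the unwrapped keys ... */
    return 0;
}
#endif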
803
804
805 int
806 cp_handle_open(struct vnode *vp, int mode)
807 {
808 struct cnode *cp = NULL ;
809 struct cprotect *entry = NULL;
810 int error = 0;
811
812 /* If vnode not eligible, just return success */
813 if (!cp_vnode_is_eligible(vp)) {
814 return 0;
815 }
816
817 /* If mount point not properly set up, then also return success */
818 if (!cp_fs_protected(VTOVFS(vp))) {
819 return 0;
820 }
821
822 /* We know the vnode is in a valid state. Acquire the cnode lock and validate. */
823 cp = VTOC(vp);
824
825 if ((error = hfs_lock(cp, HFS_SHARED_LOCK))) {
826 return error;
827 }
828
829 entry = cp->c_cpentry;
830 if (!entry)
831 goto out;
832
833 if (!S_ISREG(cp->c_mode))
834 goto out;
835
836 switch (entry->cp_pclass) {
837 case PROTECTION_CLASS_B:
838 /* Class B always allows creation */
839 if (mode & O_CREAT)
840 goto out;
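/* Not an O_CREAT open: fall through to the class A unwrap check. */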
841 case PROTECTION_CLASS_A:
842 error = g_cp_wrap_func.unwrapper(entry->cp_pclass,
843 entry->cp_persistent_key,
844 entry->cp_persistent_key_len,
845 NULL, NULL);
846 if (error)
847 error = EPERM;
848 break;
849 default:
850 break;
851 }
852
853 out:
854 hfs_unlock(cp);
855 return error;
856 }
857
858
859 /*
860 * During hfs resize operations, we have slightly different constraints than during
861 * normal VNOPS that read/write data to files. Specifically, we already have the cnode
862 * locked (so nobody else can modify it), and we are doing the IO with root privileges, since
863 * we are moving the data behind the user's back. So, we skip access checks here (for unlock
864 * vs. lock), and don't worry about non-existing keys. If the file exists on-disk with valid
865 * payload, then it must have keys set up already by definition.
866 */
867 int
868 cp_handle_relocate (struct cnode *cp, struct hfsmount *hfsmp) {
869 struct cprotect *entry;
870 int error = -1;
871
872 /* cp is already locked */
873 entry = cp->c_cpentry;
874 if (!entry)
875 goto out;
876
877 /*
878 * Still need to validate whether to permit access to the file or not
879 * based on lock status
880 */
881 if ((error = cp_check_access(cp, CP_READ_ACCESS | CP_WRITE_ACCESS))) {
882 goto out;
883 }
884
885 if (entry->cp_flags == 0) {
886 /* no more work to do */
887 error = 0;
888 goto out;
889 }
890
891 /* it must have keys since it is an existing file with actual payload */
892
893 /* unwrap keys if needed */
894 if (entry->cp_flags & CP_KEY_FLUSHED) {
895 error = cp_restore_keys(entry, hfsmp);
896 }
897
898 /*
899 * Don't need to write out the EA since if the file has actual extents,
900 * it must have an EA
901 */
902 out:
903
904 /* return the cp still locked */
905 return error;
906 }
907
908 /*
909 * cp_getrootxattr:
910 * Reads the EA we set on the root folder (fileid 1) to obtain information about the
911 * version of Content Protection that was used to write to this filesystem.
912 * Note that all multi-byte fields are written to disk little endian, so they must be
913 * converted to native endianness as needed.
914 */
915 int
916 cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr) {
917 uio_t auio;
918 char uio_buf[UIO_SIZEOF(1)];
919 size_t attrsize = sizeof(struct cp_root_xattr);
920 int error = 0;
921 struct vnop_getxattr_args args;
922
923 if (!outxattr) {
924 panic("Content Protection: cp_xattr called with xattr == NULL");
925 }
926
927 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
928 uio_addiov(auio, CAST_USER_ADDR_T(outxattr), attrsize);
929
930 args.a_desc = NULL; // unused
931 args.a_vp = NULL; // unused since we're reading the EA from the root folder
932 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
933 args.a_uio = auio;
934 args.a_size = &attrsize;
935 args.a_options = XATTR_REPLACE;
936 args.a_context = NULL; // unused
937
938 error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);
939
940 /* Now convert the multi-byte fields to native endianness */
941 outxattr->major_version = OSSwapLittleToHostInt16(outxattr->major_version);
942 outxattr->minor_version = OSSwapLittleToHostInt16(outxattr->minor_version);
943 outxattr->flags = OSSwapLittleToHostInt64(outxattr->flags);
944
945 if (error != 0) {
946 goto out;
947 }
948
949 out:
950 uio_free(auio);
951 return error;
952 }
953
954 /*
955 * cp_setrootxattr:
956 * Sets the EA on the root folder (fileid 1) that records the version of
957 * Content Protection used to write to this filesystem.
958 * Note that all multi-byte fields are written to disk little endian, so they must be
959 * converted from host to little endian as needed.
960 *
961 * This will be written to the disk when it detects the EA is not there, or when we need
962 * to make a modification to the on-disk version that can be done in-place.
963 */
964 int
965 cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
966 {
967 int error = 0;
968 struct vnop_setxattr_args args;
969
970 args.a_desc = NULL;
971 args.a_vp = NULL;
972 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
973 args.a_uio = NULL; //pass data ptr instead
974 args.a_options = 0;
975 args.a_context = NULL; //no context needed, only done from mount.
976
977 /* Now convert the multi-byte fields to little endian before writing to disk. */
978 newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
979 newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);
980 newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);
981
982 error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
983 sizeof(struct cp_root_xattr), &args, hfsmp, 1);
984 return error;
985 }
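
/*
 * Illustrative sketch (not compiled): how a mount-time caller might pair the
 * two routines above. If the root EA is missing (a freshly formatted volume),
 * a new one is written describing the CP version this kernel will use. The
 * error handling and journaling done by the real mount path are omitted.
 */
#if 0
static int
example_establish_root_cp_version(struct hfsmount *hfsmp)
{
    struct cp_root_xattr root_xattr;
    int error;

    bzero(&root_xattr, sizeof(root_xattr));
    error = cp_getrootxattr(hfsmp, &root_xattr);
    if (error == 0) {
        hfsmp->hfs_running_cp_major_vers = root_xattr.major_version;
        return 0;
    }

    /* No root EA yet: declare the current major/minor version and persist it. */
    root_xattr.major_version = CP_NEW_MAJOR_VERS;
    root_xattr.minor_version = CP_MINOR_VERS;
    root_xattr.flags = 0;
    hfsmp->hfs_running_cp_major_vers = root_xattr.major_version;

    /* cp_setrootxattr() byte-swaps the fields to little endian before writing. */
    return cp_setrootxattr(hfsmp, &root_xattr);
}
#endif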
986
987
988 /*
989 * Stores new xattr data on the cnode.
990 * cnode lock held exclusive (if available).
991 *
992 * This function is also invoked during file creation.
993 */
994 int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp, uint32_t fileid, int options)
995 {
996 int error = 0;
997 size_t attrsize;
998 struct vnop_setxattr_args args;
999 uint32_t target_fileid;
1000 struct cnode *arg_cp = NULL;
1001 uint32_t tempflags = 0;
1002
1003 args.a_desc = NULL;
1004 if (cp) {
1005 args.a_vp = cp->c_vp;
1006 target_fileid = 0;
1007 arg_cp = cp;
1008 }
1009 else {
1010 /*
1011 * When we set the EA in the same txn as the file creation,
1012 * we do not have a vnode/cnode yet. Use the specified fileid.
1013 */
1014 args.a_vp = NULL;
1015 target_fileid = fileid;
1016 }
1017 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1018 args.a_uio = NULL; //pass data ptr instead
1019 args.a_options = options;
1020 args.a_context = vfs_context_current();
1021
1022 /* Sanity-check the CP flags in the CP blob: keys must already exist. */
1023 if (entry->cp_flags & CP_NEEDS_KEYS) {
1024 panic ("cp_setxattr: cp %p , cpentry %p still needs keys!", cp, entry);
1025 }
1026
1027 /* Disable flags that will be invalid as we're writing the EA out at this point. */
1028 tempflags = entry->cp_flags;
1029 tempflags &= ~CP_NO_XATTR;
1030
1031 switch(hfsmp->hfs_running_cp_major_vers) {
1032 case CP_NEW_MAJOR_VERS: {
1033 struct cp_xattr_v4 *newxattr = NULL; // 70+ bytes; don't alloc on stack.
1034 MALLOC (newxattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
1035 if (newxattr == NULL) {
1036 error = ENOMEM;
1037 break;
1038 }
1039 bzero (newxattr, sizeof(struct cp_xattr_v4));
1040
1041 attrsize = sizeof(*newxattr) - CP_MAX_WRAPPEDKEYSIZE + entry->cp_persistent_key_len;
1042
1043 /* Endian swap the multi-byte fields into L.E from host. */
1044 newxattr->xattr_major_version = OSSwapHostToLittleInt16 (hfsmp->hfs_running_cp_major_vers);
1045 newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
1046 newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
1047 newxattr->flags = OSSwapHostToLittleInt32(tempflags);
1048 newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
1049 bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);
1050
1051 error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);
1052
1053 FREE(newxattr, M_TEMP);
1054 break;
1055 }
1056 case CP_PREV_MAJOR_VERS: {
1057 struct cp_xattr_v2 *newxattr = NULL;
1058 MALLOC (newxattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
1059 if (newxattr == NULL) {
1060 error = ENOMEM;
1061 break;
1062 }
1063 bzero (newxattr, sizeof(struct cp_xattr_v2));
1064
1065 attrsize = sizeof(*newxattr);
1066
1067 /* Endian swap the multi-byte fields into L.E from host. */
1068 newxattr->xattr_major_version = OSSwapHostToLittleInt16(hfsmp->hfs_running_cp_major_vers);
1069 newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
1070 newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
1071 newxattr->flags = OSSwapHostToLittleInt32(tempflags);
1072 newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
1073 bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);
1074
1075 error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);
1076
1077 FREE (newxattr, M_TEMP);
1078 break;
1079 }
1080 }
1081
1082 if (error == 0 ) {
1083 entry->cp_flags &= ~CP_NO_XATTR;
1084 }
1085
1086 return error;
1087
1088
1089 }
1090
1091 /*
1092 * This function takes a cprotect struct with the cache keys and re-wraps them for
1093 * MKB's sake so that it can update its own data structures. It is useful when
1094 * there may not be a cnode in existence yet (for example, after creating
1095 * a file).
1096 */
1097 int
1098 cp_update_mkb (struct cprotect *entry, uint32_t fileid) {
1099
1100 int error = 0;
1101
1102 /* We already validated this pclass earlier */
1103 if (entry->cp_pclass != PROTECTION_CLASS_F ) {
1104 error = g_cp_wrap_func.wrapper (entry->cp_pclass, fileid, entry->cp_cache_key,
1105 entry->cp_cache_key_len, NULL, NULL);
1106 }
1107
1108 if (error) {
1109 error = EPERM;
1110 }
1111
1112 return error;
1113 }
1114
1115 /*
1116 * Used by an fcntl to query the underlying FS for its content protection version #
1117 */
1118
1119 int
1120 cp_get_root_major_vers(vnode_t vp, uint32_t *level) {
1121 int err = 0;
1122 struct hfsmount *hfsmp = NULL;
1123 struct mount *mp = NULL;
1124
1125 mp = VTOVFS(vp);
1126
1127 /* check if it supports content protection */
1128 if (cp_fs_protected(mp) == 0) {
1129 return EINVAL;
1130 }
1131
1132 hfsmp = VFSTOHFS(mp);
1133 /* figure out the level */
1134
1135 err = cp_root_major_vers(mp);
1136
1137 if (err == 0) {
1138 *level = hfsmp->hfs_running_cp_major_vers;
1139 }
1140 /* in error case, cp_root_major_vers will just return EINVAL. Use that */
1141
1142 return err;
1143 }
1144
1145 /********************
1146 * Private Functions
1147 *******************/
1148
1149 static int
1150 cp_root_major_vers(mount_t mp)
1151 {
1152 int err = 0;
1153 struct cp_root_xattr xattr;
1154 struct hfsmount *hfsmp = NULL;
1155
1156 hfsmp = vfs_fsprivate(mp);
1157 err = cp_getrootxattr (hfsmp, &xattr);
1158
1159 if (err == 0) {
1160 hfsmp->hfs_running_cp_major_vers = xattr.major_version;
1161 }
1162 else {
1163 return EINVAL;
1164 }
1165
1166 return 0;
1167 }
1168
1169 static int
1170 cp_vnode_is_eligible(struct vnode *vp)
1171 {
1172 return ((vp->v_op == hfs_vnodeop_p) &&
1173 (!vnode_issystem(vp)) &&
1174 (vnode_isreg(vp) || vnode_isdir(vp)));
1175 }
1176
1177
1178
1179 static int
1180 cp_is_valid_class(int class)
1181 {
1182 return ((class >= PROTECTION_CLASS_A) &&
1183 (class <= PROTECTION_CLASS_F));
1184 }
1185
1186
1187 static struct cprotect *
1188 cp_entry_alloc(size_t keylen)
1189 {
1190 struct cprotect *cp_entry;
1191
1192 if (keylen > CP_MAX_WRAPPEDKEYSIZE)
1193 return (NULL);
1194
1195 MALLOC(cp_entry, struct cprotect *, sizeof(struct cprotect) + keylen,
1196 M_TEMP, M_WAITOK);
1197 if (cp_entry == NULL)
1198 return (NULL);
1199
1200 bzero(cp_entry, sizeof(*cp_entry) + keylen);
1201 cp_entry->cp_persistent_key_len = keylen;
1202 return (cp_entry);
1203 }
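
/*
 * Note: the wrapped (persistent) key is stored in the keylen bytes allocated
 * beyond sizeof(struct cprotect) above, which is why callers size the entry
 * by wrapped-key length and why cp_entry_dealloc() below zeroes
 * sizeof(*entry) + cp_persistent_key_len before freeing. The exact layout of
 * cp_persistent_key is defined in sys/cprotect.h.
 */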
1204
1205 static void
1206 cp_entry_dealloc(struct cprotect *entry)
1207 {
1208 uint32_t keylen = entry->cp_persistent_key_len;
1209 bzero(entry, (sizeof(*entry) + keylen));
1210 FREE(entry, M_TEMP);
1211 }
1212
1213
1214 /*
1215 * Initializes a new cprotect entry with xattr data from the cnode.
1216 * cnode lock held shared
1217 */
1218 static int
1219 cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, struct cprotect **outentry)
1220 {
1221 int error = 0;
1222 uio_t auio;
1223 size_t attrsize;
1224 char uio_buf[UIO_SIZEOF(1)];
1225 struct vnop_getxattr_args args;
1226 struct cprotect *entry = NULL;
1227
1228 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
1229 args.a_desc = NULL; // unused
1230 args.a_vp = cp->c_vp;
1231 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1232 args.a_uio = auio;
1233 args.a_options = XATTR_REPLACE;
1234 args.a_context = vfs_context_current(); // unused
1235
1236 switch (hfsmp->hfs_running_cp_major_vers) {
1237 case CP_NEW_MAJOR_VERS: {
1238 struct cp_xattr_v4 *xattr = NULL;
1239 MALLOC (xattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
1240 if (xattr == NULL) {
1241 error = ENOMEM;
1242 break;
1243 }
1244 bzero(xattr, sizeof (struct cp_xattr_v4));
1245 attrsize = sizeof(*xattr);
1246
1247 uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
1248 args.a_size = &attrsize;
1249
1250 error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
1251 if (error != 0) {
1252 FREE (xattr, M_TEMP);
1253 goto out;
1254 }
1255
1256 /* Endian swap the multi-byte fields into host endianness from L.E. */
1257 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1258 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1259 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1260 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1261 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1262
1263 if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers ) {
1264 printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
1265 xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
1266 error = EINVAL;
1267 FREE (xattr, M_TEMP);
1268
1269 goto out;
1270 }
1271 /*
1272 * Prevent a buffer overflow, and validate the key length obtained from the
1273 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1274 * point.
1275 */
1276 if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE) {
1277 error = EINVAL;
1278 FREE (xattr, M_TEMP);
1279
1280 goto out;
1281 }
1282
1283 /* set up entry with information from xattr */
1284 entry = cp_entry_alloc(xattr->key_size);
1285 if (!entry) {
1286 FREE (xattr, M_TEMP);
1287
1288 return ENOMEM;
1289 }
1290
1291 entry->cp_pclass = xattr->persistent_class;
1292 if (xattr->xattr_major_version >= CP_NEW_MAJOR_VERS) {
1293 entry->cp_flags |= CP_OFF_IV_ENABLED;
1294 }
1295 bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
1296
1297 FREE (xattr, M_TEMP);
1298
1299 break;
1300 }
1301 case CP_PREV_MAJOR_VERS: {
1302 struct cp_xattr_v2 *xattr = NULL;
1303 MALLOC (xattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
1304 if (xattr == NULL) {
1305 error = ENOMEM;
1306 break;
1307 }
1308 bzero (xattr, sizeof (struct cp_xattr_v2));
1309 attrsize = sizeof(*xattr);
1310
1311 uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
1312 args.a_size = &attrsize;
1313
1314 error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
1315 if (error != 0) {
1316 FREE (xattr, M_TEMP);
1317 goto out;
1318 }
1319
1320 /* Endian swap the multi-byte fields into host endianness from L.E. */
1321 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1322 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1323 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1324 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1325 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1326
1327 if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
1328 printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
1329 xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
1330 error = EINVAL;
1331 FREE (xattr, M_TEMP);
1332 goto out;
1333 }
1334
1335 /*
1336 * Prevent a buffer overflow, and validate the key length obtained from the
1337 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1338 * point.
1339 */
1340 if (xattr->key_size > CP_V2_WRAPPEDKEYSIZE) {
1341 error = EINVAL;
1342 FREE (xattr, M_TEMP);
1343 goto out;
1344 }
1345 /* set up entry with information from xattr */
1346 entry = cp_entry_alloc(xattr->key_size);
1347 if (!entry) {
1348 FREE (xattr, M_TEMP);
1349 return ENOMEM;
1350 }
1351
1352 entry->cp_pclass = xattr->persistent_class;
1353 bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
1354 FREE (xattr, M_TEMP);
1355 break;
1356 }
1357 }
1358
1359 out:
1360 uio_free(auio);
1361
1362 *outentry = entry;
1363 return error;
1364 }
1365
1366
1367 /* Setup AES context */
1368 static int
1369 cp_setup_aes_ctx(struct cprotect *entry)
1370 {
1371 SHA1_CTX sha1ctxt;
1372 uint8_t cp_cache_iv_key[CP_IV_KEYSIZE]; /* Kiv */
1373
1374 /* First init the cp_cache_iv_key[] */
1375 SHA1Init(&sha1ctxt);
1376 SHA1Update(&sha1ctxt, &entry->cp_cache_key[0], CP_MAX_KEYSIZE);
1377 SHA1Final(&cp_cache_iv_key[0], &sha1ctxt);
1378
1379 aes_encrypt_key128(&cp_cache_iv_key[0], &entry->cp_cache_iv_ctx);
1380
1381 return 0;
1382 }
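
/*
 * In other words, the per-file IV key (Kiv) is derived as SHA-1 over the raw
 * cache key; aes_encrypt_key128() then consumes the first 128 bits of that
 * 160-bit digest as the AES key schedule, which the I/O path presumably uses
 * for IV generation. This only matters on CP_NEW_MAJOR_VERS volumes, where
 * CP_OFF_IV_ENABLED is set.
 */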
1383
1384
1385 /*
1386 * Make a new random per-file key and wrap it.
1387 * Normally this will get default_pclass as PROTECTION_CLASS_D.
1388 *
1389 * But when the directory's class is set, we use that as the default.
1390 */
1391 static int
1392 cp_make_keys(struct cprotect **entry_arg, struct hfsmount *hfsmp, cnid_t fileid, int default_pclass)
1393 {
1394 struct cprotect *entry = *entry_arg;
1395 int target_pclass = 0;
1396 int error = 0;
1397
1398 if (g_cp_state.wrap_functions_set != 1) {
1399 printf("hfs: CP: could not create keys: no wrappers set\n");
1400 return ENXIO;
1401 }
1402
1403 /* create new cp data: key and class */
1404 entry->cp_cache_key_len = CP_MAX_KEYSIZE;
1405 read_random(&entry->cp_cache_key[0], entry->cp_cache_key_len);
1406
1407 if (cp_is_valid_class(default_pclass) == 0) {
1408 target_pclass = PROTECTION_CLASS_D;
1409 } else {
1410 target_pclass = default_pclass;
1411 }
1412
1413 /*
1414 * Attempt to wrap the new key in the class key specified by target_pclass
1415 * Note that because we may be inheriting a protection level specified
1416 * by the containing directory, this can fail; we could be trying to
1417 * wrap this cache key in the class 'A' key while the device is locked.
1418 * As such, emit an error if we fail to wrap the key here, instead of
1419 * panicking.
1420 */
1421
1422 error = cp_wrap(target_pclass, hfsmp, fileid, entry_arg);
1423
1424 if (error) {
1425 goto out;
1426 }
1427 /* cp_wrap() potentially updates c_cpentry */
1428 entry = *entry_arg;
1429
1430 /* set the pclass to the target since the wrap was successful */
1431 entry->cp_pclass = target_pclass;
1432
1433 /* No need to go here for older EAs */
1434 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
1435 cp_setup_aes_ctx(entry);
1436 entry->cp_flags |= CP_OFF_IV_ENABLED;
1437 }
1438
1439 /* ready for business */
1440 entry->cp_flags &= ~CP_NEEDS_KEYS;
1441 entry->cp_flags |= CP_NO_XATTR;
1442
1443 out:
1444 return error;
1445 }
1446
1447 /*
1448 * If permitted, restore entry's unwrapped key from the persistent key.
1449 * If not, clear key and set CP_KEY_FLUSHED.
1450 * cnode lock held exclusive
1451 */
1452 static int
1453 cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp)
1454 {
1455 int error = 0;
1456
1457 error = cp_unwrap(entry->cp_pclass, entry);
1458 if (error) {
1459 entry->cp_flags |= CP_KEY_FLUSHED;
1460 bzero(entry->cp_cache_key, entry->cp_cache_key_len);
1461 error = EPERM;
1462 }
1463 else {
1464 /* No need to go here for older EAs */
1465 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
1466 cp_setup_aes_ctx(entry);
1467 entry->cp_flags |= CP_OFF_IV_ENABLED;
1468 }
1469
1470 /* ready for business */
1471 entry->cp_flags &= ~CP_KEY_FLUSHED;
1472
1473 }
1474 return error;
1475 }
1476
1477 static int
1478 cp_lock_vfs_callback(mount_t mp, void *arg) {
1479
1480 /*
1481 * When iterating the various mount points that may
1482 * be present on a content-protected device, we need to skip
1483 * those that do not have it enabled.
1484 */
1485 if (!cp_fs_protected(mp)) {
1486 return 0;
1487 }
1488
1489 return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg);
1490 }
1491
1492
1493 /*
1494 * Deny access to protected files if keys have been locked.
1495 */
1496 static int
1497 cp_check_access(struct cnode *cp, int vnop __unused)
1498 {
1499 int error = 0;
1500
1501 if (g_cp_state.lock_state == CP_UNLOCKED_STATE) {
1502 return 0;
1503 }
1504
1505 if (!cp->c_cpentry) {
1506 /* unprotected node */
1507 return 0;
1508 }
1509
1510 if (!S_ISREG(cp->c_mode)) {
1511 return 0;
1512 }
1513
1514 /* Deny all access for class A files */
1515 switch (cp->c_cpentry->cp_pclass) {
1516 case PROTECTION_CLASS_A: {
1517 error = EPERM;
1518 break;
1519 }
1520 default:
1521 error = 0;
1522 break;
1523 }
1524
1525 return error;
1526 }
1527
1528 /*
1529 * Respond to a lock or unlock event.
1530 * On lock: clear out keys from memory, then flush file contents.
1531 * On unlock: nothing (function not called).
1532 */
1533 static int
1534 cp_lock_vnode_callback(struct vnode *vp, void *arg)
1535 {
1536 cnode_t *cp = NULL;
1537 struct cprotect *entry = NULL;
1538 int error = 0;
1539 int locked = 1;
1540 int action = 0;
1541 int took_truncate_lock = 0;
1542
1543 error = vnode_getwithref (vp);
1544 if (error) {
1545 return error;
1546 }
1547
1548 cp = VTOC(vp);
1549
1550 /*
1551 * When cleaning cnodes due to a lock event, we must
1552 * take the truncate lock AND the cnode lock. By taking
1553 * the truncate lock here, we force (nearly) all pending IOs
1554 * to drain before we can acquire the truncate lock. All HFS cluster
1555 * io calls except for swapfile IO need to acquire the truncate lock
1556 * prior to calling into the cluster layer.
1557 */
1558 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK);
1559 took_truncate_lock = 1;
1560
1561 hfs_lock(cp, HFS_FORCE_LOCK);
1562
1563 entry = cp->c_cpentry;
1564 if (!entry) {
1565 /* unprotected vnode: not a regular file */
1566 goto out;
1567 }
1568
1569 action = (int)((uintptr_t) arg);
1570 switch (action) {
1571 case CP_LOCKED_STATE: {
1572 vfs_context_t ctx;
1573 if (entry->cp_pclass != PROTECTION_CLASS_A ||
1574 vnode_isdir(vp)) {
1575 /*
1576 * There is no change at lock for other classes than A.
1577 * B is kept in memory for writing, and class F (for VM) does
1578 * not have a wrapped key, so there is no work needed for
1579 * wrapping/unwrapping.
1580 *
1581 * Note that 'class F' is relevant here because if
1582 * hfs_vnop_strategy does not take the cnode lock
1583 * to protect the cp blob across IO operations, we rely
1584 * implicitly on the truncate lock to be held when doing IO.
1585 * The only case where the truncate lock is not held is during
1586 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
1587 * directly to cluster_pageout.
1588 */
1589 goto out;
1590 }
1591
1592 /* Before doing anything else, zero-fill sparse ranges as needed */
1593 ctx = vfs_context_current();
1594 (void) hfs_filedone (vp, ctx);
1595
1596 /* first, sync back dirty pages */
1597 hfs_unlock (cp);
1598 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1599 hfs_lock (cp, HFS_FORCE_LOCK);
1600
1601 /* flush keys:
1602 * There was a concern here (9206856) about flushing keys before the NAND layer is done using them.
1603 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
1604 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF,
1605 * which in turn calls buf_biodone() and eventually unblocks ubc_msync().
1606 * It was also verified that the cached data in IOFS is overwritten by other data, so there
1607 * is no key leakage in that layer.
1608 */
1609
1610 entry->cp_flags |= CP_KEY_FLUSHED;
1611 bzero(&entry->cp_cache_key, entry->cp_cache_key_len);
1612 bzero(&entry->cp_cache_iv_ctx, sizeof(aes_encrypt_ctx));
1613
1614 /* Some writes may have arrived in the meantime; dump those pages. */
1615 hfs_unlock(cp);
1616 locked = 0;
1617
1618 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
1619 break;
1620 }
1621 case CP_UNLOCKED_STATE: {
1622 /* no-op */
1623 break;
1624 }
1625 default:
1626 panic("Content Protection: unknown lock action %d\n", action);
1627 }
1628
1629 out:
1630 if (locked) {
1631 hfs_unlock(cp);
1632 }
1633
1634 if (took_truncate_lock) {
1635 hfs_unlock_truncate (cp, 0);
1636 }
1637
1638 vnode_put (vp);
1639 return error;
1640 }
1641
1642 static int
1643 cp_wrap(int class, struct hfsmount *hfsmp, cnid_t fileid, struct cprotect **entry_ptr)
1644 {
1645
1646 struct cprotect *entry = *entry_ptr;
1647 uint8_t newkey[CP_MAX_WRAPPEDKEYSIZE];
1648 size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
1649 int error = 0;
1650
1651 /*
1652 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
1653 * key that is only good as long as the file is open. There is no
1654 * wrapped key, so there isn't anything to wrap.
1655 */
1656 if (class == PROTECTION_CLASS_F) {
1657 bzero(entry->cp_persistent_key, entry->cp_persistent_key_len);
1658 entry->cp_persistent_key_len = 0;
1659 return 0;
1660 }
1661
1662 /*
1663 * The fileid is passed here so that the backup-bag wrapped blob
1664 * can be found from userspace. This lookup will occur shortly after creation
1665 * and only if the file still exists. Beyond this lookup the
1666 * fileid is not used. Technically there is a race, but in practice
1667 * we don't lose.
1668 */
1669 error = g_cp_wrap_func.wrapper(class,
1670 fileid,
1671 entry->cp_cache_key,
1672 entry->cp_cache_key_len,
1673 newkey,
1674 &keylen);
1675
1676 if (!error) {
1677 /*
1678 * v2 EA's don't support the larger class B keys
1679 */
1680 if ((keylen != CP_V2_WRAPPEDKEYSIZE) &&
1681 (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
1682 return EINVAL;
1683 }
1684
1685 /*
1686 * Reallocate the entry if the new persistent key changed length
1687 */
1688 if (entry->cp_persistent_key_len != keylen) {
1689 struct cprotect *oldentry = entry;
1690
1691 entry = cp_entry_alloc(keylen);
1692 if (entry == NULL)
1693 return ENOMEM;
1694
1695 bcopy(oldentry, entry, sizeof(struct cprotect));
1696 entry->cp_persistent_key_len = keylen;
1697
1698 cp_entry_destroy (&oldentry);
1699
1700 *entry_ptr = entry;
1701 }
1702
1703 bcopy(newkey, entry->cp_persistent_key, keylen);
1704 }
1705 else {
1706 error = EPERM;
1707 }
1708
1709 return error;
1710 }
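
/*
 * Note on the reallocation above: when the wrapped-key length changes, the
 * entire old entry (flags, class, cached key and AES IV context) is bcopy'd
 * into the new allocation before the old one is zeroed and freed, and only
 * then is the freshly wrapped key copied into place. This is why callers such
 * as cp_vnode_setclass() and cp_make_keys() re-load the entry pointer after a
 * successful cp_wrap().
 */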
1711
1712
1713 static int
1714 cp_unwrap(int class, struct cprotect *entry)
1715 {
1716 int error = 0;
1717 size_t keylen = CP_MAX_KEYSIZE;
1718
1719 /*
1720 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
1721 * key that is only good as long as the file is open. There is no
1722 * wrapped key, so there isn't anything to unwrap.
1723 */
1724 if (class == PROTECTION_CLASS_F) {
1725 return EPERM;
1726 }
1727
1728 error = g_cp_wrap_func.unwrapper(class,
1729 entry->cp_persistent_key,
1730 entry->cp_persistent_key_len,
1731 entry->cp_cache_key,
1732 &keylen);
1733 if (!error) {
1734 entry->cp_cache_key_len = keylen;
1735 } else {
1736 error = EPERM;
1737 }
1738
1739 return error;
1740 }
1741
1742
1743 #else
1744
1745 int cp_key_store_action(int action __unused)
1746 {
1747 return ENOTSUP;
1748 }
1749
1750
1751 int cp_register_wraps(cp_wrap_func_t key_store_func __unused)
1752 {
1753 return ENOTSUP;
1754 }
1755
1756 #endif /* CONFIG_PROTECT */