1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <sys/cprotect.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/random.h>
32 #include <sys/xattr.h>
33 #include <sys/uio_internal.h>
34 #include <sys/ubc_internal.h>
35 #include <sys/vnode_if.h>
36 #include <sys/vnode_internal.h>
37 #include <sys/fcntl.h>
38 #include <libkern/OSByteOrder.h>
39 #include <sys/proc.h>
40 #include <sys/kauth.h>
41
42 #include "hfs.h"
43 #include "hfs_cnode.h"
44
45 #if CONFIG_PROTECT
46 static struct cp_wrap_func g_cp_wrap_func = {};
47 static struct cp_global_state g_cp_state = {0, 0, 0};
48
49 extern int (**hfs_vnodeop_p) (void *);
50
51 /*
52 * CP private functions
53 */
54 static int cp_root_major_vers(mount_t mp);
55 static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
56 static struct cprotect *cp_entry_alloc(size_t);
57 static void cp_entry_dealloc(struct cprotect *entry);
58 static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
59 static int cp_lock_vfs_callback(mount_t, void *);
60 static int cp_lock_vnode_callback(vnode_t, void *);
61 static int cp_vnode_is_eligible (vnode_t);
62 static int cp_check_access (cnode_t *, int);
63 static int cp_new(int newclass, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode, struct cprotect **output_entry);
64 static int cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass);
65 static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
66 static int cp_setup_aes_ctx(struct cprotect *entry);
67 static void cp_init_access(cp_cred_t access, struct cnode *cp);
68
69
70
71 #if DEVELOPMENT || DEBUG
72 #define CP_ASSERT(x) \
73 if ((x) == 0) { \
74 panic("Content Protection: failed assertion in %s", __FUNCTION__); \
75 }
76 #else
77 #define CP_ASSERT(x)
78 #endif
79
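/*
 * cp_key_store_action
 *
 * Handle a change in the device lock state as reported by the key store.
 * Record the new state and, on a transition to the locked state, iterate
 * all mounts so that per-file cache keys can be flushed. Unlock events
 * require no work here because keys are restored lazily on first use.
 */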
80 int
81 cp_key_store_action(int action)
82 {
83
84 if (action < 0 || action > CP_MAX_STATE) {
85 return -1;
86 }
87
88 /* this truncates the upper 3 bytes */
89 g_cp_state.lock_state = (uint8_t)action;
90
91 if (action == CP_LOCKED_STATE) {
92 /*
93 * Upcast the value in 'action' to be a pointer-width unsigned integer.
94 * This avoids issues relating to pointer-width.
95 */
96 unsigned long action_arg = (unsigned long) action;
97 return vfs_iterate(0, cp_lock_vfs_callback, (void*)action_arg);
98 }
99
100 /* Do nothing on unlock events */
101 return 0;
102
103 }
104
105
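/*
 * cp_register_wraps
 *
 * Register the key store's wrapping callbacks (new_key, unwrapper,
 * rewrapper, invalidater) and note that they have been set;
 * cp_entry_init refuses to proceed until this has happened.
 */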
106 int
107 cp_register_wraps(cp_wrap_func_t key_store_func)
108 {
109 g_cp_wrap_func.new_key = key_store_func->new_key;
110 g_cp_wrap_func.unwrapper = key_store_func->unwrapper;
111 g_cp_wrap_func.rewrapper = key_store_func->rewrapper;
112 /* do not use invalidater until rdar://12170050 goes in ! */
113 g_cp_wrap_func.invalidater = key_store_func->invalidater;
114
115 g_cp_state.wrap_functions_set = 1;
116
117 return 0;
118 }
119
120 /*
121 * Allocate and initialize a cprotect blob for a new cnode.
122 * Called from hfs_getnewvnode: cnode is locked exclusive.
123 *
124 * Read xattr data off the cnode. Then, if conditions permit,
125 * unwrap the file key and cache it in the cprotect blob.
126 */
127 int
128 cp_entry_init(struct cnode *cp, struct mount *mp)
129 {
130 struct cprotect *entry = NULL;
131 int error = 0;
132 struct hfsmount *hfsmp = VFSTOHFS(mp);
133
134 /*
135 * The cnode should be locked at this point, regardless of whether or not
136 * we are creating a new item in the namespace or vending a vnode on behalf
137 * of lookup. The only time we tell getnewvnode to skip the lock is when
138 * constructing a resource fork vnode. But a resource fork vnode must come
139 * after the regular data fork cnode has already been constructed.
140 */
141 if (!cp_fs_protected (mp)) {
142 cp->c_cpentry = NULL;
143 return 0;
144 }
145
146 if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
147 cp->c_cpentry = NULL;
148 return 0;
149 }
150
151 if (!g_cp_state.wrap_functions_set) {
152 printf("hfs: cp_entry_init: wrap functions not yet set\n");
153 return ENXIO;
154 }
155
156 if (hfsmp->hfs_running_cp_major_vers == 0) {
157 panic ("hfs cp: no running mount point version! ");
158 }
159
160 CP_ASSERT (cp->c_cpentry == NULL);
161
162 error = cp_getxattr(cp, hfsmp, &entry);
163 if (error == 0) {
164 /*
165 * Success; attribute was found, though it may not have keys.
166 * If the entry is returned without keys, we will delay generating
167 * keys until the first I/O.
168 */
169 if (S_ISREG(cp->c_mode)) {
170 if (entry->cp_flags & CP_NEEDS_KEYS) {
171 entry->cp_flags &= ~CP_KEY_FLUSHED;
172 }
173 else {
174 entry->cp_flags |= CP_KEY_FLUSHED;
175 }
176 }
177 }
178 else if (error == ENOATTR) {
179 /*
180 * Normally, we should always have a CP EA for a file or directory that
181 * we are initializing here. However, there are some extenuating circumstances,
182 * such as the root directory immediately following a newfs_hfs.
183 *
184 * As a result, we leave code here to deal with an ENOATTR which will always
185 * default to a 'D/NONE' key, though we don't expect to use it much.
186 */
187 int target_class = PROTECTION_CLASS_D;
188
189 if (S_ISDIR(cp->c_mode)) {
190 target_class = PROTECTION_CLASS_DIR_NONE;
191 }
192 error = cp_new (target_class, hfsmp, cp, cp->c_mode, &entry);
193 if (error == 0) {
194 error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
195 }
196 }
197
198 /*
199 * Bail out if:
200 * a) error was not ENOATTR (we got something bad from the getxattr call)
201 * b) we encountered an error setting the xattr above.
202 * c) we failed to generate a new cprotect data structure.
203 */
204 if (error) {
205 goto out;
206 }
207
208 cp->c_cpentry = entry;
209
210 out:
211 if (error == 0) {
212 entry->cp_backing_cnode = cp;
213 }
214 else {
215 if (entry) {
216 cp_entry_destroy(entry);
217 }
218 cp->c_cpentry = NULL;
219 }
220
221 return error;
222 }
223
224 /*
225 * cp_setup_newentry
226 *
227 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
228 * Since the kext is now responsible for vending us both wrapped and unwrapped keys,
229 * we need to create a keyless xattr upon file / directory creation. Once we have the
230 * file ID and the file/directory is established, we can ask the kext to generate keys.
231 * Note that this introduces a potential race: if the device is locked and the wrapping
232 * keys are purged between the time we call this function and the time we ask for key
233 * generation, we may have to fail the open(2) call and back out the entry.
234 */
235
236 int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp, int32_t suppliedclass,
237 mode_t cmode, struct cprotect **tmpentry)
238 {
239 int isdir = 0;
240 struct cprotect *entry = NULL;
241 uint32_t target_class = hfsmp->default_cp_class;
242
243 if (hfsmp->hfs_running_cp_major_vers == 0) {
244 panic ("CP: major vers not set in mount!");
245 }
246
247 if (S_ISDIR (cmode)) {
248 isdir = 1;
249 }
250
251 /* Decide the target class. Input argument takes priority. */
252 if (cp_is_valid_class (isdir, suppliedclass)) {
253 /* caller supplies -1 if it was not specified so we will default to the mount point value */
254 target_class = suppliedclass;
255 /*
256 * One exception, F is never valid for a directory
257 * because its children may inherit and userland will be
258 * unable to read/write to the files.
259 */
260 if (isdir) {
261 if (target_class == PROTECTION_CLASS_F) {
262 *tmpentry = NULL;
263 return EINVAL;
264 }
265 }
266 }
267 else {
268 /*
269 * If no valid class was supplied, behave differently depending on whether or not
270 * the item being created is a file or directory.
271 *
272 * for FILE:
273 * If parent directory has a non-zero class, use that.
274 * If parent directory has a zero class (not set), then attempt to
275 * apply the mount point default.
276 *
277 * for DIRECTORY:
278 * Directories always inherit from the parent; if the parent
279 * has a NONE class set, then we can continue to use that.
280 */
281 if ((dcp) && (dcp->c_cpentry)) {
282 uint32_t parentclass = dcp->c_cpentry->cp_pclass;
283 /* If the parent class is not valid, default to the mount point value */
284 if (cp_is_valid_class(1, parentclass)) {
285 if (isdir) {
286 target_class = parentclass;
287 }
288 else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
289 /* files can inherit so long as it's not NONE */
290 target_class = parentclass;
291 }
292 }
293 /* Otherwise, we already defaulted to the mount point's default */
294 }
295 }
296
297 /* Generate the cprotect to vend out */
298 entry = cp_entry_alloc (0);
299 if (entry == NULL) {
300 *tmpentry = NULL;
301 return ENOMEM;
302 }
303
304 /*
305 * We don't have keys yet, so fill in what we can. At this point
306 * this blob has no keys and it has no backing xattr. We just know the
307 * target class.
308 */
309 entry->cp_flags = (CP_NEEDS_KEYS | CP_NO_XATTR);
310 entry->cp_pclass = target_class;
311 *tmpentry = entry;
312
313 return 0;
314 }
315
316
317 /*
318 * cp_needs_tempkeys
319 *
320 * Relay to caller whether or not the filesystem should generate temporary keys
321 * during resize operations.
322 */
323
324 int cp_needs_tempkeys (struct hfsmount *hfsmp, int *needs)
325 {
326
327 if (hfsmp->hfs_running_cp_major_vers < CP_PREV_MAJOR_VERS ||
328 hfsmp->hfs_running_cp_major_vers > CP_NEW_MAJOR_VERS) {
329 return -1;
330 }
331
332 /* CP_NEW_MAJOR_VERS implies CP_OFF_IV_ENABLED */
333 if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
334 *needs = 0;
335 }
336 else {
337 *needs = 1;
338 }
339
340 return 0;
341 }
342
343
344 /*
345 * Set up an initial key/class pair for a disassociated cprotect entry.
346 * This function is used to generate transient keys that will never be
347 * written to disk. We use class F for this since it provides the exact
348 * semantics that are needed here. Because we never attach this blob to
349 * a cnode directly, we take a pointer to the cprotect struct.
350 *
351 * This function is primarily used in the HFS FS truncation codepath
352 * where we may rely on AES symmetry to relocate encrypted data from
353 * one spot in the disk to another.
354 */
355 int cp_entry_gentempkeys(struct cprotect **entry_ptr, struct hfsmount *hfsmp)
356 {
357
358 struct cprotect *entry = NULL;
359
360 if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
361 return EPERM;
362 }
363
364 /*
365 * This should only be used for files and won't be written out.
366 * We don't need a persistent key.
367 */
368 entry = cp_entry_alloc (0);
369 if (entry == NULL) {
370 *entry_ptr = NULL;
371 return ENOMEM;
372 }
373 entry->cp_cache_key_len = CP_MAX_KEYSIZE;
374 entry->cp_pclass = PROTECTION_CLASS_F;
375 entry->cp_persistent_key_len = 0;
376
377 /* Generate the class F key */
378 read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);
379
380 /* Generate the IV key */
381 cp_setup_aes_ctx(entry);
382 entry->cp_flags |= CP_OFF_IV_ENABLED;
383
384 *entry_ptr = entry;
385 return 0;
386
387 }
388
389 /*
390 * Tear down and clear a cprotect blob for a closing file.
391 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
392 */
393 void
394 cp_entry_destroy(struct cprotect *entry_ptr)
395 {
396 if (entry_ptr == NULL) {
397 /* nothing to clean up */
398 return;
399 }
400 cp_entry_dealloc(entry_ptr);
401 }
402
403
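/*
 * Return nonzero if the mount was mounted with content protection
 * enabled (MNT_CPROTECT).
 */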
404 int
405 cp_fs_protected (mount_t mnt)
406 {
407 return (vfs_flags(mnt) & MNT_CPROTECT);
408 }
409
410
411 /*
412 * Return a pointer to underlying cnode if there is one for this vnode.
413 * Done without taking cnode lock, inspecting only vnode state.
414 */
415 struct cnode *
416 cp_get_protected_cnode(struct vnode *vp)
417 {
418 if (!cp_vnode_is_eligible(vp)) {
419 return NULL;
420 }
421
422 if (!cp_fs_protected(VTOVFS(vp))) {
423 /* mount point doesn't support it */
424 return NULL;
425 }
426
427 return (struct cnode*) vp->v_data;
428 }
429
430
431 /*
432 * Sets *class to persistent class associated with vnode,
433 * or returns error.
434 */
435 int
436 cp_vnode_getclass(struct vnode *vp, int *class)
437 {
438 struct cprotect *entry;
439 int error = 0;
440 struct cnode *cp;
441 int took_truncate_lock = 0;
442 struct hfsmount *hfsmp = NULL;
443
444 /* Is this an interesting vp? */
445 if (!cp_vnode_is_eligible (vp)) {
446 return EBADF;
447 }
448
449 /* Is the mount point formatted for content protection? */
450 if (!cp_fs_protected(VTOVFS(vp))) {
451 return ENOTSUP;
452 }
453
454 cp = VTOC(vp);
455 hfsmp = VTOHFS(vp);
456
457 /*
458 * Take the truncate lock up-front in shared mode because we may need
459 * to manipulate the CP blob. Pend lock events until we're done here.
460 */
461 hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
462 took_truncate_lock = 1;
463
464 /*
465 * We take only the shared cnode lock up-front. If it turns out that
466 * we need to manipulate the CP blob to write a key out, drop the
467 * shared cnode lock and acquire an exclusive lock.
468 */
469 error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
470 if (error) {
471 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
472 return error;
473 }
474
475 /* pull the class from the live entry */
476 entry = cp->c_cpentry;
477
478 if (entry == NULL) {
479 panic("Content Protection: uninitialized cnode %p", cp);
480 }
481
482 /* Note that we may not have keys yet, but we know the target class. */
483
484 if (error == 0) {
485 *class = entry->cp_pclass;
486 }
487
488 if (took_truncate_lock) {
489 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
490 }
491
492 hfs_unlock(cp);
493 return error;
494 }
495
496
497 /*
498 * Sets persistent class for this file or directory.
499 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
500 * If the new class can't be accessed now, EPERM.
501 * Otherwise, record class and re-wrap key if the mount point is content-protected.
502 */
503 int
504 cp_vnode_setclass(struct vnode *vp, uint32_t newclass)
505 {
506 struct cnode *cp;
507 struct cprotect *entry = 0;
508 int error = 0;
509 int took_truncate_lock = 0;
510 struct hfsmount *hfsmp = NULL;
511 int isdir = 0;
512
513 if (vnode_isdir (vp)) {
514 isdir = 1;
515 }
516
517 if (!cp_is_valid_class(isdir, newclass)) {
518 printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
519 return EINVAL;
520 }
521
522 /* Is this an interesting vp? */
523 if (!cp_vnode_is_eligible(vp)) {
524 return EBADF;
525 }
526
527 /* Is the mount point formatted for content protection? */
528 if (!cp_fs_protected(VTOVFS(vp))) {
529 return ENOTSUP;
530 }
531
532 hfsmp = VTOHFS(vp);
533 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
534 return EROFS;
535 }
536
537 /*
538 * Take the cnode truncate lock exclusive because we want to manipulate the
539 * CP blob. The lock-event handling code is doing the same. This also forces
540 * all pending IOs to drain before we can re-write the persistent and cache keys.
541 */
542 cp = VTOC(vp);
543 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
544 took_truncate_lock = 1;
545
546 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
547 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
548 return EINVAL;
549 }
550 entry = cp->c_cpentry;
551 if (entry == NULL) {
552 error = EINVAL;
553 goto out;
554 }
555
556 /*
557 * re-wrap per-file key with new class.
558 * Generate an entirely new key if switching to F.
559 */
560 if (vnode_isreg(vp)) {
561 /*
562 * The vnode is a file. We need to unwrap the keys before proceeding
563 * with the re-wrap. This ensures that the key material is still usable
564 * under the destination class's semantics (class B allows I/O, but an
565 * unwrap prior to the next unlock will not be allowed, so the keys must
566 * be restored while that is still possible).
567 */
568 if (entry->cp_flags & CP_KEY_FLUSHED) {
569 error = cp_restore_keys (entry, hfsmp, cp);
570 if (error) {
571 goto out;
572 }
573 }
574 if (newclass == PROTECTION_CLASS_F) {
575 /* Verify that file is blockless if switching to class F */
576 if (cp->c_datafork->ff_size > 0) {
577 error = EINVAL;
578 goto out;
579 }
580
581 entry->cp_pclass = newclass;
582 entry->cp_cache_key_len = CP_MAX_KEYSIZE;
583 read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);
584 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
585 cp_setup_aes_ctx (entry);
586 entry->cp_flags |= CP_OFF_IV_ENABLED;
587 }
588 bzero(entry->cp_persistent_key, entry->cp_persistent_key_len);
589 entry->cp_persistent_key_len = 0;
590 } else {
591 /* Deny the setclass if file is to be moved from F to something else */
592 if (entry->cp_pclass == PROTECTION_CLASS_F) {
593 error = EPERM;
594 goto out;
595 }
596 /* We cannot call cp_rewrap unless the keys were already in existence. */
597 if (entry->cp_flags & CP_NEEDS_KEYS) {
598 struct cprotect *newentry = NULL;
599 error = cp_generate_keys (hfsmp, cp, newclass, &newentry);
600 if (error == 0) {
601 cp_replace_entry (cp, newentry);
602 }
603 /* Bypass the setxattr code below since generate_keys does it for us */
604 goto out;
605 }
606 else {
607 error = cp_rewrap(cp, hfsmp, newclass);
608 }
609 }
610 if (error) {
611 /* we didn't have perms to set this class. leave file as-is and error out */
612 goto out;
613 }
614 }
615 else if (vnode_isdir(vp)) {
616 /* For directories, just update the pclass */
617 entry->cp_pclass = newclass;
618 error = 0;
619 }
620 else {
621 /* anything else, just error out */
622 error = EINVAL;
623 goto out;
624 }
625
626 /*
627 * We get here if the new class was F, or if we were re-wrapping a cprotect that already
628 * existed. If the keys were never generated, we already jumped to 'out' above and skipped these setxattr calls.
629 */
630
631 error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
632 if (error == ENOATTR) {
633 error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
634 }
635
636 out:
637
638 if (took_truncate_lock) {
639 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
640 }
641 hfs_unlock(cp);
642 return error;
643 }
644
645
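/*
 * cp_vnode_transcode
 *
 * Ask AKS to re-wrap this file's persistent key with its current protection
 * class. NULL output parameters are passed, so nothing on the cnode changes;
 * AKS does the rest and we only propagate any error.
 */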
646 int cp_vnode_transcode(vnode_t vp)
647 {
648 struct cnode *cp;
649 struct cprotect *entry = 0;
650 int error = 0;
651 int took_truncate_lock = 0;
652 struct hfsmount *hfsmp = NULL;
653
654 /* Structures passed between HFS and AKS */
655 cp_cred_s access_in;
656 cp_wrapped_key_s wrapped_key_in;
657
658 /* Is this an interesting vp? */
659 if (!cp_vnode_is_eligible(vp)) {
660 return EBADF;
661 }
662
663 /* Is the mount point formatted for content protection? */
664 if (!cp_fs_protected(VTOVFS(vp))) {
665 return ENOTSUP;
666 }
667
668 cp = VTOC(vp);
669 hfsmp = VTOHFS(vp);
670
671 /*
672 * Take the cnode truncate lock exclusive because we want to manipulate the
673 * CP blob. The lock-event handling code is doing the same. This also forces
674 * all pending IOs to drain before we can re-write the persistent and cache keys.
675 */
676 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
677 took_truncate_lock = 1;
678
679 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
680 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
681 return EINVAL;
682 }
683 entry = cp->c_cpentry;
684 if (entry == NULL) {
685 error = EINVAL;
686 goto out;
687 }
688
689 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
690 /*
691 * If we are transcoding keys for AKB, then we should have already established
692 * a set of keys for this vnode. If we don't have keys yet, then something bad
693 * happened.
694 */
695 error = EINVAL;
696 goto out;
697 }
698
699 /* Send the per-file key in wrapped form for re-wrap with the current class information.
700 * Send NULLs in the output parameters of the rewrapper() and AKS will do the rest.
701 * We don't need to process any outputs, so just clear the locks and pass along the error. */
702 if (vnode_isreg(vp)) {
703
704 /* Picked up the following from cp_wrap().
705 * If needed, more comments available there. */
706
707 if (entry->cp_pclass == PROTECTION_CLASS_F) {
708 error = EINVAL;
709 goto out;
710 }
711
712 cp_init_access(&access_in, cp);
713
714 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
715 wrapped_key_in.key = entry->cp_persistent_key;
716 wrapped_key_in.key_len = entry->cp_persistent_key_len;
717 wrapped_key_in.dp_class = entry->cp_pclass;
718
719 error = g_cp_wrap_func.rewrapper(&access_in,
720 entry->cp_pclass,
721 &wrapped_key_in,
722 NULL);
723
724 if (error)
725 error = EPERM;
726 }
727
728 out:
729 if (took_truncate_lock) {
730 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
731 }
732 hfs_unlock(cp);
733 return error;
734 }
735
736
737 /*
738 * Check permission for the given operation (read, write) on this node.
739 * Additionally, if the node needs work, do it:
740 * - create a new key for the file if one hasn't been set before
741 * - write out the xattr if it hasn't already been saved
742 * - unwrap the key if needed
743 *
744 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
745 *
746 * Note that this function does *NOT* take the cnode truncate lock. This is because
747 * the thread calling us may already have the truncate lock. It is not necessary
748 * because either we successfully finish this function before the keys are tossed
749 * and the IO will fail, or the keys are tossed and then this function will fail.
750 * Either way, the cnode lock still ultimately guards the keys. We only rely on the
751 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
752 */
753 int
754 cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
755 {
756 struct cprotect *entry;
757 int error = 0;
758 struct hfsmount *hfsmp = NULL;
759 struct cnode *cp = NULL;
760
761 /*
762 * First, do validation against the vnode before proceeding any further:
763 * Is this vnode originating from a valid content-protected filesystem ?
764 */
765 if (cp_vnode_is_eligible(vp) == 0) {
766 /*
767 * It is either not HFS or not a file/dir. Just return success. This is a valid
768 * case if servicing i/o against another filesystem type from VFS
769 */
770 return 0;
771 }
772
773 if (cp_fs_protected (VTOVFS(vp)) == 0) {
774 /*
775 * The underlying filesystem does not support content protection. This is also
776 * a valid case. Simply return success.
777 */
778 return 0;
779 }
780
781 /*
782 * At this point, we know we have a HFS vnode that backs a file or directory on a
783 * filesystem that supports content protection
784 */
785 cp = VTOC(vp);
786
787 if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
788 return error;
789 }
790
791 entry = cp->c_cpentry;
792
793 if (entry == NULL) {
794 /*
795 * A NULL entry means this cnode is not content protected. Note that
796 * this function is called by all I/O-based call sites when
797 * CONFIG_PROTECT is enabled during XNU building.
798 *
799 * All files should have cprotect structs. It's possible to encounter
800 * a directory from a V2.0 CP system without one, but all files should
801 * have protection EAs, so treat a missing entry on a regular file as
802 * an error.
803 */
804 
805 if (vnode_isreg(vp)) {
806 error = EPERM;
807 }
808
809 goto out;
810 }
811
812 vp = CTOV(cp, 0);
813 if (vp == NULL) {
814 /* is it a rsrc */
815 vp = CTOV(cp,1);
816 if (vp == NULL) {
817 error = EINVAL;
818 goto out;
819 }
820 }
821 hfsmp = VTOHFS(vp);
822
823 if ((error = cp_check_access(cp, vnop))) {
824 /* check for raw encrypted access before bailing out */
825 if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
826 /*
827 * read access only + asking for the raw encrypted bytes
828 * is legitimate, so reset the error value to 0
829 */
830 error = 0;
831 }
832 else {
833 goto out;
834 }
835 }
836
837 if (entry->cp_flags == 0) {
838 /* no more work to do */
839 goto out;
840 }
841
842 /* upgrade to exclusive lock */
843 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
844 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
845 return error;
846 }
847 } else {
848 cp->c_lockowner = current_thread();
849 }
850
851 /* generate new keys if none have ever been saved */
852 if ((entry->cp_flags & CP_NEEDS_KEYS)) {
853 struct cprotect *newentry = NULL;
854 error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &newentry);
855 if (error == 0) {
856 cp_replace_entry (cp, newentry);
857 entry = newentry;
858 }
859 else {
860 goto out;
861 }
862 }
863
864 /* unwrap keys if needed */
865 if (entry->cp_flags & CP_KEY_FLUSHED) {
866 if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
867 /* no need to try to restore keys; they are not going to be used */
868 error = 0;
869 }
870 else {
871 error = cp_restore_keys(entry, hfsmp, cp);
872 if (error) {
873 goto out;
874 }
875 }
876 }
877
878 /* write out the xattr if it's new */
879 if (entry->cp_flags & CP_NO_XATTR)
880 error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);
881
882 out:
883
884 hfs_unlock(cp);
885 return error;
886 }
887
888
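/*
 * cp_handle_open
 *
 * Called at open(2) time for a protected vnode. Generate keys if the file
 * does not have them yet, then verify that the file's class permits access
 * right now (class B allows creation while locked; classes A and C require
 * that the keys can still be unwrapped).
 */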
889 int
890 cp_handle_open(struct vnode *vp, int mode)
891 {
892 struct cnode *cp = NULL ;
893 struct cprotect *entry = NULL;
894 struct hfsmount *hfsmp;
895 int error = 0;
896
897 /* If vnode not eligible, just return success */
898 if (!cp_vnode_is_eligible(vp)) {
899 return 0;
900 }
901
902 /* If mount point not properly set up, then also return success */
903 if (!cp_fs_protected(VTOVFS(vp))) {
904 return 0;
905 }
906
907 /* We know the vnode is in a valid state. acquire cnode and validate */
908 cp = VTOC(vp);
909 hfsmp = VTOHFS(vp);
910
911 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
912 return error;
913 }
914
915 entry = cp->c_cpentry;
916 if (entry == NULL) {
917 /*
918 * If the mount is protected and we couldn't get a cprotect for this vnode,
919 * then it's not valid for opening.
920 */
921 if (vnode_isreg(vp)) {
922 error = EPERM;
923 }
924 goto out;
925 }
926
927 if (!S_ISREG(cp->c_mode))
928 goto out;
929
930 /*
931 * Does the cnode have keys yet? If not, then generate them.
932 */
933 if (entry->cp_flags & CP_NEEDS_KEYS) {
934 struct cprotect *newentry = NULL;
935 error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &newentry);
936 if (error == 0) {
937 cp_replace_entry (cp, newentry);
938 entry = newentry;
939 }
940 else {
941 goto out;
942 }
943 }
944
945 /*
946 * We want to minimize the number of unwraps that we'll have to do since
947 * the cost can vary, depending on the platform we're running on.
948 */
949 switch (entry->cp_pclass) {
950 case PROTECTION_CLASS_B:
951 if (mode & O_CREAT) {
952 /*
953 * Class B always allows creation. Since O_CREAT was passed through
954 * we infer that this was a newly created vnode/cnode. Even though a potential
955 * race exists when multiple threads attempt to create/open a particular
956 * file, only one can "win" and actually create it. VFS will unset the
957 * O_CREAT bit on the loser.
958 *
959 * Note that skipping the unwrap check here is not a security issue --
960 * we have to unwrap the key permanently upon the first I/O.
961 */
962 break;
963 }
964
965 if ((entry->cp_flags & CP_KEY_FLUSHED) == 0) {
966 /*
967 * For a class B file, attempt the unwrap if we have the key in
968 * core already.
969 * The device could have just transitioned into the lock state, and
970 * this vnode may not yet have been purged from the vnode cache (which would
971 * remove the keys).
972 */
973 cp_cred_s access_in;
974 cp_wrapped_key_s wrapped_key_in;
975
976 cp_init_access(&access_in, cp);
977 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
978 wrapped_key_in.key = entry->cp_persistent_key;
979 wrapped_key_in.key_len = entry->cp_persistent_key_len;
980 wrapped_key_in.dp_class = entry->cp_pclass;
981 error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, NULL);
982 if (error) {
983 error = EPERM;
984 }
985 break;
986 }
987 /* otherwise, fall through to attempt the unwrap/restore */
988 case PROTECTION_CLASS_A:
989 case PROTECTION_CLASS_C:
990 /*
991 * At this point, we know that we need to attempt an unwrap if needed; we want
992 * to make sure that open(2) fails properly if the device is either just-locked
993 * or never made it past first unlock. Since the keybag serializes access to the
994 * unwrapping keys for us and only calls our VFS callback once they've been purged,
995 * we will get here in two cases:
996 *
997 * A) we're in a window before the wrapping keys are purged; this is OK since when they get
998 * purged, the vnode will get flushed if needed.
999 *
1000 * B) The keys are already gone. In this case, the restore_keys call below will fail.
1001 *
1002 * Since this function is bypassed entirely if we're opening a raw encrypted file,
1003 * we can always attempt the restore.
1004 */
1005 if (entry->cp_flags & CP_KEY_FLUSHED) {
1006 error = cp_restore_keys(entry, hfsmp, cp);
1007 }
1008
1009 if (error) {
1010 error = EPERM;
1011 }
1012
1013 break;
1014
1015 case PROTECTION_CLASS_D:
1016 default:
1017 break;
1018 }
1019
1020 out:
1021 hfs_unlock(cp);
1022 return error;
1023 }
1024
1025
1026 /*
1027 * During hfs resize operations, we have slightly different constraints than during
1028 * normal VNOPS that read/write data to files. Specifically, we already have the cnode
1029 * locked (so nobody else can modify it), and we are doing the IO with root privileges, since
1030 * we are moving the data behind the user's back. So, we skip access checks here (for unlock
1031 * vs. lock), and don't worry about non-existing keys. If the file exists on-disk with valid
1032 * payload, then it must have keys set up already by definition.
1033 */
1034 int
1035 cp_handle_relocate (struct cnode *cp, struct hfsmount *hfsmp)
1036 {
1037 struct cprotect *entry;
1038 int error = -1;
1039
1040 /* cp is already locked */
1041 entry = cp->c_cpentry;
1042 if (!entry)
1043 goto out;
1044
1045 /*
1046 * Still need to validate whether to permit access to the file or not
1047 * based on lock status
1048 */
1049 if ((error = cp_check_access(cp, CP_READ_ACCESS | CP_WRITE_ACCESS))) {
1050 goto out;
1051 }
1052
1053 if (entry->cp_flags == 0) {
1054 /* no more work to do */
1055 error = 0;
1056 goto out;
1057 }
1058
1059 /* it must have keys since it is an existing file with actual payload */
1060
1061 /* unwrap keys if needed */
1062 if (entry->cp_flags & CP_KEY_FLUSHED) {
1063 error = cp_restore_keys(entry, hfsmp, cp);
1064 }
1065
1066 /*
1067 * Don't need to write out the EA since if the file has actual extents,
1068 * it must have an EA
1069 */
1070 out:
1071
1072 /* return the cp still locked */
1073 return error;
1074 }
1075
1076 /*
1077 * cp_getrootxattr:
1078 * Gets the EA we set on the root folder (fileid 1) to get information about the
1079 * version of Content Protection that was used to write to this filesystem.
1080 * Note that all multi-byte fields are written to disk little endian so they must be
1081 * converted to native endian-ness as needed.
1082 */
1083 int
1084 cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
1085 {
1086 uio_t auio;
1087 char uio_buf[UIO_SIZEOF(1)];
1088 size_t attrsize = sizeof(struct cp_root_xattr);
1089 int error = 0;
1090 struct vnop_getxattr_args args;
1091
1092 if (!outxattr) {
1093 panic("Content Protection: cp_xattr called with xattr == NULL");
1094 }
1095
1096 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
1097 uio_addiov(auio, CAST_USER_ADDR_T(outxattr), attrsize);
1098
1099 args.a_desc = NULL; // unused
1100 args.a_vp = NULL; //unused since we're writing EA to root folder.
1101 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1102 args.a_uio = auio;
1103 args.a_size = &attrsize;
1104 args.a_options = XATTR_REPLACE;
1105 args.a_context = NULL; // unused
1106
1107 error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);
1108
1109 /* Now convert the multi-byte fields to native endianness */
1110 outxattr->major_version = OSSwapLittleToHostInt16(outxattr->major_version);
1111 outxattr->minor_version = OSSwapLittleToHostInt16(outxattr->minor_version);
1112 outxattr->flags = OSSwapLittleToHostInt64(outxattr->flags);
1113
1114 if (error != 0) {
1115 goto out;
1116 }
1117
1118 out:
1119 uio_free(auio);
1120 return error;
1121 }
1122
1123 /*
1124 * cp_setrootxattr:
1125 * Sets the EA on the root folder (fileid 1) to record information about the
1126 * version of Content Protection that was used to write to this filesystem.
1127 * Note that all multi-byte fields are written to disk little endian so they must be
1128 * converted to little endian as needed.
1129 *
1130 * This will be written to the disk when it detects the EA is not there, or when we need
1131 * to make a modification to the on-disk version that can be done in-place.
1132 */
1133 int
1134 cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
1135 {
1136 int error = 0;
1137 struct vnop_setxattr_args args;
1138
1139 args.a_desc = NULL;
1140 args.a_vp = NULL;
1141 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1142 args.a_uio = NULL; //pass data ptr instead
1143 args.a_options = 0;
1144 args.a_context = NULL; //no context needed, only done from mount.
1145
1146 /* Now convert the multi-byte fields to little endian before writing to disk. */
1147 newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
1148 newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);
1149 newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);
1150
1151 error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
1152 sizeof(struct cp_root_xattr), &args, hfsmp, 1);
1153 return error;
1154 }
1155
1156
1157 /*
1158 * Stores new xattr data on the cnode.
1159 * cnode lock held exclusive (if available).
1160 *
1161 * This function is also invoked during file creation.
1162 */
1163 int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp, uint32_t fileid, int options)
1164 {
1165 int error = 0;
1166 size_t attrsize;
1167 struct vnop_setxattr_args args;
1168 uint32_t target_fileid;
1169 struct cnode *arg_cp = NULL;
1170 uint32_t tempflags = 0;
1171
1172 args.a_desc = NULL;
1173
1174 if (hfsmp->hfs_flags & HFS_READ_ONLY) {
1175 return EROFS;
1176 }
1177
1178 if (cp) {
1179 args.a_vp = cp->c_vp;
1180 target_fileid = 0;
1181 arg_cp = cp;
1182 }
1183 else {
1184 /*
1185 * When we set the EA in the same txn as the file creation,
1186 * we do not have a vnode/cnode yet. Use the specified fileid.
1187 */
1188 args.a_vp = NULL;
1189 target_fileid = fileid;
1190 }
1191 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1192 args.a_uio = NULL; //pass data ptr instead
1193 args.a_options = options;
1194 args.a_context = vfs_context_current();
1195
1196 /* Note that it's OK to write out an XATTR without keys. */
1197 /* Disable flags that will be invalid as we're writing the EA out at this point. */
1198 tempflags = entry->cp_flags;
1199 tempflags &= ~CP_NO_XATTR;
1200
1201 switch(hfsmp->hfs_running_cp_major_vers) {
1202 case CP_NEW_MAJOR_VERS: {
1203 struct cp_xattr_v4 *newxattr = NULL; // 70+ bytes; don't alloc on stack.
1204 MALLOC (newxattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
1205 if (newxattr == NULL) {
1206 error = ENOMEM;
1207 break;
1208 }
1209 bzero (newxattr, sizeof(struct cp_xattr_v4));
1210
1211 attrsize = sizeof(*newxattr) - CP_MAX_WRAPPEDKEYSIZE + entry->cp_persistent_key_len;
1212
1213 /* Endian swap the multi-byte fields into L.E from host. */
1214 newxattr->xattr_major_version = OSSwapHostToLittleInt16 (hfsmp->hfs_running_cp_major_vers);
1215 newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
1216 newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
1217 newxattr->flags = OSSwapHostToLittleInt32(tempflags);
1218 newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
1219 bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);
1220
1221 error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);
1222
1223 FREE(newxattr, M_TEMP);
1224 break;
1225 }
1226 case CP_PREV_MAJOR_VERS: {
1227 struct cp_xattr_v2 *newxattr = NULL;
1228 MALLOC (newxattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
1229 if (newxattr == NULL) {
1230 error = ENOMEM;
1231 break;
1232 }
1233 bzero (newxattr, sizeof(struct cp_xattr_v2));
1234
1235 attrsize = sizeof(*newxattr);
1236
1237 /* Endian swap the multi-byte fields into L.E from host. */
1238 newxattr->xattr_major_version = OSSwapHostToLittleInt16(hfsmp->hfs_running_cp_major_vers);
1239 newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
1240 newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
1241 newxattr->flags = OSSwapHostToLittleInt32(tempflags);
1242 newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
1243 bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);
1244
1245 error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);
1246
1247 FREE (newxattr, M_TEMP);
1248 break;
1249 }
1250 default:
1251 printf("hfs: cp_setxattr: Unknown CP version running\n");
1252 break;
1253 }
1254
1255 if (error == 0 ) {
1256 entry->cp_flags &= ~CP_NO_XATTR;
1257 }
1258
1259 return error;
1260
1261
1262 }
1263
1264 /*
1265 * Used by an fcntl to query the underlying FS for its content protection version #
1266 */
1267
1268 int
1269 cp_get_root_major_vers(vnode_t vp, uint32_t *level)
1270 {
1271 int err = 0;
1272 struct hfsmount *hfsmp = NULL;
1273 struct mount *mp = NULL;
1274
1275 mp = VTOVFS(vp);
1276
1277 /* check if it supports content protection */
1278 if (cp_fs_protected(mp) == 0) {
1279 return ENOTSUP;
1280 }
1281
1282 hfsmp = VFSTOHFS(mp);
1283 /* figure out the level */
1284
1285 err = cp_root_major_vers(mp);
1286
1287 if (err == 0) {
1288 *level = hfsmp->hfs_running_cp_major_vers;
1289 }
1290 /* in error case, cp_root_major_vers will just return EINVAL. Use that */
1291
1292 return err;
1293 }
1294
1295 /* Used by fcntl to query default protection level of FS */
1296 int cp_get_default_level (struct vnode *vp, uint32_t *level) {
1297 int err = 0;
1298 struct hfsmount *hfsmp = NULL;
1299 struct mount *mp = NULL;
1300
1301 mp = VTOVFS(vp);
1302
1303 /* check if it supports content protection */
1304 if (cp_fs_protected(mp) == 0) {
1305 return ENOTSUP;
1306 }
1307
1308 hfsmp = VFSTOHFS(mp);
1309 /* figure out the default */
1310
1311 *level = hfsmp->default_cp_class;
1312 return err;
1313 }
1314
1315 /********************
1316 * Private Functions
1317 *******************/
1318
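/*
 * Read the root-folder CP xattr and cache the on-disk major version in
 * hfsmp->hfs_running_cp_major_vers. Returns EINVAL if the xattr could
 * not be read.
 */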
1319 static int
1320 cp_root_major_vers(mount_t mp)
1321 {
1322 int err = 0;
1323 struct cp_root_xattr xattr;
1324 struct hfsmount *hfsmp = NULL;
1325
1326 hfsmp = vfs_fsprivate(mp);
1327 err = cp_getrootxattr (hfsmp, &xattr);
1328
1329 if (err == 0) {
1330 hfsmp->hfs_running_cp_major_vers = xattr.major_version;
1331 }
1332 else {
1333 return EINVAL;
1334 }
1335
1336 return 0;
1337 }
1338
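/*
 * A vnode is eligible for content protection only if it is an HFS vnode
 * (its vnop vector is hfs_vnodeop_p), it is not a system file, and it is
 * either a regular file or a directory.
 */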
1339 static int
1340 cp_vnode_is_eligible(struct vnode *vp)
1341 {
1342 return ((vp->v_op == hfs_vnodeop_p) &&
1343 (!vnode_issystem(vp)) &&
1344 (vnode_isreg(vp) || vnode_isdir(vp)));
1345 }
1346
1347
1348
1349 int
1350 cp_is_valid_class(int isdir, int32_t protectionclass)
1351 {
1352 /*
1353 * The valid protection classes are from 0 -> N
1354 * We use a signed argument to detect unassigned values from
1355 * directory entry creation time in HFS.
1356 */
1357 if (isdir) {
1358 /* Directories are not allowed to have F, but they can have "NONE" */
1359 return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
1360 (protectionclass <= PROTECTION_CLASS_D));
1361 }
1362 else {
1363 return ((protectionclass >= PROTECTION_CLASS_A) &&
1364 (protectionclass <= PROTECTION_CLASS_F));
1365 }
1366 }
1367
1368
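/*
 * Allocate a zeroed cprotect entry with room for a wrapped key of
 * 'keylen' bytes appended to the structure. Returns NULL if keylen
 * exceeds CP_MAX_WRAPPEDKEYSIZE or the allocation fails.
 */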
1369 static struct cprotect *
1370 cp_entry_alloc(size_t keylen)
1371 {
1372 struct cprotect *cp_entry;
1373
1374 if (keylen > CP_MAX_WRAPPEDKEYSIZE)
1375 return (NULL);
1376
1377 MALLOC(cp_entry, struct cprotect *, sizeof(struct cprotect) + keylen,
1378 M_TEMP, M_WAITOK);
1379 if (cp_entry == NULL)
1380 return (NULL);
1381
1382 bzero(cp_entry, sizeof(*cp_entry) + keylen);
1383 cp_entry->cp_persistent_key_len = keylen;
1384 return (cp_entry);
1385 }
1386
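/*
 * Zero the entry (including the trailing wrapped key) before freeing it
 * so that key material does not linger in freed memory.
 */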
1387 static void
1388 cp_entry_dealloc(struct cprotect *entry)
1389 {
1390 uint32_t keylen = entry->cp_persistent_key_len;
1391 bzero(entry, (sizeof(*entry) + keylen));
1392 FREE(entry, M_TEMP);
1393 }
1394
1395
1396 /*
1397 * Initializes a new cprotect entry with xattr data from the cnode.
1398 * cnode lock held shared
1399 */
1400 static int
1401 cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, struct cprotect **outentry)
1402 {
1403 int error = 0;
1404 uio_t auio;
1405 size_t attrsize;
1406 char uio_buf[UIO_SIZEOF(1)];
1407 struct vnop_getxattr_args args;
1408 struct cprotect *entry = NULL;
1409
1410 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
1411 args.a_desc = NULL; // unused
1412 args.a_vp = cp->c_vp;
1413 args.a_name = CONTENT_PROTECTION_XATTR_NAME;
1414 args.a_uio = auio;
1415 args.a_options = XATTR_REPLACE;
1416 args.a_context = vfs_context_current(); // unused
1417
1418 switch (hfsmp->hfs_running_cp_major_vers) {
1419 case CP_NEW_MAJOR_VERS: {
1420 struct cp_xattr_v4 *xattr = NULL;
1421 MALLOC (xattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
1422 if (xattr == NULL) {
1423 error = ENOMEM;
1424 break;
1425 }
1426 bzero(xattr, sizeof (struct cp_xattr_v4));
1427 attrsize = sizeof(*xattr);
1428
1429 uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
1430 args.a_size = &attrsize;
1431
1432 error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
1433 if (error != 0) {
1434 FREE (xattr, M_TEMP);
1435 goto out;
1436 }
1437
1438 /* Endian swap the multi-byte fields into host endianness from L.E. */
1439 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1440 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1441 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1442 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1443 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1444
1445 if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers ) {
1446 printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
1447 xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
1448 error = EINVAL;
1449 FREE (xattr, M_TEMP);
1450
1451 goto out;
1452 }
1453 /*
1454 * Prevent a buffer overflow, and validate the key length obtained from the
1455 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1456 * point.
1457 */
1458 if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE) {
1459 error = EINVAL;
1460 FREE (xattr, M_TEMP);
1461
1462 goto out;
1463 }
1464
1465 /*
1466 * Class F files have no backing key; their keylength should be 0,
1467 * though they should have the proper flags set.
1468 *
1469 * A request to instantiate a CP for a class F file should result
1470 * in a bzero'd cp that just says class F, with key_flushed set.
1471 */
1472
1473 /* set up entry with information from xattr */
1474 entry = cp_entry_alloc(xattr->key_size);
1475 if (!entry) {
1476 FREE (xattr, M_TEMP);
1477
1478 return ENOMEM;
1479 }
1480
1481 entry->cp_pclass = xattr->persistent_class;
1482
1483 /*
1484 * Suppress invalid flags that should not be set.
1485 * If we have gotten this far, then CP_NO_XATTR cannot possibly
1486 * be valid; the EA exists.
1487 */
1488 xattr->flags &= ~CP_NO_XATTR;
1489
1490 entry->cp_flags = xattr->flags;
1491 if (xattr->xattr_major_version >= CP_NEW_MAJOR_VERS) {
1492 entry->cp_flags |= CP_OFF_IV_ENABLED;
1493 }
1494
1495 if (entry->cp_pclass != PROTECTION_CLASS_F ) {
1496 bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
1497 }
1498
1499 FREE (xattr, M_TEMP);
1500
1501 break;
1502 }
1503 case CP_PREV_MAJOR_VERS: {
1504 struct cp_xattr_v2 *xattr = NULL;
1505 MALLOC (xattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
1506 if (xattr == NULL) {
1507 error = ENOMEM;
1508 break;
1509 }
1510 bzero (xattr, sizeof (struct cp_xattr_v2));
1511 attrsize = sizeof(*xattr);
1512
1513 uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
1514 args.a_size = &attrsize;
1515
1516 error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
1517 if (error != 0) {
1518 FREE (xattr, M_TEMP);
1519 goto out;
1520 }
1521
1522 /* Endian swap the multi-byte fields into host endianness from L.E. */
1523 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1524 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1525 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1526 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1527 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1528
1529 if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
1530 printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
1531 xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
1532 error = EINVAL;
1533 FREE (xattr, M_TEMP);
1534 goto out;
1535 }
1536
1537 /*
1538 * Prevent a buffer overflow, and validate the key length obtained from the
1539 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1540 * point.
1541 */
1542 if (xattr->key_size > CP_V2_WRAPPEDKEYSIZE) {
1543 error = EINVAL;
1544 FREE (xattr, M_TEMP);
1545 goto out;
1546 }
1547 /* set up entry with information from xattr */
1548 entry = cp_entry_alloc(xattr->key_size);
1549 if (!entry) {
1550 FREE (xattr, M_TEMP);
1551 return ENOMEM;
1552 }
1553
1554 entry->cp_pclass = xattr->persistent_class;
1555
1556 /*
1557 * Suppress invalid flags that should not be set.
1558 * If we have gotten this far, then CP_NO_XATTR cannot possibly
1559 * be valid; the EA exists.
1560 */
1561 xattr->flags &= ~CP_NO_XATTR;
1562
1563 entry->cp_flags = xattr->flags;
1564
1565 if (entry->cp_pclass != PROTECTION_CLASS_F ) {
1566 bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
1567 }
1568
1569 FREE (xattr, M_TEMP);
1570 break;
1571 }
1572 }
1573
1574 out:
1575 uio_free(auio);
1576
1577 *outentry = entry;
1578 return error;
1579 }
1580
1581 /*
1582 * If permitted, restore entry's unwrapped key from the persistent key.
1583 * If not, clear key and set CP_KEY_FLUSHED.
1584 * cnode lock held exclusive
1585 */
1586 static int
1587 cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
1588 {
1589 int error = 0;
1590
1591 error = cp_unwrap(hfsmp, entry, cp);
1592 if (error) {
1593 entry->cp_flags |= CP_KEY_FLUSHED;
1594 bzero(entry->cp_cache_key, entry->cp_cache_key_len);
1595 error = EPERM;
1596 }
1597 else {
1598 /* ready for business */
1599 entry->cp_flags &= ~CP_KEY_FLUSHED;
1600
1601 }
1602 return error;
1603 }
1604
1605 static int
1606 cp_lock_vfs_callback(mount_t mp, void *arg)
1607 {
1608
1609 /* Use a pointer-width integer field for casting */
1610 unsigned long new_state;
1611
1612 /*
1613 * When iterating the various mount points that may
1614 * be present on a content-protected device, we need to skip
1615 * those that do not have it enabled.
1616 */
1617 if (!cp_fs_protected(mp)) {
1618 return 0;
1619 }
1620
1621 new_state = (unsigned long) arg;
1622 if (new_state == CP_LOCKED_STATE) {
1623 /*
1624 * We respond only to lock events. Since cprotect structs
1625 * decrypt/restore keys lazily, the unlock events don't
1626 * actually cause anything to happen.
1627 */
1628 return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg);
1629 }
1630 /* Otherwise just return 0. */
1631 return 0;
1632
1633 }
1634
1635
1636 /*
1637 * Deny access to protected files if keys have been locked.
1638 */
1639 static int
1640 cp_check_access(struct cnode *cp, int vnop __unused)
1641 {
1642 int error = 0;
1643
1644 if (g_cp_state.lock_state == CP_UNLOCKED_STATE) {
1645 return 0;
1646 }
1647
1648 if (!cp->c_cpentry) {
1649 /* unprotected node */
1650 return 0;
1651 }
1652
1653 if (!S_ISREG(cp->c_mode)) {
1654 return 0;
1655 }
1656
1657 /* Deny all access for class A files */
1658 switch (cp->c_cpentry->cp_pclass) {
1659 case PROTECTION_CLASS_A: {
1660 error = EPERM;
1661 break;
1662 }
1663 default:
1664 error = 0;
1665 break;
1666 }
1667
1668 return error;
1669 }
1670
1671 /*
1672 * Respond to a lock or unlock event.
1673 * On lock: clear out keys from memory, then flush file contents.
1674 * On unlock: nothing (function not called).
1675 */
1676 static int
1677 cp_lock_vnode_callback(struct vnode *vp, void *arg)
1678 {
1679 cnode_t *cp = NULL;
1680 struct cprotect *entry = NULL;
1681 int error = 0;
1682 int locked = 1;
1683 unsigned long action = 0;
1684 int took_truncate_lock = 0;
1685
1686 error = vnode_getwithref (vp);
1687 if (error) {
1688 return error;
1689 }
1690
1691 cp = VTOC(vp);
1692
1693 /*
1694 * When cleaning cnodes due to a lock event, we must
1695 * take the truncate lock AND the cnode lock. By taking
1696 * the truncate lock here, we force (nearly) all pending IOs
1697 * to drain before we can acquire the truncate lock. All HFS cluster
1698 * io calls except for swapfile IO need to acquire the truncate lock
1699 * prior to calling into the cluster layer.
1700 */
1701 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1702 took_truncate_lock = 1;
1703
1704 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
1705
1706 entry = cp->c_cpentry;
1707 if (!entry) {
1708 /* unprotected vnode: not a regular file */
1709 goto out;
1710 }
1711
1712 action = (unsigned long) arg;
1713 switch (action) {
1714 case CP_LOCKED_STATE: {
1715 vfs_context_t ctx;
1716 if (entry->cp_pclass != PROTECTION_CLASS_A ||
1717 vnode_isdir(vp)) {
1718 /*
1719 * There is no change at lock for other classes than A.
1720 * B is kept in memory for writing, and class F (for VM) does
1721 * not have a wrapped key, so there is no work needed for
1722 * wrapping/unwrapping.
1723 *
1724 * Note that 'class F' is relevant here because if
1725 * hfs_vnop_strategy does not take the cnode lock
1726 * to protect the cp blob across IO operations, we rely
1727 * implicitly on the truncate lock to be held when doing IO.
1728 * The only case where the truncate lock is not held is during
1729 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
1730 * directly to cluster_pageout.
1731 */
1732 goto out;
1733 }
1734
1735 /* Before doing anything else, zero-fill sparse ranges as needed */
1736 ctx = vfs_context_current();
1737 (void) hfs_filedone (vp, ctx);
1738
1739 /* first, sync back dirty pages */
1740 hfs_unlock (cp);
1741 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
1742 hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
1743
1744 /* flush keys:
1745 * There was a concern here (9206856) about flushing keys before the NAND layer is done using them.
1746 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
1747 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF,
1748 * which in turn calls buf_biodone() and eventually unblocks ubc_msync().
1749 * Also verified that the cached data in IOFS is overwritten by other data, and there
1750 * is no key leakage in that layer.
1751 */
1752
1753 entry->cp_flags |= CP_KEY_FLUSHED;
1754 bzero(&entry->cp_cache_key, entry->cp_cache_key_len);
1755 bzero(&entry->cp_cache_iv_ctx, sizeof(aes_encrypt_ctx));
1756
1757 /* some write may have arrived in the mean time. dump those pages */
1758 hfs_unlock(cp);
1759 locked = 0;
1760
1761 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
1762 break;
1763 }
1764 case CP_UNLOCKED_STATE: {
1765 /* no-op */
1766 break;
1767 }
1768 default:
1769 panic("Content Protection: unknown lock action %lu\n", action);
1770 }
1771
1772 out:
1773 if (locked) {
1774 hfs_unlock(cp);
1775 }
1776
1777 if (took_truncate_lock) {
1778 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
1779 }
1780
1781 vnode_put (vp);
1782 return error;
1783 }
1784
1785
1786 /*
1787 * cp_rewrap:
1788 *
1789 * Generate a new wrapped key based on the existing cache key.
1790 */
1791
1792 static int
1793 cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass)
1794 {
1795
1796 struct cprotect *entry = cp->c_cpentry;
1797 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
1798 size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
1799 int error = 0;
1800
1801 /* Structures passed between HFS and AKS */
1802 cp_cred_s access_in;
1803 cp_wrapped_key_s wrapped_key_in;
1804 cp_wrapped_key_s wrapped_key_out;
1805
1806 /*
1807 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
1808 * key that is only good as long as the file is open. There is no
1809 * wrapped key, so there isn't anything to wrap.
1810 */
1811 if (newclass == PROTECTION_CLASS_F) {
1812 return EINVAL;
1813 }
1814
1815 cp_init_access(&access_in, cp);
1816
1817 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
1818 wrapped_key_in.key = entry->cp_persistent_key;
1819 wrapped_key_in.key_len = entry->cp_persistent_key_len;
1820 wrapped_key_in.dp_class = entry->cp_pclass;
1821
1822 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
1823 wrapped_key_out.key = new_persistent_key;
1824 wrapped_key_out.key_len = keylen;
1825
1826 /*
1827 * inode is passed here to find the backup bag wrapped blob
1828 * from userspace. This lookup will occur shortly after creation
1829 * and only if the file still exists. Beyond this lookup the
1830 * inode is not used. Technically there is a race, but in practice
1831 * we don't lose.
1832 */
1833 error = g_cp_wrap_func.rewrapper(&access_in,
1834 newclass, /* new class */
1835 &wrapped_key_in,
1836 &wrapped_key_out);
1837
1838 keylen = wrapped_key_out.key_len;
1839
1840 if (error == 0) {
1841 struct cprotect *newentry = NULL;
1842 /*
1843 * v2 EA's don't support the larger class B keys
1844 */
1845 if ((keylen != CP_V2_WRAPPEDKEYSIZE) &&
1846 (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
1847 return EINVAL;
1848 }
1849
1850 /* Allocate a new cpentry */
1851 newentry = cp_entry_alloc (keylen);
1852 bcopy (entry, newentry, sizeof(struct cprotect));
1853
1854 /* copy the new key into the entry */
1855 bcopy (new_persistent_key, newentry->cp_persistent_key, keylen);
1856 newentry->cp_persistent_key_len = keylen;
1857 newentry->cp_backing_cnode = cp;
1858 newentry->cp_pclass = newclass;
1859
1860 /* Attach the new entry to the cnode */
1861 cp->c_cpentry = newentry;
1862
1863 /* destroy the old entry */
1864 cp_entry_destroy (entry);
1865 }
1866 else {
1867 error = EPERM;
1868 }
1869
1870 return error;
1871 }
1872
1873
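/*
 * cp_unwrap
 *
 * Ask the key store to unwrap the persistent key into the in-core cache key.
 * On the newer CP major version, also derive the offset-IV AES context from
 * the IV key returned alongside the unwrapped key.
 */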
1874 static int
1875 cp_unwrap(struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
1876 {
1877 int error = 0;
1878 uint8_t iv_key[CP_IV_KEYSIZE];
1879
1880 /* Structures passed between HFS and AKS */
1881 cp_cred_s access_in;
1882 cp_wrapped_key_s wrapped_key_in;
1883 cp_raw_key_s key_out;
1884
1885 /*
1886 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
1887 * key that is only good as long as the file is open. There is no
1888 * wrapped key, so there isn't anything to unwrap.
1889 */
1890 if (entry->cp_pclass == PROTECTION_CLASS_F) {
1891 return EPERM;
1892 }
1893
1894 cp_init_access(&access_in, cp);
1895
1896 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
1897 wrapped_key_in.key = entry->cp_persistent_key;
1898 wrapped_key_in.key_len = entry->cp_persistent_key_len;
1899 wrapped_key_in.dp_class = entry->cp_pclass;
1900
1901 bzero(&key_out, sizeof(key_out));
1902 key_out.key = entry->cp_cache_key;
1903 key_out.key_len = CP_MAX_KEYSIZE;
1904 key_out.iv_key = iv_key;
1905 key_out.iv_key_len = CP_IV_KEYSIZE;
1906
1907 error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, &key_out);
1908 if (!error) {
1909 entry->cp_cache_key_len = key_out.key_len;
1910
1911 /* Only the newer EA format uses the IV key; skip IV setup for older EAs */
1912 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
1913 aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
1914 entry->cp_flags |= CP_OFF_IV_ENABLED;
1915 }
1916 } else {
1917 error = EPERM;
1918 }
1919
1920 return error;
1921 }
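#if 0
/*
 * Illustrative sketch, not part of the original source.  Unwrapping is
 * demand-driven: when an operation needs the raw file key and it is no longer
 * cached, the wrapped key is unwrapped again while the cnode lock is held.
 * The helper name below is hypothetical.
 */
static int
cp_restore_key_sketch(struct hfsmount *hfsmp, struct cnode *cp)
{
	struct cprotect *entry = cp->c_cpentry;
	int error;

	/* Repopulate entry->cp_cache_key from the wrapped (persistent) key. */
	error = cp_unwrap(hfsmp, entry, cp);
	if (error) {
		/* Scrub anything a failed attempt may have left behind. */
		bzero(entry->cp_cache_key, CP_MAX_KEYSIZE);
	}
	return error;
}
#endif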
1922
1923 /* Set up the AES context used for IV generation: Kiv is the SHA-1 of the cache key, expanded with aes_encrypt_key128 */
1924 static int
1925 cp_setup_aes_ctx(struct cprotect *entry)
1926 {
1927 SHA1_CTX sha1ctxt;
1928 uint8_t cp_cache_iv_key[CP_IV_KEYSIZE]; /* Kiv */
1929
1930 /* First init the cp_cache_iv_key[] */
1931 SHA1Init(&sha1ctxt);
1932 SHA1Update(&sha1ctxt, &entry->cp_cache_key[0], CP_MAX_KEYSIZE);
1933 SHA1Final(&cp_cache_iv_key[0], &sha1ctxt);
1934
1935 aes_encrypt_key128(&cp_cache_iv_key[0], &entry->cp_cache_iv_ctx);
1936
1937 return 0;
1938 }
1939
1940 /*
1941 * cp_generate_keys
1942 *
1943 * Take a cnode that has already been initialized and establish persistent and
1944 * cache keys for it at this time. Note that at the time this is called, the
1945 * directory entry has already been created and we are holding the cnode lock
1946 * on 'cp'.
1947 *
1948 */
1949 int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, int targetclass, struct cprotect **newentry)
1950 {
1951
1952 int error = 0;
1953 struct cprotect *newcp = NULL;
1954 *newentry = NULL;
1955
1956 /* Validate that it has a cprotect already */
1957 if (cp->c_cpentry == NULL) {
1958 /* We can't do anything if it shouldn't be protected. */
1959 return 0;
1960 }
1961
1962 /* Asserts for the underlying cprotect */
1963 if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
1964 /* should already have an xattr by this point. */
1965 error = EINVAL;
1966 goto out;
1967 }
1968
1969 if (S_ISREG(cp->c_mode)) {
1970 if ((cp->c_cpentry->cp_flags & CP_NEEDS_KEYS) == 0){
1971 error = EINVAL;
1972 goto out;
1973 }
1974 }
1975
1976 error = cp_new (targetclass, hfsmp, cp, cp->c_mode, &newcp);
1977 if (error) {
1978 /*
1979 * Key generation failed. This is not necessarily fatal
1980 * since the device could have transitioned into the lock
1981 * state before we called this.
1982 */
1983 error = EPERM;
1984 goto out;
1985 }
1986
1987 /*
1988 * If we got here, then we have a new cprotect.
1989 * Attempt to write the new one out.
1990 */
1991 error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);
1992
1993 if (error) {
1994 /* Tear down the new cprotect; tell MKB that it's invalid; bail out. */
1995 /* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
1996 if (newcp) {
1997 cp_entry_destroy(newcp);
1998 }
1999 goto out;
2000 }
2001
2002 /*
2003 * If we get here then we can assert that:
2004 * 1) we generated the wrapped/unwrapped keys,
2005 * 2) we wrote the new keys to disk, and
2006 * 3) the cprotect is ready to go.
2007 */
2008
2009 newcp->cp_flags &= ~CP_NEEDS_KEYS;
2010 *newentry = newcp;
2011
2012 out:
2013 return error;
2014
2015 }
2016
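/*
 * cp_replace_entry
 *
 * Swap a cnode's cprotect for 'newentry': any existing entry is destroyed and
 * the new entry is pointed back at its owning cnode.
 */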
2017 void cp_replace_entry (struct cnode *cp, struct cprotect *newentry)
2018 {
2019
2020 if (cp->c_cpentry) {
2021 cp_entry_destroy (cp->c_cpentry);
2022 }
2023 cp->c_cpentry = newentry;
2024 newentry->cp_backing_cnode = cp;
2025
2026 return;
2027 }
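#if 0
/*
 * Illustrative sketch, not part of the original source.  For a file created
 * while keys could not be generated (CP_NEEDS_KEYS), the keys are typically
 * generated lazily and installed with cp_replace_entry, roughly as below.
 * The helper name is hypothetical and the caller is assumed to hold the
 * cnode lock exclusively.
 */
static int
cp_lazy_keygen_sketch(struct hfsmount *hfsmp, struct cnode *cp, int targetclass)
{
	struct cprotect *newentry = NULL;
	int error;

	/* Generate and persist wrapped/unwrapped keys for this cnode. */
	error = cp_generate_keys(hfsmp, cp, targetclass, &newentry);
	if (error == 0 && newentry != NULL) {
		/* Install the fully initialized entry on the cnode. */
		cp_replace_entry(cp, newentry);
	}
	return error;
}
#endif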
2028
2029
2030 /*
2031 * cp_new
2032 *
2033 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from the keystore),
2034 * allocate a cprotect, and vend it back to the caller.
2035 *
2036 * Additionally, decide if keys are even needed -- directories get cprotect data structures
2037 * but they do not have keys.
2038 *
2039 */
2040
2041 static int
2042 cp_new(int newclass, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode, struct cprotect **output_entry)
2043 {
2044 struct cprotect *entry = NULL;
2045 int error = 0;
2046 uint8_t new_key[CP_MAX_KEYSIZE];
2047 size_t new_key_len = CP_MAX_KEYSIZE;
2048 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2049 size_t new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
2050 uint8_t iv_key[CP_IV_KEYSIZE];
2051 size_t iv_key_len = CP_IV_KEYSIZE;
2052
2053 /* Structures passed between HFS and AKS */
2054 cp_cred_s access_in;
2055 cp_wrapped_key_s wrapped_key_out;
2056 cp_raw_key_s key_out;
2057
2058 if (*output_entry != NULL) {
2059 panic ("cp_new with non-null entry!");
2060 }
2061
2062 if (!g_cp_state.wrap_functions_set) {
2063 printf("hfs: cp_new: wrap/gen functions not yet set\n");
2064 return ENXIO;
2065 }
2066
2067 /*
2068 * Step 1: Generate Keys if needed.
2069 *
2070 * For class F files, the kernel provides the key.
2071 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2072 * key that is only good as long as the file is open. There is no
2073 * wrapped key, so there isn't anything to wrap.
2074 *
2075 * For class A->D files, the keystore provides the key.
2076 *
2077 * Directories only get a class; they have no keys.
2078 */
2079 if (S_ISDIR (cmode)) {
2080 /* Directories */
2081 new_persistent_len = 0;
2082 new_key_len = 0;
2083
2084 error = 0;
2085 }
2086 else if (S_ISREG(cmode)) {
2087 /* Files */
2088 if (newclass == PROTECTION_CLASS_F) {
2089 new_key_len = CP_MAX_KEYSIZE;
2090 read_random (&new_key[0], new_key_len);
2091 new_persistent_len = 0;
2092
2093 error = 0;
2094 }
2095 else {
2096 /*
2097 * The keystore is provided the file ID so that it can associate
2098 * the wrapped backup blob with this key from userspace. This
2099 * lookup occurs after successful file creation. Beyond this, the
2100 * file ID is not used. Note that there is a potential race here if
2101 * the file ID is re-used.
2102 */
2103 cp_init_access(&access_in, cp);
2104
2105 bzero(&key_out, sizeof(key_out));
2106 key_out.key = new_key;
2107 key_out.key_len = new_key_len;
2108 key_out.iv_key = iv_key;
2109 key_out.iv_key_len = iv_key_len;
2110
2111 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2112 wrapped_key_out.key = new_persistent_key;
2113 wrapped_key_out.key_len = new_persistent_len;
2114
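/*
 * The new_key callback asks the keystore to generate a fresh per-file key:
 * the raw key and an IV key come back in key_out, and the same key wrapped
 * to the requested class comes back in wrapped_key_out for persistence in
 * the EA.
 */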
2115 error = g_cp_wrap_func.new_key(&access_in,
2116 newclass,
2117 &key_out,
2118 &wrapped_key_out);
2119
2120 new_key_len = key_out.key_len;
2121 iv_key_len = key_out.iv_key_len;
2122 new_persistent_len = wrapped_key_out.key_len;
2123 }
2124
2125 }
2126 else {
2127 /* Something other than file or dir? */
2128 error = EPERM;
2129 }
2130
2131 /*
2132 * Step 2: Allocate cprotect and initialize it.
2133 */
2134
2135 if (error == 0) {
2136 /*
2137 * v2 EAs don't support the larger class B keys
2138 */
2139 if ((new_persistent_len != CP_V2_WRAPPEDKEYSIZE) &&
2140 (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
2141 return EINVAL;
2142 }
2143
2144 entry = cp_entry_alloc (new_persistent_len);
2145 if (entry == NULL) {
2146 return ENOMEM;
2147 }
2148
2149 *output_entry = entry;
2150
2151 entry->cp_pclass = newclass;
2152
2153 /* Copy the cache key & IV keys into place if needed. */
2154 if (new_key_len > 0) {
2155 bcopy (new_key, entry->cp_cache_key, new_key_len);
2156 entry->cp_cache_key_len = new_key_len;
2157
2158 /* Initialize the IV key */
2159 if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
2160 if (newclass == PROTECTION_CLASS_F) {
2161 /* class F needs a full IV initialize */
2162 cp_setup_aes_ctx(entry);
2163 }
2164 else {
2165 /* The keystore gave us an IV key; just expand it into an AES context. */
2166 aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
2167 }
2168 entry->cp_flags |= CP_OFF_IV_ENABLED;
2169 }
2170 }
2171 if (new_persistent_len > 0) {
2172 bcopy(new_persistent_key, entry->cp_persistent_key, new_persistent_len);
2173 }
2174 }
2175 else {
2176 error = EPERM;
2177 }
2178
2179 return error;
2180 }
2181
2182 /* Initialize the cp_cred_t structure passed to AKS */
2183 static void cp_init_access(cp_cred_t access, struct cnode *cp)
2184 {
2185 vfs_context_t context = vfs_context_current();
2186 kauth_cred_t cred = vfs_context_ucred(context);
2187 proc_t proc = vfs_context_proc(context);
2188
2189 bzero(access, sizeof(*access));
2190
2191 /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
2192 access->inode = cp->c_fileid;
2193 access->pid = proc_pid(proc);
2194 access->uid = kauth_cred_getuid(cred);
2195
2196 return;
2197 }
2198
2199 #else
2200
2201 int cp_key_store_action(int action __unused)
2202 {
2203 return ENOTSUP;
2204 }
2205
2206
2207 int cp_register_wraps(cp_wrap_func_t key_store_func __unused)
2208 {
2209 return ENOTSUP;
2210 }
2211
2212 #endif /* CONFIG_PROTECT */