/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cprotect.h>
#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/uio_internal.h>
#include <sys/ubc_internal.h>
#include <sys/vnode_if.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <sys/kauth.h>

#include "hfs_cnode.h"
#include "hfs_fsctl.h"
/*
 * The wrap function pointers and the variable to indicate if they
 * are initialized are system-wide, and hence are defined globally.
 */
static struct cp_wrap_func g_cp_wrap_func = {};
static int are_wraps_initialized = false;

extern int (**hfs_vnodeop_p) (void *);
/*
 * CP private functions
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static struct cprotect *cp_entry_alloc(size_t);
static void cp_entry_dealloc(struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vfs_callback(mount_t, void *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_new(int newclass, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode,
        uint32_t flags, struct cprotect **output_entry);
static int cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static int cp_setup_aes_ctx(struct cprotect *entry);
static void cp_init_access(cp_cred_t access, struct cnode *cp);
static inline int cp_get_crypto_generation (uint32_t protclass) {
    if (protclass & CP_CRYPTO_G1) {
        return 1;
    }
    return 0;
}

#if DEVELOPMENT || DEBUG
#define CP_ASSERT(x)        \
    if ((x) == 0) {         \
        panic("Content Protection: failed assertion in %s", __FUNCTION__); \
    }
#else
#define CP_ASSERT(x)
#endif
int
cp_key_store_action(int action)
{
    if (action < 0 || action > CP_MAX_STATE) {
        return -1;
    }

    /*
     * The lock state is kept locally to each data protected filesystem to
     * avoid using globals. Pass along the lock request to each filesystem
     * we iterate through.
     */

    /*
     * Upcast the value in 'action' to be a pointer-width unsigned integer.
     * This avoids issues relating to pointer-width.
     */
    unsigned long action_arg = (unsigned long) action;
    return vfs_iterate(0, cp_lock_vfs_callback, (void*)action_arg);
}
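
/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the AppleKeyStore layer is expected to drive cp_key_store_action(). The
 * CP_LOCKED_STATE/CP_UNLOCKED_STATE constants come from sys/cprotect.h; the
 * surrounding function is hypothetical and exists only to show the lock
 * state flowing into each protected mount via cp_lock_vfs_callback().
 */
#if 0
static void
example_notify_device_lock_state(int locked)
{
    if (locked) {
        /* Propagate the lock: per-file keys get flushed for class A files. */
        (void) cp_key_store_action(CP_LOCKED_STATE);
    } else {
        /* On unlock nothing is flushed; keys are restored lazily on next use. */
        (void) cp_key_store_action(CP_UNLOCKED_STATE);
    }
}
#endif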
int
cp_register_wraps(cp_wrap_func_t key_store_func)
{
    g_cp_wrap_func.new_key = key_store_func->new_key;
    g_cp_wrap_func.unwrapper = key_store_func->unwrapper;
    g_cp_wrap_func.rewrapper = key_store_func->rewrapper;
    /* do not use invalidater until rdar://12170050 goes in ! */
    g_cp_wrap_func.invalidater = key_store_func->invalidater;
    g_cp_wrap_func.backup_key = key_store_func->backup_key;

    /* Mark the functions as initialized in the function pointer container */
    are_wraps_initialized = true;

    return 0;
}
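
/*
 * Illustrative sketch (not part of the original file): a key-store module
 * would fill out a cp_wrap_func structure with its own callbacks and hand it
 * to cp_register_wraps() once at startup. The callback names below are
 * hypothetical placeholders; only the field names of cp_wrap_func used above
 * are assumed.
 */
#if 0
static struct cp_wrap_func example_aks_funcs = {
    .new_key     = example_new_key,
    .unwrapper   = example_unwrap_key,
    .rewrapper   = example_rewrap_key,
    .invalidater = example_invalidate_key,
    .backup_key  = example_backup_key,
};

static void
example_register_with_hfs(void)
{
    (void) cp_register_wraps(&example_aks_funcs);
}
#endif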
/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
    struct cprotect *entry = NULL;
    int error = 0;
    struct hfsmount *hfsmp = VFSTOHFS(mp);

    /*
     * The cnode should be locked at this point, regardless of whether or not
     * we are creating a new item in the namespace or vending a vnode on behalf
     * of lookup. The only time we tell getnewvnode to skip the lock is when
     * constructing a resource fork vnode. But a resource fork vnode must come
     * after the regular data fork cnode has already been constructed.
     */
    if (!cp_fs_protected (mp)) {
        cp->c_cpentry = NULL;
        return 0;
    }

    if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
        cp->c_cpentry = NULL;
        return 0;
    }

    if (are_wraps_initialized == false) {
        printf("hfs: cp_entry_init: wrap functions not yet set\n");
        return ENXIO;
    }

    if (hfsmp->hfs_running_cp_major_vers == 0) {
        panic ("hfs cp: no running mount point version! ");
    }

    CP_ASSERT (cp->c_cpentry == NULL);

    error = cp_getxattr(cp, hfsmp, &entry);
    if (error == 0) {
        /*
         * Success; attribute was found, though it may not have keys.
         * If the entry was returned without keys, we will delay generating
         * keys until the first I/O.
         */
        if (S_ISREG(cp->c_mode)) {
            if (entry->cp_flags & CP_NEEDS_KEYS) {
                entry->cp_flags &= ~CP_KEY_FLUSHED;
            }
            else {
                entry->cp_flags |= CP_KEY_FLUSHED;
            }
        }
    }
    else if (error == ENOATTR) {
        /*
         * Normally, we should always have a CP EA for a file or directory that
         * we are initializing here. However, there are some extenuating circumstances,
         * such as the root directory immediately following a newfs_hfs.
         *
         * As a result, we leave code here to deal with an ENOATTR which will always
         * default to a 'D/NONE' key, though we don't expect to use it much.
         */
        int target_class = PROTECTION_CLASS_D;

        if (S_ISDIR(cp->c_mode)) {
            target_class = PROTECTION_CLASS_DIR_NONE;
        }
        /* allow keybag to override our class preferences */
        uint32_t keyflags = CP_KEYWRAP_DIFFCLASS;
        error = cp_new (target_class, hfsmp, cp, cp->c_mode, keyflags, &entry);
        if (error == 0) {
            error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
        }
    }

    /*
     * Bail out if:
     * a) error was not ENOATTR (we got something bad from the getxattr call)
     * b) we encountered an error setting the xattr above.
     * c) we failed to generate a new cprotect data structure.
     */
    if (error == 0) {
        cp->c_cpentry = entry;
        entry->cp_backing_cnode = cp;
    }
    else {
        if (entry) {
            cp_entry_destroy(entry);
        }
        cp->c_cpentry = NULL;
    }

    return error;
}
/*
 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
 * Since the kext is now responsible for vending us both wrapped/unwrapped keys
 * we need to create a keyless xattr upon file / directory creation. When we have the inode value
 * and the file/directory is established, then we can ask it to generate keys. Note that
 * this introduces a potential race; if the device is locked and the wrapping
 * keys are purged between the time we call this function and the time we ask it to generate
 * keys for us, we could have to fail the open(2) call and back out the entry.
 */
int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp, int32_t suppliedclass,
        mode_t cmode, struct cprotect **tmpentry)
{
    int isdir = 0;
    struct cprotect *entry = NULL;
    uint32_t target_class = hfsmp->default_cp_class;
    suppliedclass = CP_CLASS(suppliedclass);

    if (hfsmp->hfs_running_cp_major_vers == 0) {
        panic ("CP: major vers not set in mount!");
    }

    if (S_ISDIR (cmode)) {
        isdir = 1;
    }

    /* Decide the target class. Input argument takes priority. */
    if (cp_is_valid_class (isdir, suppliedclass)) {
        /* caller supplies -1 if it was not specified so we will default to the mount point value */
        target_class = suppliedclass;
        /*
         * One exception: F is never valid for a directory
         * because its children may inherit and userland will be
         * unable to read/write to the files.
         */
        if (isdir) {
            if (target_class == PROTECTION_CLASS_F) {
                *tmpentry = NULL;
                return EINVAL;
            }
        }
    }
    else {
        /*
         * If no valid class was supplied, behave differently depending on whether or not
         * the item being created is a file or directory.
         *
         * For files:
         * If parent directory has a non-zero class, use that.
         * If parent directory has a zero class (not set), then attempt to
         * apply the mount point default.
         *
         * For directories:
         * Directories always inherit from the parent; if the parent
         * has a NONE class set, then we can continue to use that.
         */
        if ((dcp) && (dcp->c_cpentry)) {
            uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
            /* If the parent class is not valid, default to the mount point value */
            if (cp_is_valid_class(1, parentclass)) {
                if (isdir) {
                    target_class = parentclass;
                }
                else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
                    /* files can inherit so long as it's not NONE */
                    target_class = parentclass;
                }
            }
            /* Otherwise, we already defaulted to the mount point's default */
        }
    }

    /* Generate the cprotect to vend out */
    entry = cp_entry_alloc (0);
    if (entry == NULL) {
        *tmpentry = NULL;
        return ENOMEM;
    }

    /*
     * We don't have keys yet, so fill in what we can. At this point
     * this blob has no keys and it has no backing xattr. We just know the
     * target class.
     */
    entry->cp_flags = (CP_NEEDS_KEYS | CP_NO_XATTR);
    /* Note this is only the effective class */
    entry->cp_pclass = target_class;
    *tmpentry = entry;

    return 0;
}
/*
 * Relay to caller whether or not the filesystem should generate temporary keys
 * during resize operations.
 */
int cp_needs_tempkeys (struct hfsmount *hfsmp, int *needs)
{
    if (hfsmp->hfs_running_cp_major_vers < CP_PREV_MAJOR_VERS ||
            hfsmp->hfs_running_cp_major_vers > CP_NEW_MAJOR_VERS) {
        return -1;
    }

    /* CP_NEW_MAJOR_VERS implies CP_OFF_IV_ENABLED */
    if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
        *needs = 0;
    }
    else {
        *needs = 1;
    }

    return 0;
}
/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk. We use class F for this since it provides the exact
 * semantics that are needed here. Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 */
int cp_entry_gentempkeys(struct cprotect **entry_ptr, struct hfsmount *hfsmp)
{
    struct cprotect *entry = NULL;

    if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
        return EPERM;
    }

    /*
     * This should only be used for files and won't be written out.
     * We don't need a persistent key.
     */
    entry = cp_entry_alloc (0);
    if (entry == NULL) {
        *entry_ptr = NULL;
        return ENOMEM;
    }

    /* This is generated in-kernel so we leave it at the max key size */
    entry->cp_cache_key_len = CP_MAX_KEYSIZE;

    /* This pclass is only the effective class */
    entry->cp_pclass = PROTECTION_CLASS_F;
    entry->cp_persistent_key_len = 0;

    /* Generate the class F key */
    read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);

    /* Generate the IV key */
    cp_setup_aes_ctx(entry);
    entry->cp_flags |= CP_OFF_IV_ENABLED;

    *entry_ptr = entry;
    return 0;
}
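
/*
 * Illustrative sketch (not part of the original file): how the HFS resize
 * path is expected to combine cp_needs_tempkeys() and cp_entry_gentempkeys().
 * The surrounding function name is hypothetical; only the two CP calls above
 * are real.
 */
#if 0
static int
example_get_resize_keys(struct hfsmount *hfsmp, struct cprotect **keys)
{
    int needs = 0;

    if (cp_needs_tempkeys(hfsmp, &needs) == 0 && needs) {
        /* Transient class F keys; never written to disk. */
        return cp_entry_gentempkeys(keys, hfsmp);
    }
    *keys = NULL;
    return 0;
}
#endif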
/*
 * Tear down and clear a cprotect blob for a closing file.
 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
 */
void
cp_entry_destroy(struct cprotect *entry_ptr)
{
    if (entry_ptr == NULL) {
        /* nothing to clean up */
        return;
    }
    cp_entry_dealloc(entry_ptr);
}
int
cp_fs_protected (mount_t mnt)
{
    return (vfs_flags(mnt) & MNT_CPROTECT);
}
/*
 * Return a pointer to underlying cnode if there is one for this vnode.
 * Done without taking cnode lock, inspecting only vnode state.
 */
struct cnode *
cp_get_protected_cnode(struct vnode *vp)
{
    if (!cp_vnode_is_eligible(vp)) {
        return NULL;
    }

    if (!cp_fs_protected(VTOVFS(vp))) {
        /* mount point doesn't support it */
        return NULL;
    }

    return (struct cnode*) vp->v_data;
}
/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 */
int
cp_vnode_getclass(struct vnode *vp, int *class)
{
    struct cprotect *entry;
    int error = 0;
    struct cnode *cp;
    int took_truncate_lock = 0;
    struct hfsmount *hfsmp = NULL;

    /* Is this an interesting vp? */
    if (!cp_vnode_is_eligible (vp)) {
        return EBADF;
    }

    /* Is the mount point formatted for content protection? */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return ENOTSUP;
    }

    cp = VTOC(vp);
    hfsmp = VTOHFS(vp);

    /*
     * Take the truncate lock up-front in shared mode because we may need
     * to manipulate the CP blob. Pend lock events until we're done here.
     */
    hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    /*
     * We take only the shared cnode lock up-front. If it turns out that
     * we need to manipulate the CP blob to write a key out, drop the
     * shared cnode lock and acquire an exclusive lock.
     */
    error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
    if (error) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return error;
    }

    /* pull the class from the live entry */
    entry = cp->c_cpentry;

    if (entry == NULL) {
        panic("Content Protection: uninitialized cnode %p", cp);
    }

    /* Note that we may not have keys yet, but we know the target class. */

    if (error == 0) {
        *class = CP_CLASS(entry->cp_pclass);
    }

    if (took_truncate_lock) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
    }

    hfs_unlock(cp);
    return error;
}
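
/*
 * Illustrative sketch (not part of the original file): from userspace the
 * persistent class maintained by cp_vnode_getclass() is reached through the
 * F_GETPROTECTIONCLASS fcntl. The snippet below is a plain user-level
 * program, shown only to make the direction of the class value concrete.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
example_print_class(const char *path)
{
    int fd = open(path, O_RDONLY);
    if (fd >= 0) {
        int class = fcntl(fd, F_GETPROTECTIONCLASS);
        printf("%s: protection class %d\n", path, class);
        close(fd);
    }
}
#endif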
/*
 * Sets persistent class for this file or directory.
 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
 * If the new class can't be accessed now, EPERM.
 * Otherwise, record class and re-wrap key if the mount point is content-protected.
 */
int
cp_vnode_setclass(struct vnode *vp, uint32_t newclass)
{
    struct cnode *cp;
    struct cprotect *entry = 0;
    int error = 0;
    int took_truncate_lock = 0;
    struct hfsmount *hfsmp = NULL;
    int isdir = 0;

    if (vnode_isdir (vp)) {
        isdir = 1;
    }

    /* Ensure we only use the effective class here */
    newclass = CP_CLASS(newclass);

    if (!cp_is_valid_class(isdir, newclass)) {
        printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
        return EINVAL;
    }

    /* Is this an interesting vp? */
    if (!cp_vnode_is_eligible(vp)) {
        return EBADF;
    }

    /* Is the mount point formatted for content protection? */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return ENOTSUP;
    }

    cp = VTOC(vp);
    hfsmp = VTOHFS(vp);
    if (hfsmp->hfs_flags & HFS_READ_ONLY) {
        return EROFS;
    }

    /*
     * Take the cnode truncate lock exclusive because we want to manipulate the
     * CP blob. The lock-event handling code is doing the same. This also forces
     * all pending IOs to drain before we can re-write the persistent and cache keys.
     */
    hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    /*
     * The truncate lock is not sufficient to guarantee the CP blob
     * isn't being used. We must wait for existing writes to finish.
     */
    vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");

    if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return EINVAL;
    }

    entry = cp->c_cpentry;
    if (entry == NULL) {
        error = EINVAL;
        goto out;
    }

    /*
     * Re-wrap the per-file key with the new class.
     * Generate an entirely new key if switching to F.
     */
    if (vnode_isreg(vp)) {
        /*
         * The vnode is a file. Before proceeding with the re-wrap, we need
         * to unwrap the keys before proceeding. This is to ensure that
         * the destination class's properties still work appropriately for the
         * target class (since B allows I/O but an unwrap prior to the next unlock
         * will not be allowed).
         */
        if (entry->cp_flags & CP_KEY_FLUSHED) {
            error = cp_restore_keys (entry, hfsmp, cp);
            if (error) {
                goto out;
            }
        }

        if (newclass == PROTECTION_CLASS_F) {
            /* Verify that the file is blockless if switching to class F */
            if (cp->c_datafork->ff_size > 0) {
                error = EINVAL;
                goto out;
            }

            /* newclass is only the effective class */
            entry->cp_pclass = newclass;

            /* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */
            entry->cp_cache_key_len = CP_MAX_KEYSIZE;
            read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);
            if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
                cp_setup_aes_ctx (entry);
                entry->cp_flags |= CP_OFF_IV_ENABLED;
            }
            bzero(entry->cp_persistent_key, entry->cp_persistent_key_len);
            entry->cp_persistent_key_len = 0;
        }
        else {
            /* Deny the setclass if the file is to be moved from F to something else */
            if (entry->cp_pclass == PROTECTION_CLASS_F) {
                error = EPERM;
                goto out;
            }

            /* We cannot call cp_rewrap unless the keys were already in existence. */
            if (entry->cp_flags & CP_NEEDS_KEYS) {
                struct cprotect *newentry = NULL;
                /*
                 * We want to fail if we can't wrap to the target class. By not setting
                 * CP_KEYWRAP_DIFFCLASS, we tell key generation that if it can't wrap
                 * to 'newclass' then it should error out.
                 */
                uint32_t flags = 0;
                error = cp_generate_keys (hfsmp, cp, newclass, flags, &newentry);
                if (error == 0) {
                    cp_replace_entry (cp, newentry);
                }
                /* Bypass the setxattr code below since generate_keys does it for us */
                goto out;
            }

            error = cp_rewrap(cp, hfsmp, newclass);
        }
        if (error) {
            /* we didn't have perms to set this class. leave file as-is and error out */
            goto out;
        }
    }
    else if (vnode_isdir(vp)) {
        /* For directories, just update the pclass. newclass is only the effective class */
        entry->cp_pclass = newclass;
        error = 0;
    }
    else {
        /* anything else, just error out */
        error = EINVAL;
        goto out;
    }

    /*
     * We get here if the new class was F, or if we were re-wrapping a cprotect that already
     * existed. If the keys were never generated, then they'll skip the setxattr calls.
     */
    error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
    if (error == ENOATTR) {
        error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
    }

out:
    if (took_truncate_lock) {
        hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
    }
    hfs_unlock(cp);
    return error;
}
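
/*
 * Illustrative sketch (not part of the original file): the userspace
 * counterpart of cp_vnode_setclass(). F_SETPROTECTIONCLASS routes through
 * the HFS fcntl handler into this function; class F is expected to be
 * rejected for non-empty files, matching the ff_size check above.
 */
#if 0
#include <fcntl.h>

static int
example_set_class(int fd, int new_class)
{
    /* Fails with EPERM if the keybag cannot wrap to new_class right now. */
    return fcntl(fd, F_SETPROTECTIONCLASS, new_class);
}
#endif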
int cp_vnode_transcode(vnode_t vp, void *key, unsigned *len)
{
    struct cnode *cp;
    struct cprotect *entry = 0;
    int error = 0;
    int took_truncate_lock = 0;
    struct hfsmount *hfsmp = NULL;

    /* Structures passed between HFS and AKS */
    cp_cred_s access_in;
    cp_wrapped_key_s wrapped_key_in, wrapped_key_out;

    /* Is this an interesting vp? */
    if (!cp_vnode_is_eligible(vp)) {
        return EBADF;
    }

    /* Is the mount point formatted for content protection? */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return ENOTSUP;
    }

    cp = VTOC(vp);
    hfsmp = VTOHFS(vp);

    /*
     * Take the cnode truncate lock exclusive because we want to manipulate the
     * CP blob. The lock-event handling code is doing the same. This also forces
     * all pending IOs to drain before we can re-write the persistent and cache keys.
     */
    hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return EINVAL;
    }

    entry = cp->c_cpentry;
    if (entry == NULL) {
        error = EINVAL;
        goto out;
    }

    if ((entry->cp_flags & CP_NEEDS_KEYS)) {
        /*
         * If we are transcoding keys for AKB, then we should have already established
         * a set of keys for this vnode. If we don't have keys yet, then something bad
         * happened.
         */
        error = EINVAL;
        goto out;
    }

    /* Send the per-file key in wrapped form for re-wrap with the current class information
     * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
     * Don't need to process any outputs, so just clear the locks and pass along the error. */
    if (vnode_isreg(vp)) {

        /* Picked up the following from cp_wrap().
         * If needed, more comments available there. */

        if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
            error = EINVAL;
            goto out;
        }

        cp_init_access(&access_in, cp);

        bzero(&wrapped_key_in, sizeof(wrapped_key_in));
        bzero(&wrapped_key_out, sizeof(wrapped_key_out));
        wrapped_key_in.key = entry->cp_persistent_key;
        wrapped_key_in.key_len = entry->cp_persistent_key_len;
        /* Use the actual persistent class when talking to AKS */
        wrapped_key_in.dp_class = entry->cp_pclass;
        wrapped_key_out.key = key;
        wrapped_key_out.key_len = *len;

        error = g_cp_wrap_func.backup_key(&access_in,
                &wrapped_key_in,
                &wrapped_key_out);

        if (error == 0) {
            *len = wrapped_key_out.key_len;
        }
    }

out:
    if (took_truncate_lock) {
        hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
    }
    hfs_unlock(cp);
    return error;
}
/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock. This is because
 * the thread calling us may already have the truncate lock. It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys. We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
    struct cprotect *entry;
    int error = 0;
    struct hfsmount *hfsmp = NULL;
    struct cnode *cp = NULL;

    /*
     * First, do validation against the vnode before proceeding any further:
     * Is this vnode originating from a valid content-protected filesystem?
     */
    if (cp_vnode_is_eligible(vp) == 0) {
        /*
         * It is either not HFS or not a file/dir. Just return success. This is a valid
         * case if servicing i/o against another filesystem type from VFS.
         */
        return 0;
    }

    if (cp_fs_protected (VTOVFS(vp)) == 0) {
        /*
         * The underlying filesystem does not support content protection. This is also
         * a valid case. Simply return success.
         */
        return 0;
    }

    /*
     * At this point, we know we have a HFS vnode that backs a file or directory on a
     * filesystem that supports content protection.
     */
    cp = VTOC(vp);

    if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
        return error;
    }

    entry = cp->c_cpentry;

    if (entry == NULL) {
        /*
         * If this cnode is not content protected, simply return success.
         * Note that this function is called by all I/O-based call sites
         * when CONFIG_PROTECT is enabled during XNU building.
         *
         * All files should have cprotect structs. It's possible to encounter
         * a directory from a V2.0 CP system, but all files should have protection
         * EAs.
         */
        if (vnode_isreg(vp)) {
            error = EPERM;
        }
        goto out;
    }

    hfsmp = VTOHFS(vp);

    if ((error = cp_check_access(cp, hfsmp, vnop))) {
        /* check for raw encrypted access before bailing out */
        if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
            /*
             * read access only + asking for the raw encrypted bytes
             * is legitimate, so reset the error value to 0
             */
            error = 0;
        }
        else {
            goto out;
        }
    }

    if (entry->cp_flags == 0) {
        /* no more work to do */
        goto out;
    }

    /* upgrade to exclusive lock */
    if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
        if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
            return error;
        }
    } else {
        cp->c_lockowner = current_thread();
    }

    /* generate new keys if none have ever been saved */
    if ((entry->cp_flags & CP_NEEDS_KEYS)) {
        struct cprotect *newentry = NULL;
        /*
         * It's ok if this ends up being wrapped in a different class than 'pclass'.
         * Class modification is OK here.
         */
        uint32_t flags = CP_KEYWRAP_DIFFCLASS;

        error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
        if (error == 0) {
            cp_replace_entry (cp, newentry);
            entry = newentry;
        }
        else {
            goto out;
        }
    }

    /* unwrap keys if needed */
    if (entry->cp_flags & CP_KEY_FLUSHED) {
        if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
            /* no need to try to restore keys; they are not going to be used */
            error = 0;
        }
        else {
            error = cp_restore_keys(entry, hfsmp, cp);
            if (error) {
                goto out;
            }
        }
    }

    /* write out the xattr if it's new */
    if (entry->cp_flags & CP_NO_XATTR)
        error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:
    hfs_unlock(cp);
    return error;
}
int
cp_handle_open(struct vnode *vp, int mode)
{
    struct cnode *cp = NULL;
    struct cprotect *entry = NULL;
    struct hfsmount *hfsmp;
    int error = 0;

    /* If vnode not eligible, just return success */
    if (!cp_vnode_is_eligible(vp)) {
        return 0;
    }

    /* If mount point not properly set up, then also return success */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return 0;
    }

    /* We know the vnode is in a valid state. Acquire cnode and validate */
    cp = VTOC(vp);
    hfsmp = VTOHFS(vp);

    if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
        return error;
    }

    entry = cp->c_cpentry;
    if (entry == NULL) {
        /*
         * If the mount is protected and we couldn't get a cprotect for this vnode,
         * then it's not valid for opening.
         */
        if (vnode_isreg(vp)) {
            error = EPERM;
        }
        goto out;
    }

    if (!S_ISREG(cp->c_mode))
        goto out;

    /*
     * Does the cnode have keys yet? If not, then generate them.
     */
    if (entry->cp_flags & CP_NEEDS_KEYS) {
        struct cprotect *newentry = NULL;
        /* Allow the keybag to override our class preferences */
        uint32_t flags = CP_KEYWRAP_DIFFCLASS;
        error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
        if (error == 0) {
            cp_replace_entry (cp, newentry);
            entry = newentry;
        }
        else {
            goto out;
        }
    }

    /*
     * We want to minimize the number of unwraps that we'll have to do since
     * the cost can vary, depending on the platform we're running.
     */
    switch (CP_CLASS(entry->cp_pclass)) {
        case PROTECTION_CLASS_B:
            if (mode & O_CREAT) {
                /*
                 * Class B always allows creation. Since O_CREAT was passed through
                 * we infer that this was a newly created vnode/cnode. Even though a potential
                 * race exists when multiple threads attempt to create/open a particular
                 * file, only one can "win" and actually create it. VFS will unset the
                 * O_CREAT bit on the loser.
                 *
                 * Note that skipping the unwrap check here is not a security issue --
                 * we have to unwrap the key permanently upon the first I/O.
                 */
                break;
            }

            if ((entry->cp_flags & CP_KEY_FLUSHED) == 0) {
                /*
                 * For a class B file, attempt the unwrap if we have the key in
                 * core already.
                 * The device could have just transitioned into the lock state, and
                 * this vnode may not yet have been purged from the vnode cache (which would
                 * remove the keys).
                 */
                cp_cred_s access_in;
                cp_wrapped_key_s wrapped_key_in;

                cp_init_access(&access_in, cp);
                bzero(&wrapped_key_in, sizeof(wrapped_key_in));
                wrapped_key_in.key = entry->cp_persistent_key;
                wrapped_key_in.key_len = entry->cp_persistent_key_len;
                /* Use the persistent class when talking to AKS */
                wrapped_key_in.dp_class = entry->cp_pclass;
                error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, NULL);
                if (error) {
                    error = EPERM;
                }
                break;
            }
            /* otherwise, fall through to attempt the unwrap/restore */
        case PROTECTION_CLASS_A:
        case PROTECTION_CLASS_C:
            /*
             * At this point, we know that we need to attempt an unwrap if needed; we want
             * to make sure that open(2) fails properly if the device is either just-locked
             * or never made it past first unlock. Since the keybag serializes access to the
             * unwrapping keys for us and only calls our VFS callback once they've been purged,
             * we will get here in two cases:
             *
             * A) we're in a window before the wrapping keys are purged; this is OK since when they get
             * purged, the vnode will get flushed if needed.
             *
             * B) The keys are already gone. In this case, the restore_keys call below will fail.
             *
             * Since this function is bypassed entirely if we're opening a raw encrypted file,
             * we can always attempt the restore.
             */
            if (entry->cp_flags & CP_KEY_FLUSHED) {
                error = cp_restore_keys(entry, hfsmp, cp);
            }
            if (error) {
                error = EPERM;
            }
            break;

        case PROTECTION_CLASS_D:
        default:
            break;
    }

out:
    hfs_unlock(cp);
    return error;
}
/*
 * During hfs resize operations, we have slightly different constraints than during
 * normal VNOPS that read/write data to files. Specifically, we already have the cnode
 * locked (so nobody else can modify it), and we are doing the IO with root privileges, since
 * we are moving the data behind the user's back. So, we skip access checks here (for unlock
 * vs. lock), and don't worry about non-existing keys. If the file exists on-disk with valid
 * payload, then it must have keys set up already by definition.
 */
int
cp_handle_relocate (struct cnode *cp, struct hfsmount *hfsmp)
{
    struct cprotect *entry;
    int error = -1;

    /* cp is already locked */
    entry = cp->c_cpentry;
    if (entry == NULL)
        goto out;

    /*
     * Still need to validate whether to permit access to the file or not
     * based on lock status.
     */
    if ((error = cp_check_access(cp, hfsmp, CP_READ_ACCESS | CP_WRITE_ACCESS))) {
        goto out;
    }

    if (entry->cp_flags == 0) {
        /* no more work to do */
        error = 0;
        goto out;
    }

    /* it must have keys since it is an existing file with actual payload */

    /* unwrap keys if needed */
    if (entry->cp_flags & CP_KEY_FLUSHED) {
        error = cp_restore_keys(entry, hfsmp, cp);
    }

    /*
     * Don't need to write out the EA since if the file has actual extents,
     * it must have an EA.
     */
out:

    /* return the cp still locked */
    return error;
}
/*
 * Gets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to native endian-ness as needed.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
    uio_t auio;
    char uio_buf[UIO_SIZEOF(1)];
    size_t attrsize = sizeof(struct cp_root_xattr);
    int error = 0;
    struct vnop_getxattr_args args;

    if (!outxattr) {
        panic("Content Protection: cp_xattr called with xattr == NULL");
    }

    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(outxattr), attrsize);

    args.a_desc = NULL; // unused
    args.a_vp = NULL; // unused since we're reading the EA from the root folder.
    args.a_name = CONTENT_PROTECTION_XATTR_NAME;
    args.a_uio = auio;
    args.a_size = &attrsize;
    args.a_options = XATTR_REPLACE;
    args.a_context = NULL; // unused

    error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);

    /* Now convert the multi-byte fields to native endianness */
    outxattr->major_version = OSSwapLittleToHostInt16(outxattr->major_version);
    outxattr->minor_version = OSSwapLittleToHostInt16(outxattr->minor_version);
    outxattr->flags = OSSwapLittleToHostInt64(outxattr->flags);

    return error;
}
/*
 * Sets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to little endian as needed.
 *
 * This will be written to the disk when it detects the EA is not there, or when we need
 * to make a modification to the on-disk version that can be done in-place.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
    int error = 0;
    struct vnop_setxattr_args args;

    args.a_desc = NULL;
    args.a_vp = NULL;
    args.a_name = CONTENT_PROTECTION_XATTR_NAME;
    args.a_uio = NULL; // pass data ptr instead
    args.a_options = 0;
    args.a_context = NULL; // no context needed, only done from mount.

    /* Now convert the multi-byte fields to little endian before writing to disk. */
    newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
    newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);
    newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

    error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
            sizeof(struct cp_root_xattr), &args, hfsmp, 1);
    return error;
}
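
/*
 * Illustrative sketch (not part of the original file): the intended
 * read-modify-write cycle for the root EA. cp_getrootxattr() swaps the
 * on-disk little-endian fields to host order, so callers can update them
 * natively and hand the struct back to cp_setrootxattr(), which swaps them
 * back to little-endian before writing. Error handling is elided; the
 * function name is hypothetical.
 */
#if 0
static int
example_update_root_flags(struct hfsmount *hfsmp, uint64_t extra_flags)
{
    struct cp_root_xattr root_xattr;

    (void) cp_getrootxattr(hfsmp, &root_xattr);   /* fields now host-endian */
    root_xattr.flags |= extra_flags;              /* modify in host order   */
    return cp_setrootxattr(hfsmp, &root_xattr);   /* swapped back on write  */
}
#endif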
/*
 * Stores new xattr data on the cnode.
 * cnode lock held exclusive (if available).
 *
 * This function is also invoked during file creation.
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp, uint32_t fileid, int options)
{
    int error = 0;
    size_t attrsize;
    struct vnop_setxattr_args args;
    uint32_t target_fileid;
    struct cnode *arg_cp = NULL;
    uint32_t tempflags = 0;

    args.a_desc = NULL;

    if (hfsmp->hfs_flags & HFS_READ_ONLY) {
        return EROFS;
    }

    if (cp) {
        args.a_vp = cp->c_vp;
        target_fileid = 0;
        arg_cp = cp;
    }
    else {
        /*
         * When we set the EA in the same txn as the file creation,
         * we do not have a vnode/cnode yet. Use the specified fileid.
         */
        args.a_vp = NULL;
        target_fileid = fileid;
    }
    args.a_name = CONTENT_PROTECTION_XATTR_NAME;
    args.a_uio = NULL; // pass data ptr instead
    args.a_options = options;
    args.a_context = vfs_context_current();

    /* Note that it's OK to write out an XATTR without keys. */
    /* Disable flags that will be invalid as we're writing the EA out at this point. */
    tempflags = entry->cp_flags;

    /* we're writing the EA; CP_NO_XATTR is invalid */
    tempflags &= ~CP_NO_XATTR;

    /* CP_SEP_WRAPPEDKEY is informational/runtime only. */
    tempflags &= ~CP_SEP_WRAPPEDKEY;

    switch(hfsmp->hfs_running_cp_major_vers) {
        case CP_NEW_MAJOR_VERS: {
            struct cp_xattr_v4 *newxattr = NULL; // 70+ bytes; don't alloc on stack.
            MALLOC (newxattr, struct cp_xattr_v4 *, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
            if (newxattr == NULL) {
                error = ENOMEM;
                break;
            }
            bzero (newxattr, sizeof(struct cp_xattr_v4));

            attrsize = sizeof(*newxattr) - CP_MAX_WRAPPEDKEYSIZE + entry->cp_persistent_key_len;

            /* Endian swap the multi-byte fields into L.E from host. */
            newxattr->xattr_major_version = OSSwapHostToLittleInt16 (hfsmp->hfs_running_cp_major_vers);
            newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
            newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
            newxattr->flags = OSSwapHostToLittleInt32(tempflags);
            newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
            bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);

            error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);

            FREE(newxattr, M_TEMP);
            break;
        }
        case CP_PREV_MAJOR_VERS: {
            struct cp_xattr_v2 *newxattr = NULL;
            MALLOC (newxattr, struct cp_xattr_v2 *, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
            if (newxattr == NULL) {
                error = ENOMEM;
                break;
            }
            bzero (newxattr, sizeof(struct cp_xattr_v2));

            attrsize = sizeof(*newxattr);

            /* Endian swap the multi-byte fields into L.E from host. */
            newxattr->xattr_major_version = OSSwapHostToLittleInt16(hfsmp->hfs_running_cp_major_vers);
            newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
            newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
            newxattr->flags = OSSwapHostToLittleInt32(tempflags);
            newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
            bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);

            error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);

            FREE (newxattr, M_TEMP);
            break;
        }
        default:
            printf("hfs: cp_setxattr: Unknown CP version running \n");
            error = EINVAL;
            break;
    }

    if (error == 0) {
        entry->cp_flags &= ~CP_NO_XATTR;
    }

    return error;
}
/*
 * Used by an fcntl to query the underlying FS for its content protection version #
 */
int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
    int err = 0;
    struct hfsmount *hfsmp = NULL;
    struct mount *mp = NULL;

    mp = VTOVFS(vp);

    /* check if it supports content protection */
    if (cp_fs_protected(mp) == 0) {
        return ENOTSUP;
    }

    hfsmp = VFSTOHFS(mp);
    /* figure out the level */

    err = cp_root_major_vers(mp);

    if (err == 0) {
        *level = hfsmp->hfs_running_cp_major_vers;
    }
    /* in error case, cp_root_major_vers will just return EINVAL. Use that */

    return err;
}
/* Used by fcntl to query default protection level of FS */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
    struct hfsmount *hfsmp = NULL;
    struct mount *mp = NULL;

    mp = VTOVFS(vp);

    /* check if it supports content protection */
    if (cp_fs_protected(mp) == 0) {
        return ENOTSUP;
    }

    hfsmp = VFSTOHFS(mp);
    /* figure out the default */

    *level = hfsmp->default_cp_class;
    return 0;
}
/********************
 * Private Functions
 *******************/

static int
cp_root_major_vers(mount_t mp)
{
    int err = 0;
    struct cp_root_xattr xattr;
    struct hfsmount *hfsmp = NULL;

    hfsmp = vfs_fsprivate(mp);
    err = cp_getrootxattr (hfsmp, &xattr);

    if (err == 0) {
        hfsmp->hfs_running_cp_major_vers = xattr.major_version;
    }
    else {
        return EINVAL;
    }

    return 0;
}
static int
cp_vnode_is_eligible(struct vnode *vp)
{
    return ((vp->v_op == hfs_vnodeop_p) &&
            (!vnode_issystem(vp)) &&
            (vnode_isreg(vp) || vnode_isdir(vp)));
}
int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
    /*
     * The valid protection classes are from 0 -> N.
     * We use a signed argument to detect unassigned values from
     * directory entry creation time in HFS.
     */
    if (isdir) {
        /* Directories are not allowed to have F, but they can have "NONE" */
        return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
                (protectionclass <= PROTECTION_CLASS_D));
    }
    else {
        return ((protectionclass >= PROTECTION_CLASS_A) &&
                (protectionclass <= PROTECTION_CLASS_F));
    }
}
static struct cprotect *
cp_entry_alloc(size_t keylen)
{
    struct cprotect *cp_entry;

    if (keylen > CP_MAX_WRAPPEDKEYSIZE)
        return (NULL);

    MALLOC(cp_entry, struct cprotect *, sizeof(struct cprotect) + keylen,
            M_TEMP, M_WAITOK);
    if (cp_entry == NULL)
        return (NULL);

    bzero(cp_entry, sizeof(*cp_entry) + keylen);
    cp_entry->cp_persistent_key_len = keylen;
    return (cp_entry);
}

static void
cp_entry_dealloc(struct cprotect *entry)
{
    uint32_t keylen = entry->cp_persistent_key_len;
    bzero(entry, (sizeof(*entry) + keylen));
    FREE(entry, M_TEMP);
}
/*
 * Initializes a new cprotect entry with xattr data from the cnode.
 * cnode lock held shared
 */
static int
cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, struct cprotect **outentry)
{
    int error = 0;
    uio_t auio;
    size_t attrsize;
    char uio_buf[UIO_SIZEOF(1)];
    struct vnop_getxattr_args args;
    struct cprotect *entry = NULL;

    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
    args.a_desc = NULL; // unused
    args.a_vp = cp->c_vp;
    args.a_name = CONTENT_PROTECTION_XATTR_NAME;
    args.a_uio = auio;
    args.a_options = XATTR_REPLACE;
    args.a_context = vfs_context_current(); // unused

    switch (hfsmp->hfs_running_cp_major_vers) {
        case CP_NEW_MAJOR_VERS: {
            struct cp_xattr_v4 *xattr = NULL;
            MALLOC (xattr, struct cp_xattr_v4 *, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
            if (xattr == NULL) {
                error = ENOMEM;
                break;
            }
            bzero(xattr, sizeof (struct cp_xattr_v4));
            attrsize = sizeof(*xattr);

            uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
            args.a_size = &attrsize;

            error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
            if (error != 0) {
                FREE (xattr, M_TEMP);
                goto out;
            }

            /* Endian swap the multi-byte fields into host endianness from L.E. */
            xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
            xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
            xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
            xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
            xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);

            if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
                printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
                        xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
                error = EINVAL;
                FREE (xattr, M_TEMP);
                goto out;
            }
            /*
             * Prevent a buffer overflow, and validate the key length obtained from the
             * EA. If it's too big, then bail out, because the EA can't be trusted at this
             * point.
             */
            if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE) {
                error = EINVAL;
                FREE (xattr, M_TEMP);
                goto out;
            }

            /*
             * Class F files have no backing key; their keylength should be 0,
             * though they should have the proper flags set.
             *
             * A request to instantiate a CP for a class F file should result
             * in a bzero'd cp that just says class F, with key_flushed set.
             */

            /* set up entry with information from xattr */
            entry = cp_entry_alloc(xattr->key_size);
            if (!entry) {
                FREE (xattr, M_TEMP);
                return ENOMEM;
            }

            entry->cp_pclass = xattr->persistent_class;

            /*
             * Suppress invalid flags that should not be set.
             * If we have gotten this far, then CP_NO_XATTR cannot possibly
             * be valid; the EA exists.
             */
            xattr->flags &= ~CP_NO_XATTR;

            entry->cp_flags = xattr->flags;
            if (xattr->xattr_major_version >= CP_NEW_MAJOR_VERS) {
                entry->cp_flags |= CP_OFF_IV_ENABLED;
            }

            if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_F) {
                bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
            }

            FREE (xattr, M_TEMP);
            break;
        }
        case CP_PREV_MAJOR_VERS: {
            struct cp_xattr_v2 *xattr = NULL;
            MALLOC (xattr, struct cp_xattr_v2 *, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
            if (xattr == NULL) {
                error = ENOMEM;
                break;
            }
            bzero (xattr, sizeof (struct cp_xattr_v2));
            attrsize = sizeof(*xattr);

            uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
            args.a_size = &attrsize;

            error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
            if (error != 0) {
                FREE (xattr, M_TEMP);
                goto out;
            }

            /* Endian swap the multi-byte fields into host endianness from L.E. */
            xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
            xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
            xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
            xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
            xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);

            if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
                printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
                        xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
                error = EINVAL;
                FREE (xattr, M_TEMP);
                goto out;
            }

            /*
             * Prevent a buffer overflow, and validate the key length obtained from the
             * EA. If it's too big, then bail out, because the EA can't be trusted at this
             * point.
             */
            if (xattr->key_size > CP_V2_WRAPPEDKEYSIZE) {
                error = EINVAL;
                FREE (xattr, M_TEMP);
                goto out;
            }

            /* set up entry with information from xattr */
            entry = cp_entry_alloc(xattr->key_size);
            if (!entry) {
                FREE (xattr, M_TEMP);
                return ENOMEM;
            }

            entry->cp_pclass = xattr->persistent_class;

            /*
             * Suppress invalid flags that should not be set.
             * If we have gotten this far, then CP_NO_XATTR cannot possibly
             * be valid; the EA exists.
             */
            xattr->flags &= ~CP_NO_XATTR;

            entry->cp_flags = xattr->flags;

            if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_F) {
                bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
            }

            FREE (xattr, M_TEMP);
            break;
        }
        default:
            error = EINVAL;
            break;
    }

out:
    if (error == 0) {
        *outentry = entry;
    }
    return error;
}
/*
 * If permitted, restore entry's unwrapped key from the persistent key.
 * If not, clear key and set CP_KEY_FLUSHED.
 * cnode lock held exclusive
 */
static int
cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
{
    int error = 0;

    error = cp_unwrap(hfsmp, entry, cp);
    if (error) {
        entry->cp_flags |= CP_KEY_FLUSHED;
        bzero(entry->cp_cache_key, entry->cp_cache_key_len);
        error = EPERM;
    }
    else {
        /* ready for business */
        entry->cp_flags &= ~CP_KEY_FLUSHED;
    }
    return error;
}
static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
    /* Use a pointer-width integer field for casting */
    unsigned long new_state;
    struct hfsmount *hfsmp;

    /*
     * When iterating the various mount points that may
     * be present on a content-protected device, we need to skip
     * those that do not have it enabled.
     */
    if (!cp_fs_protected(mp)) {
        return 0;
    }
    new_state = (unsigned long) arg;

    hfsmp = VFSTOHFS(mp);

    hfs_lock_mount(hfsmp);
    /* this loses all of the upper bytes of precision; that's OK */
    hfsmp->hfs_cp_lock_state = (uint8_t) new_state;
    hfs_unlock_mount(hfsmp);

    if (new_state == CP_LOCKED_STATE) {
        /*
         * We respond only to lock events. Since cprotect structs
         * decrypt/restore keys lazily, the unlock events don't
         * actually cause anything to happen.
         */
        return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg);
    }
    /* Otherwise just return 0. */
    return 0;
}
/*
 * Deny access to protected files if keys have been locked.
 */
static int
cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
{
    int error = 0;

    /*
     * For now it's OK to examine the state variable here without
     * holding the HFS lock. This is only a short-circuit; if the state
     * transitions (or is in transition) after we examine this field, we'd
     * have to handle that anyway.
     */
    if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
        return 0;
    }

    if (!cp->c_cpentry) {
        /* unprotected node */
        return 0;
    }

    if (!S_ISREG(cp->c_mode)) {
        return 0;
    }

    /* Deny all access for class A files */
    switch (CP_CLASS(cp->c_cpentry->cp_pclass)) {
        case PROTECTION_CLASS_A: {
            error = EPERM;
            break;
        }
        default:
            error = 0;
            break;
    }

    return error;
}
/*
 * Respond to a lock or unlock event.
 * On lock: clear out keys from memory, then flush file contents.
 * On unlock: nothing (function not called).
 */
static int
cp_lock_vnode_callback(struct vnode *vp, void *arg)
{
    struct cnode *cp = NULL;
    struct cprotect *entry = NULL;
    int error = 0;
    int locked = 1;
    unsigned long action = 0;
    int took_truncate_lock = 0;

    error = vnode_getwithref (vp);
    if (error) {
        return error;
    }

    cp = VTOC(vp);

    /*
     * When cleaning cnodes due to a lock event, we must
     * take the truncate lock AND the cnode lock. By taking
     * the truncate lock here, we force (nearly) all pending IOs
     * to drain before we can acquire the truncate lock. All HFS cluster
     * io calls except for swapfile IO need to acquire the truncate lock
     * prior to calling into the cluster layer.
     */
    hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

    entry = cp->c_cpentry;
    if (!entry) {
        /* unprotected vnode: not a regular file */
        goto out;
    }

    action = (unsigned long) arg;
    switch (action) {
        case CP_LOCKED_STATE: {
            vfs_context_t ctx;
            if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
                vnode_isdir(vp)) {
                /*
                 * There is no change at lock for other classes than A.
                 * B is kept in memory for writing, and class F (for VM) does
                 * not have a wrapped key, so there is no work needed for
                 * wrapping/unwrapping.
                 *
                 * Note that 'class F' is relevant here because if
                 * hfs_vnop_strategy does not take the cnode lock
                 * to protect the cp blob across IO operations, we rely
                 * implicitly on the truncate lock to be held when doing IO.
                 * The only case where the truncate lock is not held is during
                 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
                 * directly to cluster_pageout.
                 */
                goto out;
            }

            /* Before doing anything else, zero-fill sparse ranges as needed */
            ctx = vfs_context_current();
            (void) hfs_filedone (vp, ctx, 0);

            /* first, sync back dirty pages */
            hfs_unlock (cp);
            ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
            hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

            /*
             * There was a concern here (9206856) about flushing keys before the nand layer is done using them.
             * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
             * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF,
             * which in turn calls buf_biodone() and eventually unblocks ubc_msync().
             * Also verified that the cached data in IOFS is overwritten by other data, and there
             * is no key leakage in that layer.
             */

            entry->cp_flags |= CP_KEY_FLUSHED;
            bzero(&entry->cp_cache_key, entry->cp_cache_key_len);
            bzero(&entry->cp_cache_iv_ctx, sizeof(aes_encrypt_ctx));

            /* some write may have arrived in the mean time. dump those pages */
            hfs_unlock(cp);
            locked = 0;

            ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
            break;
        }
        case CP_UNLOCKED_STATE: {
            /* no-op */
            break;
        }
        default:
            panic("Content Protection: unknown lock action %lu\n", action);
    }

out:
    if (locked) {
        hfs_unlock(cp);
    }

    if (took_truncate_lock) {
        hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
    }

    vnode_put (vp);
    return error;
}
/*
 * Generate a new wrapped key based on the existing cache key.
 */
static int
cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass)
{
    struct cprotect *entry = cp->c_cpentry;
    uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
    size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
    int error = 0;
    newclass = CP_CLASS(newclass);

    /* Structures passed between HFS and AKS */
    cp_cred_s access_in;
    cp_wrapped_key_s wrapped_key_in;
    cp_wrapped_key_s wrapped_key_out;

    /*
     * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
     * key that is only good as long as the file is open. There is no
     * wrapped key, so there isn't anything to wrap.
     */
    if (newclass == PROTECTION_CLASS_F) {
        return EINVAL;
    }

    cp_init_access(&access_in, cp);

    bzero(&wrapped_key_in, sizeof(wrapped_key_in));
    wrapped_key_in.key = entry->cp_persistent_key;
    wrapped_key_in.key_len = entry->cp_persistent_key_len;
    /* Use the persistent class when talking to AKS */
    wrapped_key_in.dp_class = entry->cp_pclass;

    bzero(&wrapped_key_out, sizeof(wrapped_key_out));
    wrapped_key_out.key = new_persistent_key;
    wrapped_key_out.key_len = keylen;

    /*
     * inode is passed here to find the backup bag wrapped blob
     * from userspace. This lookup will occur shortly after creation
     * and only if the file still exists. Beyond this lookup the
     * inode is not used. Technically there is a race; we practically
     * don't lose.
     */
    error = g_cp_wrap_func.rewrapper(&access_in,
            newclass, /* new class */
            &wrapped_key_in,
            &wrapped_key_out);

    keylen = wrapped_key_out.key_len;

    if (error == 0) {
        struct cprotect *newentry = NULL;
        /*
         * Verify that AKS returned to us a wrapped key of the
         * target class requested.
         */
        /* Get the effective class here */
        int effective = CP_CLASS(wrapped_key_out.dp_class);
        if (effective != newclass) {
            /*
             * Fail the operation if defaults or some other enforcement
             * dictated that the class be wrapped differently.
             */

            /* TODO: Invalidate the key when 12170074 unblocked */
            return EPERM;
        }

        /* v2 EA's don't support the larger class B keys */
        if ((keylen != CP_V2_WRAPPEDKEYSIZE) &&
                (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
            return EINVAL;
        }

        /* Allocate a new cpentry */
        newentry = cp_entry_alloc (keylen);
        bcopy (entry, newentry, sizeof(struct cprotect));

        /* copy the new key into the entry */
        bcopy (new_persistent_key, newentry->cp_persistent_key, keylen);
        newentry->cp_persistent_key_len = keylen;
        newentry->cp_backing_cnode = cp;

        /* Actually record/store what AKS reported back, not the effective class stored in newclass */
        newentry->cp_pclass = wrapped_key_out.dp_class;

        /* Attach the new entry to the cnode */
        cp->c_cpentry = newentry;

        /* destroy the old entry */
        cp_entry_destroy (entry);
    }
    else {
        error = EPERM;
    }

    return error;
}
static int
cp_unwrap(struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
{
    int error = 0;
    uint8_t iv_key[CP_IV_KEYSIZE];

    /* Structures passed between HFS and AKS */
    cp_cred_s access_in;
    cp_wrapped_key_s wrapped_key_in;
    cp_raw_key_s key_out;

    /*
     * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
     * key that is only good as long as the file is open. There is no
     * wrapped key, so there isn't anything to unwrap.
     */
    if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
        return EPERM;
    }

    cp_init_access(&access_in, cp);

    bzero(&wrapped_key_in, sizeof(wrapped_key_in));
    wrapped_key_in.key = entry->cp_persistent_key;
    wrapped_key_in.key_len = entry->cp_persistent_key_len;
    /* Use the persistent class when talking to AKS */
    wrapped_key_in.dp_class = entry->cp_pclass;

    bzero(&key_out, sizeof(key_out));
    key_out.iv_key = iv_key;
    key_out.key = entry->cp_cache_key;
    /*
     * The unwrapper should validate/set the key length for
     * the IV key length and the cache key length, however we need
     * to supply the correct buffer length so that AKS knows how
     * many bytes it has to work with.
     */
    key_out.iv_key_len = CP_IV_KEYSIZE;
    key_out.key_len = CP_MAX_CACHEBUFLEN;

    error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, &key_out);
    if (!error) {
        if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
            panic ("cp_unwrap: invalid key length! (%ul)\n", key_out.key_len);
        }

        if (key_out.iv_key_len == 0 || key_out.iv_key_len > CP_IV_KEYSIZE) {
            panic ("cp_unwrap: invalid iv key length! (%ul)\n", key_out.iv_key_len);
        }

        entry->cp_cache_key_len = key_out.key_len;

        /* No need to go here for older EAs */
        if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
            aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
            entry->cp_flags |= CP_OFF_IV_ENABLED;
        }

        /* Is the key a raw wrapped key? */
        if (key_out.flags & CP_RAW_KEY_WRAPPEDKEY) {
            /* OR in the right bit for the cprotect */
            entry->cp_flags |= CP_SEP_WRAPPEDKEY;
        }
    }
    else {
        error = EPERM;
    }

    return error;
}
/* Setup AES context */
static int
cp_setup_aes_ctx(struct cprotect *entry)
{
    SHA1_CTX sha1ctxt;
    uint8_t cp_cache_iv_key[CP_IV_KEYSIZE]; /* Kiv */

    /* First init the cp_cache_iv_key[] */
    SHA1Init(&sha1ctxt);

    /*
     * We can only use this when the keys are generated in the AP; as a result
     * we only use the first 32 bytes of key length in the cache key
     */
    SHA1Update(&sha1ctxt, &entry->cp_cache_key[0], CP_MAX_KEYSIZE);
    SHA1Final(&cp_cache_iv_key[0], &sha1ctxt);

    aes_encrypt_key128(&cp_cache_iv_key[0], &entry->cp_cache_iv_ctx);

    return 0;
}
/*
 * Take a cnode that has already been initialized and establish persistent and
 * cache keys for it at this time. Note that at the time this is called, the
 * directory entry has already been created and we are holding the cnode lock
 * on 'cp'.
 */
int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, int targetclass,
        uint32_t keyflags, struct cprotect **newentry)
{
    int error = 0;
    struct cprotect *newcp = NULL;
    *newentry = NULL;

    /* Target class must be an effective class only */
    targetclass = CP_CLASS(targetclass);

    /* Validate that it has a cprotect already */
    if (cp->c_cpentry == NULL) {
        /* We can't do anything if it shouldn't be protected. */
        return 0;
    }

    /* Asserts for the underlying cprotect */
    if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
        /* should already have an xattr by this point. */
        error = EINVAL;
        goto out;
    }

    if (S_ISREG(cp->c_mode)) {
        if ((cp->c_cpentry->cp_flags & CP_NEEDS_KEYS) == 0){
            error = EINVAL;
            goto out;
        }
    }

    error = cp_new (targetclass, hfsmp, cp, cp->c_mode, keyflags, &newcp);
    if (error) {
        /*
         * Key generation failed. This is not necessarily fatal
         * since the device could have transitioned into the lock
         * state before we called this.
         */
        error = EPERM;
        goto out;
    }

    /*
     * If we got here, then we have a new cprotect.
     * Attempt to write the new one out.
     */
    error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);

    if (error) {
        /* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
        /* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
        if (newcp) {
            cp_entry_destroy(newcp);
        }
        goto out;
    }

    /*
     * If we get here then we can assert that:
     * 1) generated wrapped/unwrapped keys.
     * 2) wrote the new keys to disk.
     * 3) cprotect is ready to go.
     */
    newcp->cp_flags &= ~CP_NEEDS_KEYS;
    *newentry = newcp;

out:
    return error;
}
void cp_replace_entry (struct cnode *cp, struct cprotect *newentry)
{
    if (cp->c_cpentry) {
        cp_entry_destroy (cp->c_cpentry);
    }
    cp->c_cpentry = newentry;
    newentry->cp_backing_cnode = cp;

    return;
}
/*
 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
 * allocate a cprotect, and vend it back to the caller.
 *
 * Additionally, decide if keys are even needed -- directories get cprotect data structures
 * but they do not have keys.
 */
static int
cp_new(int newclass_eff, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode,
        uint32_t keyflags, struct cprotect **output_entry)
{
    struct cprotect *entry = NULL;
    int error = 0;
    int iswrapped = 0;
    uint8_t new_key[CP_MAX_CACHEBUFLEN];
    size_t new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tells us the proper key length; how much of this is used */
    uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
    size_t new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
    uint8_t iv_key[CP_IV_KEYSIZE];
    size_t iv_key_len = CP_IV_KEYSIZE;

    newclass_eff = CP_CLASS(newclass_eff);

    /* Structures passed between HFS and AKS */
    cp_cred_s access_in;
    cp_wrapped_key_s wrapped_key_out;
    cp_raw_key_s key_out;

    if (*output_entry != NULL) {
        panic ("cp_new with non-null entry!");
    }

    if (are_wraps_initialized == false) {
        printf("hfs: cp_new: wrap/gen functions not yet set\n");
        return ENXIO;
    }

    /* Sanity check that it's a file or directory here */
    if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
        return EPERM;
    }

    /*
     * Step 1: Generate Keys if needed.
     *
     * For class F files, the kernel provides the key.
     * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
     * key that is only good as long as the file is open. There is no
     * wrapped key, so there isn't anything to wrap.
     *
     * For class A->D files, the key store provides the key.
     *
     * For directories, we only give them a class; no keys.
     */
    if (S_ISDIR (cmode)) {
        /* Directories */
        new_persistent_len = 0;
        new_key_len = 0;
    }
    else {
        /* Must be a file */
        if (newclass_eff == PROTECTION_CLASS_F) {
            /* class F files are not wrapped; they can still use the max key size */
            new_key_len = CP_MAX_KEYSIZE;
            read_random (&new_key[0], new_key_len);
            new_persistent_len = 0;
        }
        else {
            /*
             * The keystore is provided the file ID so that it can associate
             * the wrapped backup blob with this key from userspace. This
             * lookup occurs after successful file creation. Beyond this, the
             * file ID is not used. Note that there is a potential race here if
             * the file ID is re-used.
             */
            cp_init_access(&access_in, cp);

            bzero(&key_out, sizeof(key_out));
            key_out.key = new_key;
            key_out.iv_key = iv_key;
            /*
             * AKS will override our key length fields, but we need to supply
             * the length of the buffer in those length fields so that
             * AKS knows how many bytes it has to work with.
             */
            key_out.key_len = new_key_len;
            key_out.iv_key_len = iv_key_len;

            bzero(&wrapped_key_out, sizeof(wrapped_key_out));
            wrapped_key_out.key = new_persistent_key;
            wrapped_key_out.key_len = new_persistent_len;

            error = g_cp_wrap_func.new_key(&access_in,
                    newclass_eff,
                    &key_out,
                    &wrapped_key_out);

            if (error) {
                /* keybag returned failure */
                error = EPERM;
                goto cpnew_fail;
            }

            /* Now sanity-check the output from new_key */
            if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
                panic ("cp_new: invalid key length! (%ul) \n", key_out.key_len);
            }

            if (key_out.iv_key_len == 0 || key_out.iv_key_len > CP_IV_KEYSIZE) {
                panic ("cp_new: invalid iv key length! (%ul) \n", key_out.iv_key_len);
            }

            /*
             * AKS is allowed to override our preferences and wrap with a
             * different class key for policy reasons. If we were told that
             * any class other than the one specified is unacceptable then error out
             * if that occurred. Check that the effective class returned by
             * AKS is the same as our effective new class.
             */
            if ((int)(CP_CLASS(wrapped_key_out.dp_class)) != newclass_eff) {
                if (keyflags & CP_KEYWRAP_DIFFCLASS) {
                    newclass_eff = CP_CLASS(wrapped_key_out.dp_class);
                }
                else {
                    error = EPERM;
                    /* TODO: When 12170074 fixed, release/invalidate the key! */
                    goto cpnew_fail;
                }
            }

            new_key_len = key_out.key_len;
            iv_key_len = key_out.iv_key_len;
            new_persistent_len = wrapped_key_out.key_len;

            /* Is the key a SEP wrapped key? */
            if (key_out.flags & CP_RAW_KEY_WRAPPEDKEY) {
                iswrapped = 1;
            }
        }
    }

    /*
     * Step 2: allocate cprotect and initialize it.
     */

    /*
     * v2 EA's don't support the larger class B keys
     */
    if ((new_persistent_len != CP_V2_WRAPPEDKEYSIZE) &&
            (hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
        return EINVAL;
    }

    entry = cp_entry_alloc (new_persistent_len);
    if (entry == NULL) {
        return ENOMEM;
    }

    *output_entry = entry;

    /*
     * For directories and class F files, just store the effective new class.
     * AKS does not interact with us in generating keys for F files, and directories
     * don't actually have keys.
     */
    if ( S_ISDIR (cmode) || (newclass_eff == PROTECTION_CLASS_F)) {
        entry->cp_pclass = newclass_eff;
    }
    else {
        /*
         * otherwise, store what AKS actually returned back to us.
         * wrapped_key_out is only valid if we have round-tripped to AKS
         */
        entry->cp_pclass = wrapped_key_out.dp_class;
    }

    /* Copy the cache key & IV keys into place if needed. */
    if (new_key_len > 0) {
        bcopy (new_key, entry->cp_cache_key, new_key_len);
        entry->cp_cache_key_len = new_key_len;

        /* Initialize the IV key */
        if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
            if (newclass_eff == PROTECTION_CLASS_F) {
                /* class F needs a full IV initialize */
                cp_setup_aes_ctx(entry);
            }
            else {
                /* Key store gave us an iv key. Just need to wrap it.*/
                aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
            }
            entry->cp_flags |= CP_OFF_IV_ENABLED;
        }
    }

    if (new_persistent_len > 0) {
        bcopy(new_persistent_key, entry->cp_persistent_key, new_persistent_len);
    }

    /* Mark it as a wrapped key if necessary */
    if (iswrapped) {
        entry->cp_flags |= CP_SEP_WRAPPEDKEY;
    }

cpnew_fail:
    return error;
}
/* Initialize the cp_cred_t structure passed to AKS */
static void cp_init_access(cp_cred_t access, struct cnode *cp)
{
    vfs_context_t context = vfs_context_current();
    kauth_cred_t cred = vfs_context_ucred(context);
    proc_t proc = vfs_context_proc(context);

    bzero(access, sizeof(*access));

    /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
    access->inode = cp->c_fileid;
    access->pid = proc_pid(proc);
    access->uid = kauth_cred_getuid(cred);

    return;
}
#else

int cp_key_store_action(int action __unused)
{
    return ENOTSUP;
}

int cp_register_wraps(cp_wrap_func_t key_store_func __unused)
{
    return ENOTSUP;
}

#endif /* CONFIG_PROTECT */