/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cprotect.h>
#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/uio_internal.h>
#include <sys/ubc_internal.h>
#include <sys/vnode_if.h>
#include <sys/vnode_internal.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <sys/kauth.h>

#include "hfs.h"
#include "hfs_cnode.h"

#if CONFIG_PROTECT

static struct cp_wrap_func		g_cp_wrap_func = {};
static struct cp_global_state	g_cp_state = {0, 0, 0};

extern int (**hfs_vnodeop_p) (void *);

/*
 * CP private functions
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static struct cprotect *cp_entry_alloc(size_t);
static void cp_entry_dealloc(struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vfs_callback(mount_t, void *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *, int);
static int cp_new(int newclass, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode, struct cprotect **output_entry);
static int cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static int cp_setup_aes_ctx(struct cprotect *entry);
static void cp_init_access(cp_cred_t access, struct cnode *cp);

#if DEVELOPMENT || DEBUG
#define CP_ASSERT(x)		\
	if ((x) == 0) {		\
		panic("Content Protection: failed assertion in %s", __FUNCTION__); 	\
	}
#else
#define CP_ASSERT(x)
#endif

int
cp_key_store_action(int action)
{
	if (action < 0 || action > CP_MAX_STATE) {
		return -1;
	}

	/* this truncates the upper 3 bytes */
	g_cp_state.lock_state = (uint8_t)action;

	if (action == CP_LOCKED_STATE) {
		/*
		 * Upcast the value in 'action' to be a pointer-width unsigned integer.
		 * This avoids issues relating to pointer-width.
		 */
		unsigned long action_arg = (unsigned long) action;
		return vfs_iterate(0, cp_lock_vfs_callback, (void*)action_arg);
	}

	/* Do nothing on unlock events */
	return 0;
}
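
/*
 * Illustrative summary (not part of the original comments): the lock-event path
 * below fans out roughly as
 *
 *     keybag/AKS -> cp_key_store_action(CP_LOCKED_STATE)
 *         -> vfs_iterate(0, cp_lock_vfs_callback, ...)             for each mount
 *             -> vnode_iterate(mp, 0, cp_lock_vnode_callback, ...)  for each vnode
 *                 -> flush the cached per-file key, msync/invalidate pages
 *
 * Unlock events are deliberately ignored; keys are restored lazily on the next
 * I/O via cp_restore_keys()/cp_unwrap().
 */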

int
cp_register_wraps(cp_wrap_func_t key_store_func)
{
	g_cp_wrap_func.new_key = key_store_func->new_key;
	g_cp_wrap_func.unwrapper = key_store_func->unwrapper;
	g_cp_wrap_func.rewrapper = key_store_func->rewrapper;
	/* do not use invalidater until rdar://12170050 goes in ! */
	g_cp_wrap_func.invalidater = key_store_func->invalidater;

	g_cp_state.wrap_functions_set = 1;

	return 0;
}

/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
	struct cprotect *entry = NULL;
	int error = 0;
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/*
	 * The cnode should be locked at this point, regardless of whether or not
	 * we are creating a new item in the namespace or vending a vnode on behalf
	 * of lookup.  The only time we tell getnewvnode to skip the lock is when
	 * constructing a resource fork vnode. But a resource fork vnode must come
	 * after the regular data fork cnode has already been constructed.
	 */
	if (!cp_fs_protected (mp)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (!g_cp_state.wrap_functions_set) {
		printf("hfs: cp_update_entry: wrap functions not yet set\n");
		return ENXIO;
	}

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("hfs cp: no running mount point version! ");
	}

	CP_ASSERT (cp->c_cpentry == NULL);

	error = cp_getxattr(cp, hfsmp, &entry);
	if (error == 0) {
		/*
		 * Success; attribute was found, though it may not have keys.
		 * If the entry was returned without keys, we will delay generating
		 * keys until the first I/O.
		 */
		if (S_ISREG(cp->c_mode)) {
			if (entry->cp_flags & CP_NEEDS_KEYS) {
				entry->cp_flags &= ~CP_KEY_FLUSHED;
			}
			else {
				entry->cp_flags |= CP_KEY_FLUSHED;
			}
		}
	}
	else if (error == ENOATTR) {
		/*
		 * Normally, we should always have a CP EA for a file or directory that
		 * we are initializing here. However, there are some extenuating circumstances,
		 * such as the root directory immediately following a newfs_hfs.
		 *
		 * As a result, we leave code here to deal with an ENOATTR which will always
		 * default to a 'D/NONE' key, though we don't expect to use it much.
		 */
		int target_class = PROTECTION_CLASS_D;

		if (S_ISDIR(cp->c_mode)) {
			target_class = PROTECTION_CLASS_DIR_NONE;
		}
		error = cp_new (target_class, hfsmp, cp, cp->c_mode, &entry);
		if (error == 0) {
			error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
		}
	}

	/*
	 * Bail out if:
	 * a) error was not ENOATTR (we got something bad from the getxattr call)
	 * b) we encountered an error setting the xattr above.
	 * c) we failed to generate a new cprotect data structure.
	 */
	if (error == 0) {
		cp->c_cpentry = entry;
		entry->cp_backing_cnode = cp;
	}
	else {
		if (entry) {
			cp_entry_destroy(entry);
		}
		cp->c_cpentry = NULL;
	}

	return error;
}

/*
 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
 * Since the kext is now responsible for vending us both wrapped/unwrapped keys
 * we need to create a keyless xattr upon file / directory creation. When we have the inode value
 * and the file/directory is established, then we can ask it to generate keys.  Note that
 * this introduces a potential race;  If the device is locked and the wrapping
 * keys are purged between the time we call this function and the time we ask it to generate
 * keys for us, we could have to fail the open(2) call and back out the entry.
 */
int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp, int32_t suppliedclass,
		mode_t cmode, struct cprotect **tmpentry)
{
	int isdir = 0;
	struct cprotect *entry = NULL;
	uint32_t target_class = hfsmp->default_cp_class;

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("CP: major vers not set in mount!");
	}

	if (S_ISDIR (cmode)) {
		isdir = 1;
	}

	/* Decide the target class.  Input argument takes priority. */
	if (cp_is_valid_class (isdir, suppliedclass)) {
		/* caller supplies -1 if it was not specified so we will default to the mount point value */
		target_class = suppliedclass;
		/*
		 * One exception, F is never valid for a directory
		 * because its children may inherit and userland will be
		 * unable to read/write to the files.
		 */
		if (isdir) {
			if (target_class == PROTECTION_CLASS_F) {
				*tmpentry = NULL;
				return EINVAL;
			}
		}
	}
	else {
		/*
		 * If no valid class was supplied, behave differently depending on whether or not
		 * the item being created is a file or directory.
		 *
		 * If parent directory has a non-zero class, use that.
		 * If parent directory has a zero class (not set), then attempt to
		 * apply the mount point default.
		 *
		 * Directories always inherit from the parent; if the parent
		 * has a NONE class set, then we can continue to use that.
		 */
		if ((dcp) && (dcp->c_cpentry)) {
			uint32_t parentclass = dcp->c_cpentry->cp_pclass;
			/* If the parent class is not valid, default to the mount point value */
			if (cp_is_valid_class(1, parentclass)) {
				if (isdir) {
					target_class = parentclass;
				}
				else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
					/* files can inherit so long as it's not NONE */
					target_class = parentclass;
				}
			}
			/* Otherwise, we already defaulted to the mount point's default */
		}
	}

	/* Generate the cprotect to vend out */
	entry = cp_entry_alloc (0);
	if (entry == NULL) {
		*tmpentry = NULL;
		return ENOMEM;
	}

	/*
	 * We don't have keys yet, so fill in what we can.  At this point
	 * this blob has no keys and it has no backing xattr.  We just know the
	 * target class.
	 */
	entry->cp_flags = (CP_NEEDS_KEYS | CP_NO_XATTR);
	entry->cp_pclass = target_class;
	*tmpentry = entry;

	return 0;
}
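
/*
 * Illustrative summary of the class-selection policy implemented above:
 *
 *   - a valid supplied class wins (but class F is rejected for directories);
 *   - otherwise, if the parent has a valid class, directories inherit it
 *     unconditionally and files inherit it unless it is DIR_NONE;
 *   - otherwise the mount point default (hfsmp->default_cp_class) is used.
 */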

/*
 * Relay to caller whether or not the filesystem should generate temporary keys
 * during resize operations.
 */
int cp_needs_tempkeys (struct hfsmount *hfsmp, int *needs)
{
	if (hfsmp->hfs_running_cp_major_vers < CP_PREV_MAJOR_VERS ||
			hfsmp->hfs_running_cp_major_vers > CP_NEW_MAJOR_VERS) {
		return -1;
	}

	/* CP_NEW_MAJOR_VERS implies CP_OFF_IV_ENABLED */
	if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
		*needs = 0;
	}
	else {
		*needs = 1;
	}

	return 0;
}

/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk.  We use class F for this since it provides the exact
 * semantics that are needed here.  Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 */
int cp_entry_gentempkeys(struct cprotect **entry_ptr, struct hfsmount *hfsmp)
{
	struct cprotect *entry = NULL;

	if (hfsmp->hfs_running_cp_major_vers < CP_NEW_MAJOR_VERS) {
		return EPERM;
	}

	/*
	 * This should only be used for files and won't be written out.
	 * We don't need a persistent key.
	 */
	entry = cp_entry_alloc (0);
	if (entry == NULL) {
		*entry_ptr = NULL;
		return ENOMEM;
	}
	entry->cp_cache_key_len = CP_MAX_KEYSIZE;
	entry->cp_pclass = PROTECTION_CLASS_F;
	entry->cp_persistent_key_len = 0;

	/* Generate the class F key */
	read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);

	/* Generate the IV key */
	cp_setup_aes_ctx(entry);
	entry->cp_flags |= CP_OFF_IV_ENABLED;

	*entry_ptr = entry;
	return 0;
}

/*
 * Tear down and clear a cprotect blob for a closing file.
 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
 */
void
cp_entry_destroy(struct cprotect *entry_ptr)
{
	if (entry_ptr == NULL) {
		/* nothing to clean up */
		return;
	}
	cp_entry_dealloc(entry_ptr);
}

int
cp_fs_protected (mount_t mnt)
{
	return (vfs_flags(mnt) & MNT_CPROTECT);
}

/*
 * Return a pointer to underlying cnode if there is one for this vnode.
 * Done without taking cnode lock, inspecting only vnode state.
 */
struct cnode *
cp_get_protected_cnode(struct vnode *vp)
{
	if (!cp_vnode_is_eligible(vp)) {
		return NULL;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		/* mount point doesn't support it */
		return NULL;
	}

	return (struct cnode*) vp->v_data;
}

/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 */
int
cp_vnode_getclass(struct vnode *vp, int *class)
{
	struct cprotect *entry;
	int error = 0;
	struct cnode *cp;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible (vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the truncate lock up-front in shared mode because we may need
	 * to manipulate the CP blob.  Pend lock events until we're done here.
	 */
	hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * We take only the shared cnode lock up-front.  If it turns out that
	 * we need to manipulate the CP blob to write a key out, drop the
	 * shared cnode lock and acquire an exclusive lock.
	 */
	error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	if (error) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return error;
	}

	/* pull the class from the live entry */
	entry = cp->c_cpentry;

	if (!entry) {
		panic("Content Protection: uninitialized cnode %p", cp);
	}

	/* Note that we may not have keys yet, but we know the target class. */
	*class = entry->cp_pclass;

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	return error;
}

/*
 * Sets persistent class for this file or directory.
 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
 * If the new class can't be accessed now, EPERM.
 * Otherwise, record class and re-wrap key if the mount point is content-protected.
 */
int
cp_vnode_setclass(struct vnode *vp, uint32_t newclass)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;
	int isdir = 0;

	if (vnode_isdir (vp)) {
		isdir = 1;
	}

	if (!cp_is_valid_class(isdir, newclass)) {
		printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
		return EINVAL;
	}

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	cp = VTOC(vp);

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same.  This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * re-wrap per-file key with new class.
	 * Generate an entirely new key if switching to F.
	 */
	if (vnode_isreg(vp)) {
		/*
		 * The vnode is a file.  Before proceeding with the re-wrap, we need
		 * to unwrap the keys before proceeding.  This is to ensure that
		 * the destination class's properties still work appropriately for the
		 * target class (since B allows I/O but an unwrap prior to the next unlock
		 * will not be allowed).
		 */
		if (entry->cp_flags & CP_KEY_FLUSHED) {
			error = cp_restore_keys (entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
		if (newclass == PROTECTION_CLASS_F) {
			/* Verify that file is blockless if switching to class F */
			if (cp->c_datafork->ff_size > 0) {
				error = EINVAL;
				goto out;
			}

			entry->cp_pclass = newclass;
			entry->cp_cache_key_len = CP_MAX_KEYSIZE;
			read_random (&entry->cp_cache_key[0], entry->cp_cache_key_len);
			if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
				cp_setup_aes_ctx (entry);
				entry->cp_flags |= CP_OFF_IV_ENABLED;
			}
			bzero(entry->cp_persistent_key, entry->cp_persistent_key_len);
			entry->cp_persistent_key_len = 0;
		}
		else {
			/* Deny the setclass if file is to be moved from F to something else */
			if (entry->cp_pclass == PROTECTION_CLASS_F) {
				error = EPERM;
				goto out;
			}
			/* We cannot call cp_rewrap unless the keys were already in existence. */
			if (entry->cp_flags & CP_NEEDS_KEYS) {
				struct cprotect *newentry = NULL;
				error = cp_generate_keys (hfsmp, cp, newclass, &newentry);
				if (error == 0) {
					cp_replace_entry (cp, newentry);
				}
				/* Bypass the setxattr code below since generate_keys does it for us */
				goto out;
			}
			else {
				error = cp_rewrap(cp, hfsmp, newclass);
			}
		}
		if (error) {
			/* we didn't have perms to set this class. leave file as-is and error out */
			goto out;
		}
	}
	else if (vnode_isdir(vp)) {
		/* For directories, just update the pclass */
		entry->cp_pclass = newclass;
		error = 0;
	}
	else {
		/* anything else, just error out */
		error = EINVAL;
		goto out;
	}

	/*
	 * We get here if the new class was F, or if we were re-wrapping a cprotect that already
	 * existed. If the keys were never generated, then they'll skip the setxattr calls.
	 */
	error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
	if (error == ENOATTR) {
		error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}
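
/*
 * Illustrative sketch of the re-class sequence above for a regular file:
 *
 *     cp_restore_keys(entry, hfsmp, cp);      unwrap so the cache key is in core
 *     cp_rewrap(cp, hfsmp, newclass);         or cp_generate_keys() if no keys yet
 *     cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
 *
 * Transitions into or out of class F are special-cased because class F has no
 * persistent (wrapped) key at all.
 */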

int cp_vnode_transcode(vnode_t vp)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same.  This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	if ((entry->cp_flags & CP_NEEDS_KEYS)) {
		/*
		 * If we are transcoding keys for AKB, then we should have already established
		 * a set of keys for this vnode. IF we don't have keys yet, then something bad
		 * happened.
		 */
		error = EINVAL;
		goto out;
	}

	/* Send the per-file key in wrapped form for re-wrap with the current class information
	 * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
	 * Don't need to process any outputs, so just clear the locks and pass along the error. */
	if (vnode_isreg(vp)) {

		/* Picked up the following from cp_wrap().
		 * If needed, more comments available there. */

		if (entry->cp_pclass == PROTECTION_CLASS_F) {
			error = EINVAL;
			goto out;
		}

		cp_init_access(&access_in, cp);

		bzero(&wrapped_key_in, sizeof(wrapped_key_in));
		wrapped_key_in.key = entry->cp_persistent_key;
		wrapped_key_in.key_len = entry->cp_persistent_key_len;
		wrapped_key_in.dp_class = entry->cp_pclass;

		error = g_cp_wrap_func.rewrapper(&access_in,
				entry->cp_pclass,
				&wrapped_key_in,
				NULL);
		if (error) {
			error = EPERM;
		}
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}

/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock.  This is because
 * the thread calling us may already have the truncate lock.  It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys.  We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
	struct cprotect *entry;
	int error = 0;
	struct hfsmount *hfsmp = NULL;
	struct cnode *cp = NULL;

	/*
	 * First, do validation against the vnode before proceeding any further:
	 * Is this vnode originating from a valid content-protected filesystem ?
	 */
	if (cp_vnode_is_eligible(vp) == 0) {
		/*
		 * It is either not HFS or not a file/dir.  Just return success. This is a valid
		 * case if servicing i/o against another filesystem type from VFS
		 */
		return 0;
	}

	if (cp_fs_protected (VTOVFS(vp)) == 0) {
		/*
		 * The underlying filesystem does not support content protection.  This is also
		 * a valid case.  Simply return success.
		 */
		return 0;
	}

	/*
	 * At this point, we know we have a HFS vnode that backs a file or directory on a
	 * filesystem that supports content protection
	 */
	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/*
		 * If this cnode is not content protected, simply return success.
		 * Note that this function is called by all I/O-based call sites
		 * when CONFIG_PROTECT is enabled during XNU building.
		 */

		/*
		 * All files should have cprotect structs.  It's possible to encounter
		 * a directory from a V2.0 CP system but all files should have protection
		 * EAs.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}

		goto out;
	}

	if ((error = cp_check_access(cp, vnop))) {
		/* check for raw encrypted access before bailing out */
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/*
			 * read access only + asking for the raw encrypted bytes
			 * is legitimate, so reset the error value to 0
			 */
			error = 0;
		}
		else {
			goto out;
		}
	}

	if (entry->cp_flags == 0) {
		/* no more work to do */
		goto out;
	}

	/* upgrade to exclusive lock */
	if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
			return error;
		}
	} else {
		cp->c_lockowner = current_thread();
	}

	/* generate new keys if none have ever been saved */
	if ((entry->cp_flags & CP_NEEDS_KEYS)) {
		struct cprotect *newentry = NULL;
		error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &newentry);
		if (error == 0) {
			cp_replace_entry (cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/* unwrap keys if needed */
	if (entry->cp_flags & CP_KEY_FLUSHED) {
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/* no need to try to restore keys; they are not going to be used */
			error = 0;
		}
		else {
			error = cp_restore_keys(entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
	}

	/* write out the xattr if it's new */
	if (entry->cp_flags & CP_NO_XATTR)
		error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:
	hfs_unlock(cp);
	return error;
}
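
/*
 * Illustrative caller pattern (the real call sites live in the HFS I/O paths,
 * e.g. read/write/pagein/pageout, not in this file):
 *
 *     if ((error = cp_handle_vnop(vp, CP_READ_ACCESS, ioflag))) {
 *         return error;   // keys unavailable (device locked) or EA problem
 *     }
 *     // proceed with cluster I/O using the unwrapped cp_cache_key
 */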

int
cp_handle_open(struct vnode *vp, int mode)
{
	struct cnode *cp = NULL;
	struct cprotect *entry = NULL;
	struct hfsmount *hfsmp;
	int error = 0;

	/* If vnode not eligible, just return success */
	if (!cp_vnode_is_eligible(vp)) {
		return 0;
	}

	/* If mount point not properly set up, then also return success */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return 0;
	}

	/* We know the vnode is in a valid state. acquire cnode and validate */
	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/*
		 * If the mount is protected and we couldn't get a cprotect for this vnode,
		 * then it's not valid for opening.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	if (!S_ISREG(cp->c_mode))
		goto out;

	/*
	 * Does the cnode have keys yet?  If not, then generate them.
	 */
	if (entry->cp_flags & CP_NEEDS_KEYS) {
		struct cprotect *newentry = NULL;
		error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &newentry);
		if (error == 0) {
			cp_replace_entry (cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/*
	 * We want to minimize the number of unwraps that we'll have to do since
	 * the cost can vary, depending on the platform we're running.
	 */
	switch (entry->cp_pclass) {
		case PROTECTION_CLASS_B:
			if (mode & O_CREAT) {
				/*
				 * Class B always allows creation.  Since O_CREAT was passed through
				 * we infer that this was a newly created vnode/cnode.  Even though a potential
				 * race exists when multiple threads attempt to create/open a particular
				 * file, only one can "win" and actually create it.  VFS will unset the
				 * O_CREAT bit on the loser.
				 *
				 * Note that skipping the unwrap check here is not a security issue --
				 * we have to unwrap the key permanently upon the first I/O.
				 */
				break;
			}

			if ((entry->cp_flags & CP_KEY_FLUSHED) == 0) {
				/*
				 * For a class B file, attempt the unwrap if we have the key in
				 * core already.
				 * The device could have just transitioned into the lock state, and
				 * this vnode may not yet have been purged from the vnode cache (which would
				 * remove the keys).
				 */
				cp_cred_s access_in;
				cp_wrapped_key_s wrapped_key_in;

				cp_init_access(&access_in, cp);
				bzero(&wrapped_key_in, sizeof(wrapped_key_in));
				wrapped_key_in.key = entry->cp_persistent_key;
				wrapped_key_in.key_len = entry->cp_persistent_key_len;
				wrapped_key_in.dp_class = entry->cp_pclass;
				error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, NULL);
				if (error) {
					error = EPERM;
				}
				break;
			}
			/* otherwise, fall through to attempt the unwrap/restore */
		case PROTECTION_CLASS_A:
		case PROTECTION_CLASS_C:
			/*
			 * At this point, we know that we need to attempt an unwrap if needed; we want
			 * to make sure that open(2) fails properly if the device is either just-locked
			 * or never made it past first unlock.  Since the keybag serializes access to the
			 * unwrapping keys for us and only calls our VFS callback once they've been purged,
			 * we will get here in two cases:
			 *
			 * A) we're in a window before the wrapping keys are purged; this is OK since when they get
			 * purged, the vnode will get flushed if needed.
			 *
			 * B) The keys are already gone.  In this case, the restore_keys call below will fail.
			 *
			 * Since this function is bypassed entirely if we're opening a raw encrypted file,
			 * we can always attempt the restore.
			 */
			if (entry->cp_flags & CP_KEY_FLUSHED) {
				error = cp_restore_keys(entry, hfsmp, cp);
			}
			if (error) {
				error = EPERM;
			}
			break;

		case PROTECTION_CLASS_D:
		default:
			break;
	}

out:
	hfs_unlock(cp);
	return error;
}
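
/*
 * Illustrative summary of the per-class open() handling above:
 *
 *   class B        - O_CREAT is always allowed; otherwise, if the cache key is
 *                    still in core, probe the keybag (NULL output) to verify the
 *                    wrapping keys are still present.
 *   class A / C    - attempt cp_restore_keys() if the cache key was flushed;
 *                    failure is mapped to EPERM so open(2) fails cleanly.
 *   class D / NONE - nothing to do at open time.
 */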

/*
 * During hfs resize operations, we have slightly different constraints than during
 * normal VNOPS that read/write data to files.  Specifically, we already have the cnode
 * locked (so nobody else can modify it), and we are doing the IO with root privileges, since
 * we are moving the data behind the user's back.  So, we skip access checks here (for unlock
 * vs. lock), and don't worry about non-existing keys.  If the file exists on-disk with valid
 * payload, then it must have keys set up already by definition.
 */
int
cp_handle_relocate (struct cnode *cp, struct hfsmount *hfsmp)
{
	struct cprotect *entry;
	int error = -1;

	/* cp is already locked */
	entry = cp->c_cpentry;
	if (!entry) {
		goto out;
	}

	/*
	 * Still need to validate whether to permit access to the file or not
	 * based on lock status
	 */
	if ((error = cp_check_access(cp, CP_READ_ACCESS | CP_WRITE_ACCESS))) {
		goto out;
	}

	if (entry->cp_flags == 0) {
		/* no more work to do */
		error = 0;
		goto out;
	}

	/* it must have keys since it is an existing file with actual payload */

	/* unwrap keys if needed */
	if (entry->cp_flags & CP_KEY_FLUSHED) {
		error = cp_restore_keys(entry, hfsmp, cp);
	}

	/*
	 * Don't need to write out the EA since if the file has actual extents,
	 * it must have an EA
	 */
out:
	/* return the cp still locked */
	return error;
}

/*
 * Gets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to native endian-ness as needed.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
	uio_t auio;
	char uio_buf[UIO_SIZEOF(1)];
	size_t attrsize = sizeof(struct cp_root_xattr);
	int error = 0;
	struct vnop_getxattr_args args;

	if (!outxattr) {
		panic("Content Protection: cp_xattr called with xattr == NULL");
	}

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(outxattr), attrsize);

	args.a_desc = NULL; // unused
	args.a_vp = NULL; //unused since we're writing EA to root folder.
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = auio;
	args.a_size = &attrsize;
	args.a_options = XATTR_REPLACE;
	args.a_context = NULL; // unused

	error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);
	if (error != 0) {
		return error;
	}

	/* Now convert the multi-byte fields to native endianness */
	outxattr->major_version = OSSwapLittleToHostInt16(outxattr->major_version);
	outxattr->minor_version = OSSwapLittleToHostInt16(outxattr->minor_version);
	outxattr->flags = OSSwapLittleToHostInt64(outxattr->flags);

	return error;
}
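
/*
 * Usage sketch (illustrative): cp_root_major_vers(), below, is the in-file
 * consumer of this routine.  At mount time it does roughly
 *
 *     struct cp_root_xattr xattr;
 *     if (cp_getrootxattr(hfsmp, &xattr) == 0)
 *         hfsmp->hfs_running_cp_major_vers = xattr.major_version;
 */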

/*
 * Sets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to little endian as needed.
 *
 * This will be written to the disk when it detects the EA is not there, or when we need
 * to make a modification to the on-disk version that can be done in-place.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
	int error = 0;
	struct vnop_setxattr_args args;

	args.a_desc = NULL;
	args.a_vp = NULL;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL; //pass data ptr instead
	args.a_options = 0;
	args.a_context = NULL; //no context needed, only done from mount.

	/* Now convert the multi-byte fields to little endian before writing to disk. */
	newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
	newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);
	newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

	error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
			sizeof(struct cp_root_xattr), &args, hfsmp, 1);
	return error;
}

/*
 * Stores new xattr data on the cnode.
 * cnode lock held exclusive (if available).
 *
 * This function is also invoked during file creation.
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp, uint32_t fileid, int options)
{
	int error = 0;
	size_t attrsize;
	struct vnop_setxattr_args args;
	uint32_t target_fileid;
	struct cnode *arg_cp = NULL;
	uint32_t tempflags = 0;

	args.a_desc = NULL;

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	if (cp) {
		args.a_vp = cp->c_vp;
		target_fileid = 0;
		arg_cp = cp;
	}
	else {
		/*
		 * When we set the EA in the same txn as the file creation,
		 * we do not have a vnode/cnode yet. Use the specified fileid.
		 */
		args.a_vp = NULL;
		target_fileid = fileid;
	}
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL; //pass data ptr instead
	args.a_options = options;
	args.a_context = vfs_context_current();

	/* Note that it's OK to write out an XATTR without keys. */
	/* Disable flags that will be invalid as we're writing the EA out at this point. */
	tempflags = entry->cp_flags;
	tempflags &= ~CP_NO_XATTR;

	switch(hfsmp->hfs_running_cp_major_vers) {
		case CP_NEW_MAJOR_VERS: {
			struct cp_xattr_v4 *newxattr = NULL; // 70+ bytes; don't alloc on stack.
			MALLOC (newxattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
			if (newxattr == NULL) {
				error = ENOMEM;
				break;
			}
			bzero (newxattr, sizeof(struct cp_xattr_v4));

			attrsize = sizeof(*newxattr) - CP_MAX_WRAPPEDKEYSIZE + entry->cp_persistent_key_len;

			/* Endian swap the multi-byte fields into L.E from host. */
			newxattr->xattr_major_version = OSSwapHostToLittleInt16 (hfsmp->hfs_running_cp_major_vers);
			newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
			newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
			newxattr->flags = OSSwapHostToLittleInt32(tempflags);
			newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
			bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);

			error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);

			FREE(newxattr, M_TEMP);
			break;
		}
		case CP_PREV_MAJOR_VERS: {
			struct cp_xattr_v2 *newxattr = NULL;
			MALLOC (newxattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
			if (newxattr == NULL) {
				error = ENOMEM;
				break;
			}
			bzero (newxattr, sizeof(struct cp_xattr_v2));

			attrsize = sizeof(*newxattr);

			/* Endian swap the multi-byte fields into L.E from host. */
			newxattr->xattr_major_version = OSSwapHostToLittleInt16(hfsmp->hfs_running_cp_major_vers);
			newxattr->xattr_minor_version = OSSwapHostToLittleInt16(CP_MINOR_VERS);
			newxattr->key_size = OSSwapHostToLittleInt32(entry->cp_persistent_key_len);
			newxattr->flags = OSSwapHostToLittleInt32(tempflags);
			newxattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
			bcopy(entry->cp_persistent_key, newxattr->persistent_key, entry->cp_persistent_key_len);

			error = hfs_setxattr_internal(arg_cp, (caddr_t)newxattr, attrsize, &args, hfsmp, target_fileid);

			FREE (newxattr, M_TEMP);
			break;
		}
		default:
			printf("hfs: cp_setxattr: Unknown CP version running \n");
			error = EINVAL;
			break;
	}

	if (error == 0) {
		entry->cp_flags &= ~CP_NO_XATTR;
	}

	return error;
}
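
/*
 * Size note (illustrative): the v4 EA is written truncated to the actual
 * wrapped-key length,
 *
 *     attrsize = sizeof(struct cp_xattr_v4) - CP_MAX_WRAPPEDKEYSIZE
 *                + entry->cp_persistent_key_len;
 *
 * whereas the v2 EA is always written at its full fixed size.
 */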

/*
 * Used by an fcntl to query the underlying FS for its content protection version #
 */
int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the level */

	err = cp_root_major_vers(mp);

	if (err == 0) {
		*level = hfsmp->hfs_running_cp_major_vers;
	}
	/* in error case, cp_root_major_vers will just return EINVAL. Use that */

	return err;
}

/* Used by fcntl to query default protection level of FS */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the default */

	*level = hfsmp->default_cp_class;
	return err;
}

/********************
 * Private Functions
 *******************/

static int
cp_root_major_vers(mount_t mp)
{
	int err = 0;
	struct cp_root_xattr xattr;
	struct hfsmount *hfsmp = NULL;

	hfsmp = vfs_fsprivate(mp);
	err = cp_getrootxattr (hfsmp, &xattr);

	if (err == 0) {
		hfsmp->hfs_running_cp_major_vers = xattr.major_version;
	}
	else {
		return EINVAL;
	}

	return 0;
}

static int
cp_vnode_is_eligible(struct vnode *vp)
{
	return ((vp->v_op == hfs_vnodeop_p) &&
			(!vnode_issystem(vp)) &&
			(vnode_isreg(vp) || vnode_isdir(vp)));
}

int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
	/*
	 * The valid protection classes are from 0 -> N
	 * We use a signed argument to detect unassigned values from
	 * directory entry creation time in HFS.
	 */
	if (isdir) {
		/* Directories are not allowed to have F, but they can have "NONE" */
		return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
				(protectionclass <= PROTECTION_CLASS_D));
	}
	else {
		return ((protectionclass >= PROTECTION_CLASS_A) &&
				(protectionclass <= PROTECTION_CLASS_F));
	}
}

static struct cprotect *
cp_entry_alloc(size_t keylen)
{
	struct cprotect *cp_entry;

	if (keylen > CP_MAX_WRAPPEDKEYSIZE)
		return (NULL);

	MALLOC(cp_entry, struct cprotect *, sizeof(struct cprotect) + keylen,
			M_TEMP, M_WAITOK);
	if (cp_entry == NULL)
		return (NULL);

	bzero(cp_entry, sizeof(*cp_entry) + keylen);
	cp_entry->cp_persistent_key_len = keylen;
	return (cp_entry);
}

static void
cp_entry_dealloc(struct cprotect *entry)
{
	uint32_t keylen = entry->cp_persistent_key_len;
	bzero(entry, (sizeof(*entry) + keylen));
	FREE(entry, M_TEMP);
}
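
/*
 * Note (illustrative): cp_entry_dealloc() bzero()s the entire entry, including
 * the trailing wrapped-key bytes, before FREE() so that key material does not
 * linger in freed kernel memory.  The same scrub-then-drop pattern appears in
 * the lock-event and key-restore failure paths below.
 */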

/*
 * Initializes a new cprotect entry with xattr data from the cnode.
 * cnode lock held shared
 */
static int
cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, struct cprotect **outentry)
{
	int error = 0;
	uio_t auio;
	size_t attrsize;
	char uio_buf[UIO_SIZEOF(1)];
	struct vnop_getxattr_args args;
	struct cprotect *entry = NULL;

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
	args.a_desc = NULL; // unused
	args.a_vp = cp->c_vp;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = auio;
	args.a_options = XATTR_REPLACE;
	args.a_context = vfs_context_current(); // unused

	switch (hfsmp->hfs_running_cp_major_vers) {
		case CP_NEW_MAJOR_VERS: {
			struct cp_xattr_v4 *xattr = NULL;
			MALLOC (xattr, struct cp_xattr_v4*, sizeof(struct cp_xattr_v4), M_TEMP, M_WAITOK);
			if (xattr == NULL) {
				error = ENOMEM;
				break;
			}
			bzero(xattr, sizeof (struct cp_xattr_v4));
			attrsize = sizeof(*xattr);

			uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
			args.a_size = &attrsize;

			error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
			if (error != 0) {
				FREE (xattr, M_TEMP);
				goto out;
			}

			/* Endian swap the multi-byte fields into host endianness from L.E. */
			xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
			xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
			xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
			xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
			xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);

			if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
				printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
					xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
				error = EINVAL;
				FREE (xattr, M_TEMP);
				goto out;
			}
			/*
			 * Prevent a buffer overflow, and validate the key length obtained from the
			 * EA. If it's too big, then bail out, because the EA can't be trusted at this
			 * point.
			 */
			if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE) {
				error = EINVAL;
				FREE (xattr, M_TEMP);
				goto out;
			}

			/*
			 * Class F files have no backing key; their keylength should be 0,
			 * though they should have the proper flags set.
			 *
			 * A request to instantiate a CP for a class F file should result
			 * in a bzero'd cp that just says class F, with key_flushed set.
			 */

			/* set up entry with information from xattr */
			entry = cp_entry_alloc(xattr->key_size);
			if (!entry) {
				FREE (xattr, M_TEMP);
				return ENOMEM;
			}

			entry->cp_pclass = xattr->persistent_class;

			/*
			 * Suppress invalid flags that should not be set.
			 * If we have gotten this far, then CP_NO_XATTR cannot possibly
			 * be valid; the EA exists.
			 */
			xattr->flags &= ~CP_NO_XATTR;

			entry->cp_flags = xattr->flags;
			if (xattr->xattr_major_version >= CP_NEW_MAJOR_VERS) {
				entry->cp_flags |= CP_OFF_IV_ENABLED;
			}

			if (entry->cp_pclass != PROTECTION_CLASS_F) {
				bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
			}

			FREE (xattr, M_TEMP);
			break;
		}
		case CP_PREV_MAJOR_VERS: {
			struct cp_xattr_v2 *xattr = NULL;
			MALLOC (xattr, struct cp_xattr_v2*, sizeof(struct cp_xattr_v2), M_TEMP, M_WAITOK);
			if (xattr == NULL) {
				error = ENOMEM;
				break;
			}
			bzero (xattr, sizeof (struct cp_xattr_v2));
			attrsize = sizeof(*xattr);

			uio_addiov(auio, CAST_USER_ADDR_T(xattr), attrsize);
			args.a_size = &attrsize;

			error = hfs_getxattr_internal(cp, &args, VTOHFS(cp->c_vp), 0);
			if (error != 0) {
				FREE (xattr, M_TEMP);
				goto out;
			}

			/* Endian swap the multi-byte fields into host endianness from L.E. */
			xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
			xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
			xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
			xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
			xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);

			if (xattr->xattr_major_version != hfsmp->hfs_running_cp_major_vers) {
				printf("hfs: cp_getxattr: bad xattr version %d expecting %d\n",
					xattr->xattr_major_version, hfsmp->hfs_running_cp_major_vers);
				error = EINVAL;
				FREE (xattr, M_TEMP);
				goto out;
			}

			/*
			 * Prevent a buffer overflow, and validate the key length obtained from the
			 * EA. If it's too big, then bail out, because the EA can't be trusted at this
			 * point.
			 */
			if (xattr->key_size > CP_V2_WRAPPEDKEYSIZE) {
				error = EINVAL;
				FREE (xattr, M_TEMP);
				goto out;
			}

			/* set up entry with information from xattr */
			entry = cp_entry_alloc(xattr->key_size);
			if (!entry) {
				FREE (xattr, M_TEMP);
				return ENOMEM;
			}

			entry->cp_pclass = xattr->persistent_class;

			/*
			 * Suppress invalid flags that should not be set.
			 * If we have gotten this far, then CP_NO_XATTR cannot possibly
			 * be valid; the EA exists.
			 */
			xattr->flags &= ~CP_NO_XATTR;

			entry->cp_flags = xattr->flags;

			if (entry->cp_pclass != PROTECTION_CLASS_F) {
				bcopy(xattr->persistent_key, entry->cp_persistent_key, xattr->key_size);
			}

			FREE (xattr, M_TEMP);
			break;
		}
	}

out:
	if (error == 0) {
		*outentry = entry;
	}

	return error;
}

/*
 * If permitted, restore entry's unwrapped key from the persistent key.
 * If not, clear key and set CP_KEY_FLUSHED.
 * cnode lock held exclusive
 */
static int
cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
{
	int error = 0;

	error = cp_unwrap(hfsmp, entry, cp);
	if (error) {
		entry->cp_flags |= CP_KEY_FLUSHED;
		bzero(entry->cp_cache_key, entry->cp_cache_key_len);
		error = EPERM;
	}
	else {
		/* ready for business */
		entry->cp_flags &= ~CP_KEY_FLUSHED;
	}
	return error;
}

static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
	/* Use a pointer-width integer field for casting */
	unsigned long new_state;

	/*
	 * When iterating the various mount points that may
	 * be present on a content-protected device, we need to skip
	 * those that do not have it enabled.
	 */
	if (!cp_fs_protected(mp)) {
		return 0;
	}

	new_state = (unsigned long) arg;
	if (new_state == CP_LOCKED_STATE) {
		/*
		 * We respond only to lock events.  Since cprotect structs
		 * decrypt/restore keys lazily, the unlock events don't
		 * actually cause anything to happen.
		 */
		return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg);
	}
	/* Otherwise just return 0. */
	return 0;
}

/*
 * Deny access to protected files if keys have been locked.
 */
static int
cp_check_access(struct cnode *cp, int vnop __unused)
{
	int error = 0;

	if (g_cp_state.lock_state == CP_UNLOCKED_STATE) {
		return 0;
	}

	if (!cp->c_cpentry) {
		/* unprotected node */
		return 0;
	}

	if (!S_ISREG(cp->c_mode)) {
		return 0;
	}

	/* Deny all access for class A files */
	switch (cp->c_cpentry->cp_pclass) {
		case PROTECTION_CLASS_A: {
			error = EPERM;
			break;
		}
		default:
			error = 0;
			break;
	}

	return error;
}

/*
 * Respond to a lock or unlock event.
 * On lock: clear out keys from memory, then flush file contents.
 * On unlock: nothing (function not called).
 */
static int
cp_lock_vnode_callback(struct vnode *vp, void *arg)
{
	cnode_t *cp = NULL;
	struct cprotect *entry = NULL;
	int error = 0;
	unsigned long action = 0;
	int took_truncate_lock = 0;

	error = vnode_getwithref (vp);
	if (error) {
		return error;
	}

	cp = VTOC(vp);

	/*
	 * When cleaning cnodes due to a lock event, we must
	 * take the truncate lock AND the cnode lock.  By taking
	 * the truncate lock here, we force (nearly) all pending IOs
	 * to drain before we can acquire the truncate lock.  All HFS cluster
	 * io calls except for swapfile IO need to acquire the truncate lock
	 * prior to calling into the cluster layer.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

	entry = cp->c_cpentry;
	if (!entry) {
		/* unprotected vnode: not a regular file */
		goto out;
	}

	action = (unsigned long) arg;
	switch (action) {
		case CP_LOCKED_STATE: {
			vfs_context_t ctx;
			if (entry->cp_pclass != PROTECTION_CLASS_A ||
				vnode_isdir(vp)) {
				/*
				 * There is no change at lock for other classes than A.
				 * B is kept in memory for writing, and class F (for VM) does
				 * not have a wrapped key, so there is no work needed for
				 * wrapping/unwrapping.
				 *
				 * Note that 'class F' is relevant here because if
				 * hfs_vnop_strategy does not take the cnode lock
				 * to protect the cp blob across IO operations, we rely
				 * implicitly on the truncate lock to be held when doing IO.
				 * The only case where the truncate lock is not held is during
				 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
				 * directly to cluster_pageout.
				 */
				goto out;
			}

			/* Before doing anything else, zero-fill sparse ranges as needed */
			ctx = vfs_context_current();
			(void) hfs_filedone (vp, ctx);

			/* first, sync back dirty pages */
			hfs_unlock (cp);
			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
			hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

			/*
			 * There was a concern here(9206856) about flushing keys before nand layer is done using them.
			 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
			 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF.
			 * Which in turn calls buf_biodone() and eventually unblocks ubc_msync()
			 * Also verified that the cached data in IOFS is overwritten by other data, and there
			 * is no key leakage in that layer.
			 */

			entry->cp_flags |= CP_KEY_FLUSHED;
			bzero(&entry->cp_cache_key, entry->cp_cache_key_len);
			bzero(&entry->cp_cache_iv_ctx, sizeof(aes_encrypt_ctx));

			/* some write may have arrived in the mean time. dump those pages */
			hfs_unlock (cp);
			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
			hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
			break;
		}
		case CP_UNLOCKED_STATE: {
			/* no-op */
			break;
		}
		default:
			panic("Content Protection: unknown lock action %lu\n", action);
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	vnode_put(vp);
	return error;
}

/*
 * Generate a new wrapped key based on the existing cache key.
 */
static int
cp_rewrap(struct cnode *cp, struct hfsmount *hfsmp, int newclass)
{
	struct cprotect *entry = cp->c_cpentry;
	uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
	size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
	int error = 0;

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;
	cp_wrapped_key_s wrapped_key_out;

	/*
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open.  There is no
	 * wrapped key, so there isn't anything to wrap.
	 */
	if (newclass == PROTECTION_CLASS_F) {
		return EINVAL;
	}

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = entry->cp_persistent_key;
	wrapped_key_in.key_len = entry->cp_persistent_key_len;
	wrapped_key_in.dp_class = entry->cp_pclass;

	bzero(&wrapped_key_out, sizeof(wrapped_key_out));
	wrapped_key_out.key = new_persistent_key;
	wrapped_key_out.key_len = keylen;

	/*
	 * inode is passed here to find the backup bag wrapped blob
	 * from userspace.  This lookup will occur shortly after creation
	 * and only if the file still exists.  Beyond this lookup the
	 * inode is not used.  Technically there is a race, we practically
	 * don't lose.
	 */
	error = g_cp_wrap_func.rewrapper(&access_in,
			newclass, /* new class */
			&wrapped_key_in,
			&wrapped_key_out);

	keylen = wrapped_key_out.key_len;

	if (error == 0) {
		struct cprotect *newentry = NULL;
		/*
		 * v2 EA's don't support the larger class B keys
		 */
		if ((keylen != CP_V2_WRAPPEDKEYSIZE) &&
			(hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
			return EINVAL;
		}

		/* Allocate a new cpentry */
		newentry = cp_entry_alloc (keylen);
		bcopy (entry, newentry, sizeof(struct cprotect));

		/* copy the new key into the entry */
		bcopy (new_persistent_key, newentry->cp_persistent_key, keylen);
		newentry->cp_persistent_key_len = keylen;
		newentry->cp_backing_cnode = cp;
		newentry->cp_pclass = newclass;

		/* Attach the new entry to the cnode */
		cp->c_cpentry = newentry;

		/* destroy the old entry */
		cp_entry_destroy (entry);
	}
	else {
		error = EPERM;
	}

	return error;
}
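
/*
 * Contract sketch for the rewrap call above (illustrative): only the wrapped
 * (persistent) key changes class; the unwrapped cache key, and therefore the
 * existing on-disk ciphertext, is untouched.
 *
 *     in : wrapped_key_in  = { cp_persistent_key, cp_persistent_key_len, old class }
 *     out: wrapped_key_out = { new_persistent_key buffer, CP_MAX_WRAPPEDKEYSIZE }
 *          key_len is updated by AKS to the actual wrapped length
 */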

static int
cp_unwrap(struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
{
	int error = 0;
	uint8_t iv_key[CP_IV_KEYSIZE];

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;
	cp_raw_key_s key_out;

	/*
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open.  There is no
	 * wrapped key, so there isn't anything to unwrap.
	 */
	if (entry->cp_pclass == PROTECTION_CLASS_F) {
		return EPERM;
	}

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = entry->cp_persistent_key;
	wrapped_key_in.key_len = entry->cp_persistent_key_len;
	wrapped_key_in.dp_class = entry->cp_pclass;

	bzero(&key_out, sizeof(key_out));
	key_out.key = entry->cp_cache_key;
	key_out.key_len = CP_MAX_KEYSIZE;
	key_out.iv_key = iv_key;
	key_out.iv_key_len = CP_IV_KEYSIZE;

	error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, &key_out);
	if (!error) {
		entry->cp_cache_key_len = key_out.key_len;

		/* No need to go here for older EAs */
		if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
			aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
			entry->cp_flags |= CP_OFF_IV_ENABLED;
		}
	}
	else {
		error = EPERM;
	}

	return error;
}

/* Setup AES context */
static int
cp_setup_aes_ctx(struct cprotect *entry)
{
	SHA1_CTX sha1ctxt;
	uint8_t cp_cache_iv_key[CP_IV_KEYSIZE]; /* Kiv */

	/* First init the cp_cache_iv_key[] */
	SHA1Init(&sha1ctxt);
	SHA1Update(&sha1ctxt, &entry->cp_cache_key[0], CP_MAX_KEYSIZE);
	SHA1Final(&cp_cache_iv_key[0], &sha1ctxt);

	aes_encrypt_key128(&cp_cache_iv_key[0], &entry->cp_cache_iv_ctx);

	return 0;
}
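
/*
 * Derivation note (illustrative): the per-file IV key used when
 * CP_OFF_IV_ENABLED is set is obtained by hashing the cache key with SHA-1
 * and feeding the digest to aes_encrypt_key128(), i.e. roughly
 *
 *     Kiv = AES-128-key-schedule( SHA-1(cp_cache_key) )
 */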

/*
 * Take a cnode that has already been initialized and establish persistent and
 * cache keys for it at this time.  Note that at the time this is called, the
 * directory entry has already been created and we are holding the cnode lock
 * on 'cp'.
 */
int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, int targetclass, struct cprotect **newentry)
{
	int error = 0;
	struct cprotect *newcp = NULL;
	*newentry = NULL;

	/* Validate that it has a cprotect already */
	if (cp->c_cpentry == NULL) {
		/* We can't do anything if it shouldn't be protected. */
		return 0;
	}

	/* Asserts for the underlying cprotect */
	if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
		/* should already have an xattr by this point. */
		error = EINVAL;
		goto out;
	}

	if (S_ISREG(cp->c_mode)) {
		if ((cp->c_cpentry->cp_flags & CP_NEEDS_KEYS) == 0) {
			error = EINVAL;
			goto out;
		}
	}

	error = cp_new (targetclass, hfsmp, cp, cp->c_mode, &newcp);
	if (error) {
		/*
		 * Key generation failed. This is not necessarily fatal
		 * since the device could have transitioned into the lock
		 * state before we called this.
		 */
		error = EPERM;
		goto out;
	}

	/*
	 * If we got here, then we have a new cprotect.
	 * Attempt to write the new one out.
	 */
	error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);

	if (error) {
		/* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
		/* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
		if (newcp) {
			cp_entry_destroy(newcp);
		}
		goto out;
	}

	/*
	 * If we get here then we can assert that:
	 * 1) generated wrapped/unwrapped keys.
	 * 2) wrote the new keys to disk.
	 * 3) cprotect is ready to go.
	 */
	newcp->cp_flags &= ~CP_NEEDS_KEYS;
	*newentry = newcp;

out:
	return error;
}

void cp_replace_entry (struct cnode *cp, struct cprotect *newentry)
{
	if (cp->c_cpentry) {
		cp_entry_destroy (cp->c_cpentry);
	}
	cp->c_cpentry = newentry;
	newentry->cp_backing_cnode = cp;
}

/*
 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
 * allocate a cprotect, and vend it back to the caller.
 *
 * Additionally, decide if keys are even needed -- directories get cprotect data structures
 * but they do not have keys.
 */
static int
cp_new(int newclass, struct hfsmount *hfsmp, struct cnode *cp, mode_t cmode, struct cprotect **output_entry)
{
	struct cprotect *entry = NULL;
	int error = 0;
	uint8_t new_key[CP_MAX_KEYSIZE];
	size_t new_key_len = CP_MAX_KEYSIZE;
	uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
	size_t new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
	uint8_t iv_key[CP_IV_KEYSIZE];
	size_t iv_key_len = CP_IV_KEYSIZE;

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_out;
	cp_raw_key_s key_out;

	if (*output_entry != NULL) {
		panic ("cp_new with non-null entry!");
	}

	if (!g_cp_state.wrap_functions_set) {
		printf("hfs: cp_new: wrap/gen functions not yet set\n");
		return ENXIO;
	}

	/*
	 * Step 1: Generate Keys if needed.
	 *
	 * For class F files, the kernel provides the key.
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open.  There is no
	 * wrapped key, so there isn't anything to wrap.
	 *
	 * For class A->D files, the key store provides the key
	 *
	 * For Directories, we only give them a class ; no keys.
	 */
	if (S_ISDIR (cmode)) {
		/* Directories */
		new_persistent_len = 0;
		new_key_len = 0;
		error = 0;
	}
	else if (S_ISREG(cmode)) {
		/* Files */
		if (newclass == PROTECTION_CLASS_F) {
			new_key_len = CP_MAX_KEYSIZE;
			read_random (&new_key[0], new_key_len);
			new_persistent_len = 0;
			error = 0;
		}
		else {
			/*
			 * The keystore is provided the file ID so that it can associate
			 * the wrapped backup blob with this key from userspace. This
			 * lookup occurs after successful file creation. Beyond this, the
			 * file ID is not used.  Note that there is a potential race here if
			 * the file ID is re-used.
			 */
			cp_init_access(&access_in, cp);

			bzero(&key_out, sizeof(key_out));
			key_out.key = new_key;
			key_out.key_len = new_key_len;
			key_out.iv_key = iv_key;
			key_out.iv_key_len = iv_key_len;

			bzero(&wrapped_key_out, sizeof(wrapped_key_out));
			wrapped_key_out.key = new_persistent_key;
			wrapped_key_out.key_len = new_persistent_len;

			error = g_cp_wrap_func.new_key(&access_in,
					newclass,
					&key_out,
					&wrapped_key_out);

			if (error) {
				return EPERM;
			}

			new_key_len = key_out.key_len;
			iv_key_len = key_out.iv_key_len;
			new_persistent_len = wrapped_key_out.key_len;
		}
	}
	else {
		/* Something other than file or dir? */
		return EPERM;
	}

	/*
	 * Step 2: Allocate cprotect and initialize it.
	 */

	/*
	 * v2 EA's don't support the larger class B keys
	 */
	if ((new_persistent_len != CP_V2_WRAPPEDKEYSIZE) &&
		(hfsmp->hfs_running_cp_major_vers == CP_PREV_MAJOR_VERS)) {
		return EINVAL;
	}

	entry = cp_entry_alloc (new_persistent_len);
	if (entry == NULL) {
		return ENOMEM;
	}

	*output_entry = entry;

	entry->cp_pclass = newclass;

	/* Copy the cache key & IV keys into place if needed. */
	if (new_key_len > 0) {
		bcopy (new_key, entry->cp_cache_key, new_key_len);
		entry->cp_cache_key_len = new_key_len;

		/* Initialize the IV key */
		if (hfsmp->hfs_running_cp_major_vers == CP_NEW_MAJOR_VERS) {
			if (newclass == PROTECTION_CLASS_F) {
				/* class F needs a full IV initialize */
				cp_setup_aes_ctx(entry);
			}
			else {
				/* Key store gave us an iv key. Just need to wrap it.*/
				aes_encrypt_key128(iv_key, &entry->cp_cache_iv_ctx);
			}
			entry->cp_flags |= CP_OFF_IV_ENABLED;
		}
	}
	if (new_persistent_len > 0) {
		bcopy(new_persistent_key, entry->cp_persistent_key, new_persistent_len);
	}

	return error;
}
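
/*
 * Key-source summary for cp_new (illustrative):
 *
 *   directory      - no keys at all, only a class.
 *   class F file   - kernel generates a random cache key via read_random();
 *                    no wrapped (persistent) key is kept.
 *   class A-D file - g_cp_wrap_func.new_key() returns the raw cache key, the
 *                    IV key and the wrapped (persistent) key in one call.
 */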

/* Initialize the cp_cred_t structure passed to AKS */
static void cp_init_access(cp_cred_t access, struct cnode *cp)
{
	vfs_context_t context = vfs_context_current();
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t proc = vfs_context_proc(context);

	bzero(access, sizeof(*access));

	/* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
	access->inode = cp->c_fileid;
	access->pid = proc_pid(proc);
	access->uid = kauth_cred_getuid(cred);
}

#else

int cp_key_store_action(int action __unused)
{
	return ENOTSUP;
}

int cp_register_wraps(cp_wrap_func_t key_store_func __unused)
{
	return ENOTSUP;
}

#endif /* CONFIG_PROTECT */