/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/vnode_if.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <libkern/crypto/sha1.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <uuid/uuid.h>

#include "hfs_cnode.h"
#include "hfs_fsctl.h"
#include "hfs_cprotect.h"
#include "hfs_iokit.h"
#if HFS_CONFIG_KEY_ROLL
#include "hfs_key_roll.h"
#endif
#define PTR_ADD(type, base, offset)   (type)((uintptr_t)(base) + (offset))

extern int (**hfs_vnodeop_p)(void *);
/*
 * CP private functions
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static void cp_entry_dealloc(hfsmount_t *hfsmp, struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible(vnode_t);
static int cp_check_access(cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static void cp_init_access(aks_cred_t access, struct cnode *cp);
// -- cp_key_pair accessors --

void cpkp_init(cp_key_pair_t *cpkp, uint16_t max_pers_key_len,
			   uint16_t max_cached_key_len)
{
	cpkp->cpkp_max_pers_key_len = max_pers_key_len;
	cpkp->cpkp_pers_key_len = 0;

	cpx_t embedded_cpx = cpkp_cpx(cpkp);
	/* XNU requires us to allocate the AES context separately */
	cpx_alloc_ctx(embedded_cpx);

	cpx_init(cpkp_cpx(cpkp), max_cached_key_len);

	// Default to using offsets
	cpx_set_use_offset_for_iv(cpkp_cpx(cpkp), true);
}
uint16_t cpkp_max_pers_key_len(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_max_pers_key_len;
}

uint16_t cpkp_pers_key_len(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len;
}

static bool cpkp_has_pers_key(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len > 0;
}

static void *cpkp_pers_key(const cp_key_pair_t *cpkp)
{
	return PTR_ADD(void *, &cpkp->cpkp_cpx, cpx_sizex(cpkp_cpx(cpkp)));
}

static void cpkp_set_pers_key_len(cp_key_pair_t *cpkp, uint16_t key_len)
{
	if (key_len > cpkp->cpkp_max_pers_key_len)
		panic("hfs_cprotect: key too big!");
	cpkp->cpkp_pers_key_len = key_len;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
cpx_t cpkp_cpx(const cp_key_pair_t *cpkp)
{
	// Cast to remove const qualifier
	return (cpx_t)&cpkp->cpkp_cpx;
}
#pragma clang diagnostic pop
size_t cpkp_size(uint16_t pers_key_len, uint16_t cached_key_len)
{
	return sizeof(cp_key_pair_t) + pers_key_len + cpx_size(cached_key_len);
}

size_t cpkp_sizex(const cp_key_pair_t *cpkp)
{
	return cpkp_size(cpkp->cpkp_max_pers_key_len, cpx_max_key_len(cpkp_cpx(cpkp)));
}

void cpkp_flush(cp_key_pair_t *cpkp)
{
	cpx_flush(cpkp_cpx(cpkp));
	cpkp->cpkp_pers_key_len = 0;
	bzero(cpkp_pers_key(cpkp), cpkp->cpkp_max_pers_key_len);
}

bool cpkp_can_copy(const cp_key_pair_t *src, const cp_key_pair_t *dst)
{
	return (cpkp_pers_key_len(src) <= dst->cpkp_max_pers_key_len
			&& cpx_can_copy(cpkp_cpx(src), cpkp_cpx(dst)));
}

void cpkp_copy(const cp_key_pair_t *src, cp_key_pair_t *dst)
{
	const uint16_t key_len = cpkp_pers_key_len(src);
	cpkp_set_pers_key_len(dst, key_len);
	memcpy(cpkp_pers_key(dst), cpkp_pers_key(src), key_len);
	cpx_copy(cpkp_cpx(src), cpkp_cpx(dst));
}
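
/*
 * Illustrative sketch (not part of the original source): cloning one key
 * pair into a freshly allocated one using the accessors above.  The helper
 * name and flow here are hypothetical; only the cpkp_* / cpx_* / hfs_*
 * calls are real.
 */
#if 0
static void cpkp_clone_sketch(const cp_key_pair_t *src)
{
	// Size the destination for src's persistent key and cached-key capacity
	size_t sz = cpkp_size(cpkp_pers_key_len(src), cpx_max_key_len(cpkp_cpx(src)));
	cp_key_pair_t *dst = hfs_mallocz(sz);

	cpkp_init(dst, cpkp_pers_key_len(src), cpx_max_key_len(cpkp_cpx(src)));
	if (cpkp_can_copy(src, dst))
		cpkp_copy(src, dst);

	// Teardown: scrub key material, release the AES context, free the blob
	cpkp_flush(dst);
	cpx_free_ctx(cpkp_cpx(dst));
	hfs_free(dst, sz);
}
#endif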
bool cp_is_supported_version(uint16_t vers)
{
	return vers == CP_VERS_4 || vers == CP_VERS_5;
}
/*
 * Return the appropriate key and, if requested, the physical offset and
 * maximum length for a particular I/O operation.
 */
void cp_io_params(__unused hfsmount_t *hfsmp, cprotect_t cpr,
				  __unused off_rsrc_t off_rsrc,
				  __unused int direction, cp_io_params_t *io_params)
{
#if HFS_CONFIG_KEY_ROLL
	hfs_cp_key_roll_ctx_t *ckr = cpr->cp_key_roll_ctx;

	if (ckr && off_rsrc < ckr->ckr_off_rsrc) {
		/*
		 * When we're in the process of rolling an extent, ckr_off_rsrc will
		 * indicate the end of the extent.
		 */
		const off_rsrc_t roll_loc = ckr->ckr_off_rsrc
			- hfs_blk_to_bytes(ckr->ckr_roll_extent.blockCount,
							   hfsmp->blockSize);

		if (off_rsrc < roll_loc) {
			io_params->max_len = roll_loc - off_rsrc;
			io_params->phys_offset = -1;
		} else {
			/*
			 * We should never get reads to the extent we're rolling
			 * because the pages should be locked in the UBC.  If we
			 * did get reads it's not obvious what the right thing to
			 * do is either: we could read from the old location, but
			 * we might have written later data to the new location,
			 * or we could read from the new location, but data might
			 * not have been written there yet.
			 *
			 * Note that whilst raw encrypted reads don't lock any
			 * pages, or take a cluster_read_direct lock, the call to
			 * hfs_key_roll_up_to in hfs_vnop_read will have ensured
			 * that the file has been rolled beyond the offset being
			 * read so this path should never be taken in that case.
			 */
			hfs_assert(direction == VNODE_WRITE);

			// For release builds, just in case...
			if (direction == VNODE_READ) {
				// Use the old key and offset
				goto old_key;
			}

			io_params->max_len = ckr->ckr_off_rsrc - off_rsrc;
			io_params->phys_offset = hfs_blk_to_bytes(ckr->ckr_roll_extent.startBlock,
													  hfsmp->blockSize) + off_rsrc - roll_loc;
		}

		io_params->cpx = cpkp_cpx(&ckr->ckr_keys);
		return;
	}

old_key:
#endif

	io_params->max_len = INT64_MAX;
	io_params->phys_offset = -1;
	io_params->cpx = cpkp_cpx(&cpr->cp_keys);
}
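
/*
 * Illustrative sketch (not part of the original source): how an I/O loop
 * might consume cp_io_params.  The wrapper below is hypothetical; only
 * cp_io_params and the cp_io_params_t fields come from this file.
 */
#if 0
static void cp_io_params_sketch(hfsmount_t *hfsmp, cprotect_t cpr,
								off_rsrc_t off_rsrc, int64_t resid)
{
	while (resid > 0) {
		cp_io_params_t io_params;

		cp_io_params(hfsmp, cpr, off_rsrc, VNODE_READ, &io_params);

		// max_len bounds how many bytes may be issued with io_params.cpx;
		// phys_offset == -1 means no relocation is in effect.
		int64_t this_len = MIN(resid, io_params.max_len);

		// ... issue this_len bytes of I/O keyed by io_params.cpx ...

		off_rsrc += this_len;
		resid -= this_len;
	}
}
#endif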
static void cp_flush_cached_keys(cprotect_t cpr)
{
	cpx_flush(cpkp_cpx(&cpr->cp_keys));
#if HFS_CONFIG_KEY_ROLL
	if (cpr->cp_key_roll_ctx)
		cpx_flush(cpkp_cpx(&cpr->cp_key_roll_ctx->ckr_keys));
#endif
}
static bool cp_needs_pers_key(cprotect_t cpr)
{
	if (CP_CLASS(cpr->cp_pclass) == PROTECTION_CLASS_F)
		return !cpx_has_key(cpkp_cpx(&cpr->cp_keys));
	else
		return !cpkp_has_pers_key(&cpr->cp_keys);
}
static cp_key_revision_t cp_initial_key_revision(__unused hfsmount_t *hfsmp)
{
	return 1;
}

cp_key_revision_t cp_next_key_revision(cp_key_revision_t rev)
{
	rev = (rev + 0x0100) ^ (mach_absolute_time() & 0xff);
	if (!rev)
		rev = 1;
	return rev;
}
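
/*
 * Illustrative note (not part of the original source): starting from the
 * initial revision of 1, each call above bumps the high byte and scrambles
 * the low byte with the current time, e.g.
 *
 *     rev = 0x0001  ->  (0x0101) ^ (mach_absolute_time() & 0xff)
 *
 * so successive rewraps get distinct, loosely ordered revisions.
 */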
/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
	struct cprotect *entry = NULL;
	int error = 0;
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/*
	 * The cnode should be locked at this point, regardless of whether or not
	 * we are creating a new item in the namespace or vending a vnode on behalf
	 * of lookup.  The only time we tell getnewvnode to skip the lock is when
	 * constructing a resource fork vnode. But a resource fork vnode must come
	 * after the regular data fork cnode has already been constructed.
	 */
	if (!cp_fs_protected (mp)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("hfs cp: no running mount point version! ");
	}

	hfs_assert(cp->c_cpentry == NULL);

	error = cp_getxattr(cp, hfsmp, &entry);
	if (error == ENOATTR) {
		/*
		 * Normally, we should always have a CP EA for a file or directory that
		 * we are initializing here. However, there are some extenuating circumstances,
		 * such as the root directory immediately following a newfs_hfs.
		 *
		 * As a result, we leave code here to deal with an ENOATTR which will always
		 * default to a 'D/NONE' key, though we don't expect to use it much.
		 */
		cp_key_class_t target_class = PROTECTION_CLASS_D;

		if (S_ISDIR(cp->c_mode)) {
			target_class = PROTECTION_CLASS_DIR_NONE;
		}

		cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

		/* allow keybag to override our class preferences */
		error = cp_new (&target_class, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS,
						key_revision, (cp_new_alloc_fn)cp_entry_alloc, (void **)&entry);
		if (error == 0) {
			entry->cp_pclass = target_class;
			entry->cp_key_os_version = cp_os_version();
			entry->cp_key_revision = key_revision;
			error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
		}
	}

	/*
	 * Bail out if:
	 * a) error was not ENOATTR (we got something bad from the getxattr call)
	 * b) we encountered an error setting the xattr above.
	 * c) we failed to generate a new cprotect data structure.
	 */
	if (error) {
		goto out;
	}

	cp->c_cpentry = entry;

out:
	if (error == 0) {
		entry->cp_backing_cnode = cp;
	}
	else {
		if (entry) {
			cp_entry_destroy(hfsmp, entry);
		}
		cp->c_cpentry = NULL;
	}

	return error;
}
/*
 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
 * Since the kext is now responsible for vending us both wrapped/unwrapped keys
 * we need to create a keyless xattr upon file / directory creation. When we have the inode value
 * and the file/directory is established, then we can ask it to generate keys.  Note that
 * this introduces a potential race; if the device is locked and the wrapping
 * keys are purged between the time we call this function and the time we ask it to generate
 * keys for us, we could have to fail the open(2) call and back out the entry.
 */
int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp,
					   cp_key_class_t suppliedclass, mode_t cmode,
					   struct cprotect **tmpentry)
{
	int isdir = 0;
	struct cprotect *entry = NULL;
	uint32_t target_class = hfsmp->default_cp_class;
	suppliedclass = CP_CLASS(suppliedclass);

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("CP: major vers not set in mount!");
	}

	if (S_ISDIR (cmode)) {
		isdir = 1;
	}

	/* Decide the target class.  Input argument takes priority. */
	if (cp_is_valid_class (isdir, suppliedclass)) {
		/* caller supplies -1 if it was not specified so we will default to the mount point value */
		target_class = suppliedclass;
		/*
		 * One exception, F is never valid for a directory
		 * because its children may inherit and userland will be
		 * unable to read/write to the files.
		 */
		if (isdir) {
			if (target_class == PROTECTION_CLASS_F) {
				*tmpentry = NULL;
				return EINVAL;
			}
		}
	}
	else {
		/*
		 * If no valid class was supplied, behave differently depending on whether or not
		 * the item being created is a file or directory.
		 *
		 * for FILES:
		 *		If parent directory has a non-zero class, use that.
		 *		If parent directory has a zero class (not set), then attempt to
		 *		apply the mount point default.
		 *
		 * for DIRECTORIES:
		 *		Directories always inherit from the parent; if the parent
		 *		has a NONE class set, then we can continue to use that.
		 */
		if ((dcp) && (dcp->c_cpentry)) {
			uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
			/* If the parent class is not valid, default to the mount point value */
			if (cp_is_valid_class(1, parentclass)) {
				if (isdir) {
					target_class = parentclass;
				}
				else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
					/* files can inherit so long as it's not NONE */
					target_class = parentclass;
				}
			}
			/* Otherwise, we already defaulted to the mount point's default */
		}
	}

	/* Generate the cprotect to vend out */
	entry = cp_entry_alloc(NULL, 0, 0, NULL);
	if (entry == NULL) {
		*tmpentry = NULL;
		return ENOMEM;
	}

	/*
	 * We don't have keys yet, so fill in what we can.  At this point
	 * this blob has no keys and it has no backing xattr.  We just know the
	 * target class.
	 */
	entry->cp_flags = CP_NO_XATTR;
	/* Note this is only the effective class */
	entry->cp_pclass = target_class;
	*tmpentry = entry;

	return 0;
}
/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk.  We use class F for this since it provides the exact
 * semantics that are needed here.  Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 */
int cpx_gentempkeys(cpx_t *pcpx, __unused struct hfsmount *hfsmp)
{
	cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE, true);

	cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
	read_random(cpx_key(cpx), CP_MAX_KEYSIZE);
	cpx_set_use_offset_for_iv(cpx, true);

	*pcpx = cpx;

	return 0;
}
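
/*
 * Illustrative sketch (not part of the original source): the truncation /
 * relocation path described above would use the transient key roughly like
 * this.  cpx_free pairing with cpx_alloc is assumed from the cpx API.
 */
#if 0
static int cpx_gentempkeys_sketch(struct hfsmount *hfsmp)
{
	cpx_t tmp = NULL;
	int error = cpx_gentempkeys(&tmp, hfsmp);
	if (error)
		return error;

	// ... encrypt the old blocks out with 'tmp', write the new location,
	// then decrypt with the same key (AES symmetry) ...

	cpx_free(tmp);
	return 0;
}
#endif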
/*
 * Tear down and clear a cprotect blob for a closing file.
 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
 */
void
cp_entry_destroy(hfsmount_t *hfsmp, struct cprotect *entry_ptr)
{
	if (entry_ptr == NULL) {
		/* nothing to clean up */
		return;
	}
	cp_entry_dealloc(hfsmp, entry_ptr);
}
int
cp_fs_protected (mount_t mnt)
{
	return (vfs_flags(mnt) & MNT_CPROTECT);
}
/*
 * Return a pointer to underlying cnode if there is one for this vnode.
 * Done without taking cnode lock, inspecting only vnode state.
 */
cnode_t *
cp_get_protected_cnode(struct vnode *vp)
{
	if (!cp_vnode_is_eligible(vp)) {
		return NULL;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		/* mount point doesn't support it */
		return NULL;
	}

	return vnode_fsnode(vp);
}
/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 */
int
cp_vnode_getclass(struct vnode *vp, cp_key_class_t *class)
{
	struct cprotect *entry;
	int error = 0;
	struct cnode *cp;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible (vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the truncate lock up-front in shared mode because we may need
	 * to manipulate the CP blob. Pend lock events until we're done here.
	 */
	hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * We take only the shared cnode lock up-front.  If it turns out that
	 * we need to manipulate the CP blob to write a key out, drop the
	 * shared cnode lock and acquire an exclusive lock.
	 */
	error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	if (error) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return error;
	}

	/* pull the class from the live entry */
	entry = cp->c_cpentry;

	if (entry == NULL) {
		panic("Content Protection: uninitialized cnode %p", cp);
	}

	/* Note that we may not have keys yet, but we know the target class. */

	if (error == 0) {
		*class = CP_CLASS(entry->cp_pclass);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	return error;
}
/*
 * Sets persistent class for this file or directory.
 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
 * If the new class can't be accessed now, EPERM.
 * Otherwise, record class and re-wrap key if the mount point is content-protected.
 */
int
cp_vnode_setclass(struct vnode *vp, cp_key_class_t newclass)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;
	int isdir = 0;

	if (vnode_isdir (vp)) {
		isdir = 1;
	}

	/* Ensure we only use the effective class here */
	newclass = CP_CLASS(newclass);

	if (!cp_is_valid_class(isdir, newclass)) {
		printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
		return EINVAL;
	}

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same.  This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	cp = VTOC(vp);
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * The truncate lock is not sufficient to guarantee the CP blob
	 * isn't being used.  We must wait for existing writes to finish.
	 */
	vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * re-wrap per-file key with new class.
	 * Generate an entirely new key if switching to F.
	 */
	if (vnode_isreg(vp)) {
		/*
		 * The vnode is a file.  Before proceeding with the re-wrap, we need
		 * to unwrap the keys before proceeding.  This is to ensure that
		 * the destination class's properties still work appropriately for the
		 * target class (since B allows I/O but an unwrap prior to the next unlock
		 * will not be allowed).
		 */
		if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			error = cp_restore_keys (entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}

		if (newclass == PROTECTION_CLASS_F) {
			/* Verify that file is blockless if switching to class F */
			if (cp->c_datafork->ff_size > 0) {
				error = EINVAL;
				goto out;
			}

			cp_key_pair_t *cpkp = NULL;
			cprotect_t new_entry = cp_entry_alloc(NULL, 0, CP_MAX_KEYSIZE, &cpkp);
			if (!new_entry) {
				error = ENOMEM;
				goto out;
			}

			/* newclass is only the effective class */
			new_entry->cp_pclass = newclass;
			new_entry->cp_key_os_version = cp_os_version();
			new_entry->cp_key_revision = cp_next_key_revision(entry->cp_key_revision);

			cpx_t cpx = cpkp_cpx(cpkp);

			/* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */
			cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
			read_random (cpx_key(cpx), CP_MAX_KEYSIZE);

			cp_replace_entry(hfsmp, cp, new_entry);

			error = 0;
			goto out;
		}

		/* Deny the setclass if file is to be moved from F to something else */
		if (entry->cp_pclass == PROTECTION_CLASS_F) {
			error = EPERM;
			goto out;
		}

		if (!cpkp_has_pers_key(&entry->cp_keys)) {
			struct cprotect *new_entry = NULL;
			/*
			 * We want to fail if we can't wrap to the target class. By not setting
			 * CP_KEYWRAP_DIFFCLASS, we tell keygeneration that if it can't wrap
			 * to 'newclass' then error out.
			 */
			uint32_t flags = 0;
			error = cp_generate_keys (hfsmp, cp, newclass, flags, &new_entry);
			if (error == 0) {
				cp_replace_entry (hfsmp, cp, new_entry);
			}
			/* Bypass the setxattr code below since generate_keys does it for us */
			goto out;
		}

		cprotect_t new_entry;
		error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys, entry,
						  (cp_new_alloc_fn)cp_entry_alloc, (void **)&new_entry);
		if (error) {
			/* we didn't have perms to set this class. leave file as-is and error out */
			goto out;
		}

#if HFS_CONFIG_KEY_ROLL
		hfs_cp_key_roll_ctx_t *new_key_roll_ctx = NULL;
		if (entry->cp_key_roll_ctx) {
			error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_key_roll_ctx->ckr_keys,
							  entry->cp_key_roll_ctx,
							  (cp_new_alloc_fn)hfs_key_roll_ctx_alloc,
							  (void **)&new_key_roll_ctx);
			if (error) {
				cp_entry_dealloc(hfsmp, new_entry);
				goto out;
			}

			new_entry->cp_key_roll_ctx = new_key_roll_ctx;
		}
#endif

		new_entry->cp_pclass = newclass;

		cp_replace_entry(hfsmp, cp, new_entry);
	}
	else if (vnode_isdir(vp)) {
		/* For directories, just update the pclass. newclass is only effective class */
		entry->cp_pclass = newclass;
		error = 0;
	}
	else {
		/* anything else, just error out */
		error = EINVAL;
		goto out;
	}

	/*
	 * We get here if the new class was F, or if we were re-wrapping a cprotect that already
	 * existed. If the keys were never generated, then they'll skip the setxattr calls.
	 */

	error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
	if (error == ENOATTR) {
		error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}
int cp_vnode_transcode(vnode_t vp, cp_key_t *k)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Structures passed between HFS and AKS */
	struct aks_cred_s access_in;
	struct aks_wrapped_key_s wrapped_key_in, wrapped_key_out;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same.  This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Send the per-file key in wrapped form for re-wrap with the current class information
	 * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
	 * Don't need to process any outputs, so just clear the locks and pass along the error. */
	if (vnode_isreg(vp)) {

		/* Picked up the following from cp_wrap().
		 * If needed, more comments available there. */

		if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
			error = EINVAL;
			goto out;
		}

		cp_init_access(&access_in, cp);

		bzero(&wrapped_key_in, sizeof(wrapped_key_in));
		bzero(&wrapped_key_out, sizeof(wrapped_key_out));

		cp_key_pair_t *cpkp = &entry->cp_keys;

#if HFS_CONFIG_KEY_ROLL
		if (entry->cp_key_roll_ctx)
			cpkp = &entry->cp_key_roll_ctx->ckr_keys;
#endif

		wrapped_key_in.key = cpkp_pers_key(cpkp);
		wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);

		if (!wrapped_key_in.key_len) {
			error = EINVAL;
			goto out;
		}

		/* Use the actual persistent class when talking to AKS */
		wrapped_key_in.dp_class = entry->cp_pclass;
		wrapped_key_out.key = k->key;
		wrapped_key_out.key_len = k->len;

		error = hfs_backup_key(&access_in,
							   &wrapped_key_in,
							   &wrapped_key_out);

		if (error == 0) {
			k->len = wrapped_key_out.key_len;
		}
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}
/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock.  This is because
 * the thread calling us may already have the truncate lock.  It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys.  We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
	struct cprotect *entry;
	int error = 0;
	struct hfsmount *hfsmp = NULL;
	struct cnode *cp = NULL;

	/*
	 * First, do validation against the vnode before proceeding any further:
	 * Is this vnode originating from a valid content-protected filesystem ?
	 */
	if (cp_vnode_is_eligible(vp) == 0) {
		/*
		 * It is either not HFS or not a file/dir.  Just return success. This is a valid
		 * case if servicing i/o against another filesystem type from VFS
		 */
		return 0;
	}

	if (cp_fs_protected (VTOVFS(vp)) == 0) {
		/*
		 * The underlying filesystem does not support content protection.  This is also
		 * a valid case.  Simply return success.
		 */
		return 0;
	}

	/*
	 * At this point, we know we have a HFS vnode that backs a file or directory on a
	 * filesystem that supports content protection
	 */
	cp = VTOC(vp);

	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;

	if (!entry) {
		/*
		 * If this cnode is not content protected, simply return success.
		 * Note that this function is called by all I/O-based call sites
		 * when CONFIG_PROTECT is enabled during XNU building.
		 */

		/*
		 * All files should have cprotect structs.  It's possible to encounter
		 * a directory from a V2.0 CP system but all files should have protection
		 * EAs
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}

		goto out;
	}

	hfsmp = VTOHFS(vp);

	if ((error = cp_check_access(cp, hfsmp, vnop))) {
		/* check for raw encrypted access before bailing out */
		if ((ioflag & IO_ENCRYPTED)
#if HFS_CONFIG_KEY_ROLL
			// If we're rolling, we need the keys
			&& !hfs_is_key_rolling(cp)
#endif
			&& (vnop == CP_READ_ACCESS)) {
			/*
			 * read access only + asking for the raw encrypted bytes
			 * is legitimate, so reset the error value to 0
			 */
			error = 0;
		}
		else {
			goto out;
		}
	}

	if (!ISSET(entry->cp_flags, CP_NO_XATTR)) {
		if (!S_ISREG(cp->c_mode))
			goto out;

		// If we have a persistent key and the cached key, we're done
		if (!cp_needs_pers_key(entry)
			&& cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			goto out;
		}
	}

	/* upgrade to exclusive lock */
	if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
			return error;
		}
	} else {
		cp->c_lockowner = current_thread();
	}

	/* generate new keys if none have ever been saved */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/*
		 * It's ok if this ends up being wrapped in a different class than 'pclass'.
		 * class modification is OK here.
		 */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;

		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/* unwrap keys if needed */
	if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/* no need to try to restore keys; they are not going to be used */
			error = 0;
		}
		else {
			error = cp_restore_keys(entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
	}

	/* write out the xattr if it's new */
	if (entry->cp_flags & CP_NO_XATTR)
		error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:
	hfs_unlock(cp);
	return error;
}
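
/*
 * Illustrative sketch (not part of the original source): read/write entry
 * points gate on cp_handle_vnop before touching file data.  The surrounding
 * handler is hypothetical; CP_READ_ACCESS and the IO_ENCRYPTED carve-out
 * are the real behavior documented above.
 */
#if 0
static int hfs_read_gate_sketch(struct vnode *vp, int ioflag)
{
	int error = cp_handle_vnop(vp, CP_READ_ACCESS, ioflag);
	if (error)
		return error;	// keys unavailable (e.g. device locked): fail the read

	// ... proceed with the cluster I/O, using cp_io_params for keys ...
	return 0;
}
#endif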
static void cp_log_eperm (struct vnode *vp, int pclass, boolean_t create) {
	char procname[256] = {};
	const char *fname = "unknown";
	const char *dbgop = "open";

	int ppid = proc_selfpid();
	/* selfname does a strlcpy so we're OK */
	proc_selfname(procname, sizeof(procname));
	if (vp && vp->v_name) {
		/* steal from the namecache */
		fname = vp->v_name;
	}

	if (create) {
		dbgop = "create";
	}

	printf("proc %s (pid %d) class %d, op: %s failure @ file %s\n", procname, ppid, pclass, dbgop, fname);
}
int
cp_handle_open(struct vnode *vp, int mode)
{
	struct cnode *cp = NULL;
	struct cprotect *entry = NULL;
	struct hfsmount *hfsmp;
	int error = 0;

	/* If vnode not eligible, just return success */
	if (!cp_vnode_is_eligible(vp)) {
		return 0;
	}

	/* If mount point not properly set up, then also return success */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return 0;
	}

	cp = VTOC(vp);

	// Allow if raw encrypted mode requested
	if (ISSET(mode, FENCRYPTED)) {
#if HFS_CONFIG_KEY_ROLL
		// If we're rolling, we need the keys
		hfs_lock_always(cp, HFS_SHARED_LOCK);
		bool rolling = hfs_is_key_rolling(cp);
		hfs_unlock(cp);
		if (!rolling)
			return 0;
#else
		return 0;
#endif
	}

	if (ISSET(mode, FUNENCRYPTED)) {
		return 0;
	}

	/* We know the vnode is in a valid state. Acquire cnode and validate */
	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/*
		 * If the mount is protected and we couldn't get a cprotect for this vnode,
		 * then it's not valid for opening.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	if (!S_ISREG(cp->c_mode))
		goto out;

	/*
	 * Does the cnode have keys yet?  If not, then generate them.
	 */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/* Allow the keybag to override our class preferences */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;
		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/*
	 * We want to minimize the number of unwraps that we'll have to do since
	 * the cost can vary, depending on the platform we're running.
	 */
	switch (CP_CLASS(entry->cp_pclass)) {
		case PROTECTION_CLASS_B:
			if (mode & O_CREAT) {
				/*
				 * Class B always allows creation.  Since O_CREAT was passed through
				 * we infer that this was a newly created vnode/cnode.  Even though a potential
				 * race exists when multiple threads attempt to create/open a particular
				 * file, only one can "win" and actually create it.  VFS will unset the
				 * O_CREAT bit on the loser.
				 *
				 * Note that skipping the unwrap check here is not a security issue --
				 * we have to unwrap the key permanently upon the first I/O.
				 */
				break;
			}

			if (cpx_has_key(cpkp_cpx(&entry->cp_keys)) && !ISSET(mode, FENCRYPTED)) {
				/*
				 * For a class B file, attempt the unwrap if we have the key in
				 * core already.
				 * The device could have just transitioned into the lock state, and
				 * this vnode may not yet have been purged from the vnode cache (which would
				 * remove the keys).
				 */
				struct aks_cred_s access_in;
				struct aks_wrapped_key_s wrapped_key_in;

				cp_init_access(&access_in, cp);
				bzero(&wrapped_key_in, sizeof(wrapped_key_in));
				wrapped_key_in.key = cpkp_pers_key(&entry->cp_keys);
				wrapped_key_in.key_len = cpkp_pers_key_len(&entry->cp_keys);
				/* Use the persistent class when talking to AKS */
				wrapped_key_in.dp_class = entry->cp_pclass;
				error = hfs_unwrap_key(&access_in, &wrapped_key_in, NULL);
				if (error) {
					error = EPERM;
				}
				break;
			}
			/* otherwise, fall through to attempt the unwrap/restore */
		case PROTECTION_CLASS_A:
		case PROTECTION_CLASS_C:
			/*
			 * At this point, we know that we need to attempt an unwrap if needed; we want
			 * to makes sure that open(2) fails properly if the device is either just-locked
			 * or never made it past first unlock.  Since the keybag serializes access to the
			 * unwrapping keys for us and only calls our VFS callback once they've been purged,
			 * we will get here in two cases:
			 *
			 * A) we're in a window before the wrapping keys are purged; this is OK since when they get
			 * purged, the vnode will get flushed if needed.
			 *
			 * B) The keys are already gone.  In this case, the restore_keys call below will fail.
			 *
			 * Since this function is bypassed entirely if we're opening a raw encrypted file,
			 * we can always attempt the restore.
			 */
			if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
				error = cp_restore_keys(entry, hfsmp, cp);
			}

			if (error) {
				error = EPERM;
			}

			break;

		case PROTECTION_CLASS_D:
		default:
			break;
	}

out:
	if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
		cp_log_eperm (vp, CP_CLASS(entry->cp_pclass), false);
	}

	hfs_unlock(cp);
	return error;
}
/*
 * Gets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to native endian-ness as needed.
 */
int
cp_getrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *outxattr)
{
	void *buf;

	/*
	 * We allow for an extra 64 bytes to cater for upgrades.  This wouldn't
	 * be necessary if the xattr routines just returned what we asked for.
	 */
	size_t bufsize = roundup(sizeof(struct cp_root_xattr) + 64, 64);

	int error = 0;

	hfs_assert(outxattr);

	buf = hfs_malloc(bufsize);

	uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

	uio_addiov(uio, CAST_USER_ADDR_T(buf), bufsize);

	size_t attrsize = bufsize;

	struct vnop_getxattr_args args = {
		.a_uio = uio,
		.a_name = CONTENT_PROTECTION_XATTR_NAME,
		.a_size = &attrsize
	};

	error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);

	uio_free(uio);

	if (error) {
		goto out;
	}

	if (attrsize < CP_ROOT_XATTR_MIN_LEN) {
		error = HFS_EINCONSISTENT;
		goto out;
	}

	const struct cp_root_xattr *xattr = buf;

	bzero(outxattr, sizeof(*outxattr));

	/* Now convert the multi-byte fields to native endianness */
	outxattr->major_version = OSSwapLittleToHostInt16(xattr->major_version);
	outxattr->minor_version = OSSwapLittleToHostInt16(xattr->minor_version);
	outxattr->flags = OSSwapLittleToHostInt64(xattr->flags);

	if (outxattr->major_version >= CP_VERS_5) {
		if (attrsize < sizeof(struct cp_root_xattr)) {
			error = HFS_EINCONSISTENT;
			goto out;
		}
#if HFS_CONFIG_KEY_ROLL
		outxattr->auto_roll_min_version = OSSwapLittleToHostInt32(xattr->auto_roll_min_version);
		outxattr->auto_roll_max_version = OSSwapLittleToHostInt32(xattr->auto_roll_max_version);
#endif
	}

out:
	hfs_free(buf, bufsize);
	return error;
}
/*
 * Sets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to little endian as needed.
 *
 * This will be written to the disk when it detects the EA is not there, or when we need
 * to make a modification to the on-disk version that can be done in-place.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
	int error = 0;
	struct vnop_setxattr_args args;

	args.a_desc = NULL;
	args.a_vp = NULL;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL; //pass data ptr instead
	args.a_options = 0;
	args.a_context = NULL; //no context needed, only done from mount.

	const uint64_t flags = newxattr->flags;

	/* Now convert the multi-byte fields to little endian before writing to disk. */
	newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

	int xattr_size = sizeof(struct cp_root_xattr);

#if HFS_CONFIG_KEY_ROLL
	bool upgraded = false;

	if (newxattr->auto_roll_min_version || newxattr->auto_roll_max_version) {
		if (newxattr->major_version < CP_VERS_5) {
			printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

			newxattr->major_version = CP_CURRENT_VERS;
			newxattr->minor_version = CP_MINOR_VERS;

			upgraded = true;
		}

		newxattr->auto_roll_min_version = OSSwapHostToLittleInt32(newxattr->auto_roll_min_version);
		newxattr->auto_roll_max_version = OSSwapHostToLittleInt32(newxattr->auto_roll_max_version);
	} else if (newxattr->major_version == CP_VERS_4)
		xattr_size = offsetof(struct cp_root_xattr, auto_roll_min_version);
#endif

	newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
	newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);

	error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
								  xattr_size, &args, hfsmp, 1);

	if (!error) {
		hfsmp->cproot_flags = flags;
#if HFS_CONFIG_KEY_ROLL
		if (upgraded)
			hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
#endif
	}

	return error;
}
/*
 * Stores new xattr data on the cnode.
 * cnode lock held exclusive (if available).
 *
 * This function is also invoked during file creation.
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp,
				uint32_t fileid, int options)
{
	int error = 0;
	cp_key_pair_t *cpkp = &entry->cp_keys;
#if HFS_CONFIG_KEY_ROLL
	bool rolling = entry->cp_key_roll_ctx != NULL;

	if (rolling && entry->cp_key_roll_ctx->ckr_off_rsrc == INT64_MAX) {
		// We've finished rolling, but we still have the context
		rolling = false;
		cpkp = &entry->cp_key_roll_ctx->ckr_keys;
	}
#endif

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	if (hfsmp->hfs_running_cp_major_vers < CP_CURRENT_VERS) {
		// Upgrade
		printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

		struct cp_root_xattr root_xattr;

		error = cp_getrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		root_xattr.major_version = CP_CURRENT_VERS;
		root_xattr.minor_version = CP_MINOR_VERS;

		error = cp_setrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
	}

	struct cp_xattr_v5 *xattr;
	xattr = hfs_malloc(sizeof(*xattr));

	xattr->xattr_major_version = OSSwapHostToLittleConstInt16(CP_VERS_5);
	xattr->xattr_minor_version = OSSwapHostToLittleConstInt16(CP_MINOR_VERS);
	xattr->flags = 0;
#if HFS_CONFIG_KEY_ROLL
	if (rolling)
		xattr->flags |= CP_XAF_KEY_ROLLING;
#endif
	xattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
	xattr->key_os_version = OSSwapHostToLittleInt32(entry->cp_key_os_version);
	xattr->key_revision = OSSwapHostToLittleInt16(entry->cp_key_revision);

	uint16_t key_len = cpkp_pers_key_len(cpkp);
	xattr->key_len = OSSwapHostToLittleInt16(key_len);
	memcpy(xattr->persistent_key, cpkp_pers_key(cpkp), key_len);

	size_t xattr_len = offsetof(struct cp_xattr_v5, persistent_key) + key_len;

#if HFS_CONFIG_KEY_ROLL
	if (rolling) {
		struct cp_roll_info *roll_info = PTR_ADD(struct cp_roll_info *, xattr, xattr_len);

		roll_info->off_rsrc = OSSwapHostToLittleInt64(entry->cp_key_roll_ctx->ckr_off_rsrc);

		key_len = cpkp_pers_key_len(&entry->cp_key_roll_ctx->ckr_keys);
		roll_info->key_len = OSSwapHostToLittleInt16(key_len);

		memcpy(roll_info->key, cpkp_pers_key(&entry->cp_key_roll_ctx->ckr_keys), key_len);

		xattr_len += offsetof(struct cp_roll_info, key) + key_len;
	}
#endif

	struct vnop_setxattr_args args = {
		.a_vp      = cp ? cp->c_vp : NULL,
		.a_name    = CONTENT_PROTECTION_XATTR_NAME,
		.a_options = options,
		.a_context = vfs_context_current(),
	};

	error = hfs_setxattr_internal(cp, xattr, xattr_len, &args, hfsmp, fileid);

	hfs_free(xattr, sizeof(*xattr));

	if (error == 0) {
		entry->cp_flags &= ~CP_NO_XATTR;
	}

	return error;
}
/*
 * Used by an fcntl to query the underlying FS for its content protection version #
 */
int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the level */

	err = cp_root_major_vers(mp);

	if (err == 0) {
		*level = hfsmp->hfs_running_cp_major_vers;
	}
	/* in error case, cp_root_major_vers will just return EINVAL. Use that */

	return err;
}
/* Used by fcntl to query default protection level of FS */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the default */

	*level = hfsmp->default_cp_class;
	return err;
}
/********************
 * Private Functions
 *******************/

static int
cp_root_major_vers(mount_t mp)
{
	int err = 0;
	struct cp_root_xattr xattr;
	struct hfsmount *hfsmp = NULL;

	hfsmp = vfs_fsprivate(mp);
	err = cp_getrootxattr (hfsmp, &xattr);

	if (err == 0) {
		hfsmp->hfs_running_cp_major_vers = xattr.major_version;
	}
	else {
		return EINVAL;
	}

	return 0;
}
static int
cp_vnode_is_eligible(struct vnode *vp)
{
	return !vnode_issystem(vp) && (vnode_isreg(vp) || vnode_isdir(vp));
}
#if DEBUG
static const uint32_t cp_magic1 = 0x7b727063;		// cpr{
static const uint32_t cp_magic2 = 0x7270637d;		// }cpr
#endif

struct cprotect *
cp_entry_alloc(cprotect_t old, uint16_t pers_key_len,
			   uint16_t cached_key_len, cp_key_pair_t **pcpkp)
{
	struct cprotect *cp_entry;

	if (pers_key_len > CP_MAX_WRAPPEDKEYSIZE)
		return (NULL);

	size_t size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
				   + cpkp_size(pers_key_len, cached_key_len));

#if DEBUG
	size += 4; // Extra for magic2
#endif

	cp_entry = hfs_mallocz(size);

	if (old) {
		memcpy(cp_entry, old, offsetof(struct cprotect, cp_keys));

#if HFS_CONFIG_KEY_ROLL
		// We don't copy the key roll context
		cp_entry->cp_key_roll_ctx = NULL;
#endif
	}

#if DEBUG
	cp_entry->cp_magic1 = cp_magic1;
	*PTR_ADD(uint32_t *, cp_entry, size - 4) = cp_magic2;
#endif

	cpkp_init(&cp_entry->cp_keys, pers_key_len, cached_key_len);

	/*
	 * If we've been passed the old entry, then we are in the process of
	 * rewrapping in which case we need to copy the cached key.  This is
	 * important for class B files when the device is locked because we
	 * won't be able to unwrap whilst in this state, yet we still need the
	 * unwrapped key.
	 */
	if (old)
		cpx_copy(cpkp_cpx(&old->cp_keys), cpkp_cpx(&cp_entry->cp_keys));

	if (pcpkp)
		*pcpkp = &cp_entry->cp_keys;

	return cp_entry;
}
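
/*
 * Illustrative note (not part of the original source): the allocation above
 * yields a single variable-length blob laid out roughly as
 *
 *     struct cprotect | cpx (cached key) | persistent key [| magic2]
 *
 * which is why cpkp_pers_key() computes its pointer as the cpx base plus
 * cpx_sizex(), and why DEBUG builds stash cp_magic2 in the final 4 bytes.
 */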
static void
cp_entry_dealloc(__unused hfsmount_t *hfsmp, struct cprotect *entry)
{
#if HFS_CONFIG_KEY_ROLL
	hfs_release_key_roll_ctx(hfsmp, entry);
#endif

	cpkp_flush(&entry->cp_keys);

	size_t entry_size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
						 + cpkp_sizex(&entry->cp_keys));

	/*
	 * We are freeing the HFS cprotect, which contains the memory for 'cpx'.
	 * Don't forget to release the CPX AES context.
	 */
	cpx_t embedded_cpx = cpkp_cpx(&entry->cp_keys);
	cpx_free_ctx(embedded_cpx);

#if DEBUG
	hfs_assert(entry->cp_magic1 == cp_magic1);
	hfs_assert(*PTR_ADD(uint32_t *, entry, (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
			   + cpkp_sizex(&entry->cp_keys))) == cp_magic2);

	entry_size += 4; // Extra for magic2
#endif

	hfs_free(entry, entry_size);
}
static int cp_read_xattr_v4(__unused hfsmount_t *hfsmp, struct cp_xattr_v4 *xattr,
							size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
	/* Endian swap the multi-byte fields into host endianness from L.E. */
	xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
	xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
	xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
	xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
	xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
	xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);

	/*
	 * Prevent a buffer overflow, and validate the key length obtained from the
	 * EA. If it's too big, then bail out, because the EA can't be trusted at this
	 * point.
	 */
	if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE)
		return HFS_EINCONSISTENT;

	size_t min_len = offsetof(struct cp_xattr_v4, persistent_key) + xattr->key_size;
	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

	/*
	 * Class F files have no backing key; their keylength should be 0,
	 * though they should have the proper flags set.
	 *
	 * A request to instantiate a CP for a class F file should result
	 * in a bzero'd cp that just says class F, with key_flushed set.
	 */
	if (CP_CLASS(xattr->persistent_class) == PROTECTION_CLASS_F
		|| ISSET(xattr->flags, CP_XAF_NEEDS_KEYS)) {
		xattr->key_size = 0;
	}

	/* set up entry with information from xattr */
	cp_key_pair_t *cpkp = NULL;
	cprotect_t entry;

	if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		/* caller passed in a pre-allocated structure to get the basic info */
		entry = *pcpr;
		bzero(entry, offsetof(struct cprotect, cp_keys));
	}
	else {
		entry = cp_entry_alloc(NULL, xattr->key_size, CP_MAX_CACHEBUFLEN, &cpkp);
	}

	entry->cp_pclass = xattr->persistent_class;
	entry->cp_key_os_version = xattr->key_os_version;

	if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		if (xattr->key_size) {
			cpkp_set_pers_key_len(cpkp, xattr->key_size);
			memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_size);
		}

		*pcpr = entry;
	}
	else if (xattr->key_size) {
		SET(entry->cp_flags, CP_HAS_A_KEY);
	}

	return 0;
}
int cp_read_xattr_v5(hfsmount_t *hfsmp, struct cp_xattr_v5 *xattr,
					 size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
	if (xattr->xattr_major_version == OSSwapHostToLittleConstInt16(CP_VERS_4)) {
		return cp_read_xattr_v4(hfsmp, (struct cp_xattr_v4 *)xattr, xattr_len, pcpr, options);
	}

	xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);

	if (xattr->xattr_major_version != CP_VERS_5) {
		printf("hfs: cp_getxattr: unsupported xattr version %d\n",
			   xattr->xattr_major_version);
		return ENOTSUP;
	}

	size_t min_len = offsetof(struct cp_xattr_v5, persistent_key);

	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

	xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
	xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
	xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
	xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
	xattr->key_revision = OSSwapLittleToHostInt16(xattr->key_revision);
	xattr->key_len = OSSwapLittleToHostInt16(xattr->key_len);

	uint16_t pers_key_len = xattr->key_len;

	min_len += pers_key_len;
	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

#if HFS_CONFIG_KEY_ROLL
	struct cp_roll_info *roll_info = NULL;

	if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
		roll_info = PTR_ADD(struct cp_roll_info *, xattr, min_len);

		min_len += offsetof(struct cp_roll_info, key);

		if (xattr_len < min_len)
			return HFS_EINCONSISTENT;

		roll_info->off_rsrc = OSSwapLittleToHostInt64(roll_info->off_rsrc);

		if (roll_info->off_rsrc % hfsmp->blockSize)
			return HFS_EINCONSISTENT;

		roll_info->key_len = OSSwapLittleToHostInt16(roll_info->key_len);

		min_len += roll_info->key_len;
		if (xattr_len < min_len)
			return HFS_EINCONSISTENT;
	}
#endif

	cp_key_pair_t *cpkp = NULL;
	cprotect_t entry;

	/*
	 * If option CP_GET_XATTR_BASIC_INFO is set, we only return basic
	 * information about the file's protection (and not the key) and
	 * we store the result in the structure the caller passed to us.
	 */
	if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		entry = *pcpr;
		bzero(entry, offsetof(struct cprotect, cp_keys));
#if HFS_CONFIG_KEY_ROLL
		if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
			SET(entry->cp_flags, CP_KEY_IS_ROLLING);
		}
#endif
	}
	else {
		entry = cp_entry_alloc(NULL, xattr->key_len, CP_MAX_CACHEBUFLEN, &cpkp);
	}

	entry->cp_pclass = xattr->persistent_class;
	entry->cp_key_os_version = xattr->key_os_version;
	entry->cp_key_revision = xattr->key_revision;

	if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		if (xattr->key_len) {
			cpkp_set_pers_key_len(cpkp, xattr->key_len);
			memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_len);
		}

#if HFS_CONFIG_KEY_ROLL
		if (roll_info) {
			entry->cp_key_roll_ctx = hfs_key_roll_ctx_alloc(NULL, roll_info->key_len,
															CP_MAX_CACHEBUFLEN, &cpkp);

			entry->cp_key_roll_ctx->ckr_off_rsrc = roll_info->off_rsrc;

			if (roll_info->key_len) {
				cpkp_set_pers_key_len(cpkp, roll_info->key_len);
				memcpy(cpkp_pers_key(cpkp), roll_info->key, roll_info->key_len);
			}
		}
#endif

		*pcpr = entry;
	}
	else if (xattr->key_len) {
		SET(entry->cp_flags, CP_HAS_A_KEY);
	}

	return 0;
}
/*
 * Initializes a new cprotect entry with xattr data from the cnode.
 * cnode lock held shared
 */
static int
cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, cprotect_t *outentry)
{
	size_t xattr_len;
	struct cp_xattr_v5 *xattr;

	xattr = hfs_malloc(xattr_len = sizeof(*xattr));

	int error = hfs_xattr_read(cp->c_vp, CONTENT_PROTECTION_XATTR_NAME,
							   xattr, &xattr_len);

	if (!error) {
		if (xattr_len < CP_XATTR_MIN_LEN)
			error = HFS_EINCONSISTENT;
		else
			error = cp_read_xattr_v5(hfsmp, xattr, xattr_len, outentry, 0);
	}

#if DEBUG
	if (error && error != ENOATTR) {
		printf("cp_getxattr: bad cp xattr (%d):\n", error);
		for (size_t i = 0; i < xattr_len; ++i)
			printf("%02x ", ((uint8_t *)xattr)[i]);
		printf("\n");
	}
#endif

	hfs_free(xattr, sizeof(*xattr));

	return error;
}
/*
 * If permitted, restore entry's unwrapped key from the persistent key.
 * If not, clear key and set CP_KEY_FLUSHED.
 * cnode lock held exclusive
 */
static int
cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
{
	int error = 0;

	error = cp_unwrap(hfsmp, entry, cp);
	if (error) {
		cp_flush_cached_keys(entry);
		error = EPERM;
	}
	return error;
}
void cp_device_locked_callback(mount_t mp, cp_lock_state_t state)
{
	struct hfsmount *hfsmp;

	/*
	 * When iterating the various mount points that may
	 * be present on a content-protected device, we need to skip
	 * those that do not have it enabled.
	 */
	if (!cp_fs_protected(mp)) {
		return;
	}

	hfsmp = VFSTOHFS(mp);

	hfsmp->hfs_cp_lock_state = state;

	if (state == CP_LOCKED_STATE) {
		/*
		 * We respond only to lock events.  Since cprotect structs
		 * decrypt/restore keys lazily, the unlock events don't
		 * actually cause anything to happen.
		 */
		vnode_iterate(mp, 0, cp_lock_vnode_callback, (void *)(uintptr_t)state);
	}
}
/*
 * Deny access to protected files if keys have been locked.
 */
static int
cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
{
	int error = 0;

	/*
	 * For now it's OK to examine the state variable here without
	 * holding the HFS lock.  This is only a short-circuit; if the state
	 * transitions (or is in transition) after we examine this field, we'd
	 * have to handle that anyway.
	 */
	if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
		return 0;
	}

	if (!cp->c_cpentry) {
		/* unprotected node */
		return 0;
	}

	if (!S_ISREG(cp->c_mode)) {
		return 0;
	}

	/* Deny all access for class A files */
	switch (CP_CLASS(cp->c_cpentry->cp_pclass)) {
		case PROTECTION_CLASS_A: {
			error = EPERM;
			break;
		}
		default:
			error = 0;
			break;
	}

	return error;
}
/*
 * Respond to a lock or unlock event.
 * On lock: clear out keys from memory, then flush file contents.
 * On unlock: nothing (function not called).
 */
static int
cp_lock_vnode_callback(struct vnode *vp, void *arg)
{
	struct cnode *cp = NULL;
	struct cprotect *entry = NULL;

	int error = 0;
	int locked = 1;
	unsigned long action = 0;
	int took_truncate_lock = 0;

	error = vnode_getwithref (vp);
	if (error) {
		return error;
	}

	cp = VTOC(vp);

	/*
	 * When cleaning cnodes due to a lock event, we must
	 * take the truncate lock AND the cnode lock.  By taking
	 * the truncate lock here, we force (nearly) all pending IOs
	 * to drain before we can acquire the truncate lock.  All HFS cluster
	 * io calls except for swapfile IO need to acquire the truncate lock
	 * prior to calling into the cluster layer.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

	entry = cp->c_cpentry;
	if (!entry) {
		/* unprotected vnode: not a regular file */
		goto out;
	}

	action = (unsigned long) arg;
	switch (action) {
		case CP_LOCKED_STATE: {
			vfs_context_t ctx;
			if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
				vnode_isdir(vp)) {
				/*
				 * There is no change at lock for other classes than A.
				 * B is kept in memory for writing, and class F (for VM) does
				 * not have a wrapped key, so there is no work needed for
				 * wrapping/unwrapping.
				 *
				 * Note that 'class F' is relevant here because if
				 * hfs_vnop_strategy does not take the cnode lock
				 * to protect the cp blob across IO operations, we rely
				 * implicitly on the truncate lock to be held when doing IO.
				 * The only case where the truncate lock is not held is during
				 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
				 * directly to cluster_pageout.
				 */
				goto out;
			}

			/* Before doing anything else, zero-fill sparse ranges as needed */
			ctx = vfs_context_current();
			(void) hfs_filedone (vp, ctx, 0);

			/* first, sync back dirty pages */
			hfs_unlock (cp);
			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
			hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

			/*
			 * There was a concern here(9206856) about flushing keys before nand layer is done using them.
			 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
			 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF.
			 * Which in turn calls buf_biodone() and eventually unblocks ubc_msync()
			 * Also verified that the cached data in IOFS is overwritten by other data, and there
			 * is no key leakage in that layer.
			 */

			cp_flush_cached_keys(entry);

			/* some write may have arrived in the mean time. dump those pages */
			hfs_unlock(cp);
			locked = 0;

			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
			break;
		}
		case CP_UNLOCKED_STATE: {
			/* no-op */
			break;
		}
		default:
			panic("Content Protection: unknown lock action %lu\n", action);
	}

out:
	if (locked) {
		hfs_unlock(cp);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}

	vnode_put (vp);
	return error;
}
/*
 * Generate a new wrapped key based on the existing cache key.
 */
int
cp_rewrap(struct cnode *cp, __unused hfsmount_t *hfsmp,
		  cp_key_class_t *newclass, cp_key_pair_t *cpkp, const void *old_holder,
		  cp_new_alloc_fn alloc_fn, void **pholder)
{
	struct cprotect *entry = cp->c_cpentry;

	uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
	unsigned keylen = CP_MAX_WRAPPEDKEYSIZE;
	int error = 0;
	const cp_key_class_t key_class = CP_CLASS(*newclass);

	/* Structures passed between HFS and AKS */
	struct aks_cred_s access_in;
	struct aks_wrapped_key_s wrapped_key_in;
	struct aks_wrapped_key_s wrapped_key_out;

	/*
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open.  There is no
	 * wrapped key, so there isn't anything to wrap.
	 */
	if (key_class == PROTECTION_CLASS_F) {
		return EINVAL;
	}

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = cpkp_pers_key(cpkp);
	wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);
	/* Use the persistent class when talking to AKS */
	wrapped_key_in.dp_class = entry->cp_pclass;

	bzero(&wrapped_key_out, sizeof(wrapped_key_out));
	wrapped_key_out.key = new_persistent_key;
	wrapped_key_out.key_len = keylen;

	/*
	 * inode is passed here to find the backup bag wrapped blob
	 * from userspace.  This lookup will occur shortly after creation
	 * and only if the file still exists.  Beyond this lookup the
	 * inode is not used.  Technically there is a race, we practically
	 * don't lose.
	 */
	error = hfs_rewrap_key(&access_in,
						   key_class, /* new class */
						   &wrapped_key_in,
						   &wrapped_key_out);

	keylen = wrapped_key_out.key_len;

	if (error == 0) {
		/*
		 * Verify that AKS returned to us a wrapped key of the
		 * target class requested.
		 */
		/* Get the effective class here */
		cp_key_class_t effective = CP_CLASS(wrapped_key_out.dp_class);
		if (effective != key_class) {
			/*
			 * Fail the operation if defaults or some other enforcement
			 * dictated that the class be wrapped differently.
			 */

			/* TODO: Invalidate the key when 12170074 unblocked */
			return EPERM;
		}

		/* Allocate a new cpentry */
		cp_key_pair_t *new_cpkp;
		*pholder = alloc_fn(old_holder, keylen, CP_MAX_CACHEBUFLEN, &new_cpkp);

		/* copy the new key into the entry */
		cpkp_set_pers_key_len(new_cpkp, keylen);
		memcpy(cpkp_pers_key(new_cpkp), new_persistent_key, keylen);

		/* Actually record/store what AKS reported back, not the effective class stored in newclass */
		*newclass = wrapped_key_out.dp_class;
	}
	else {
		error = EPERM;
	}

	return error;
}
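
/*
 * Illustrative sketch (not part of the original source): cp_vnode_setclass
 * above drives cp_rewrap essentially like this; the stand-alone wrapper is
 * hypothetical.
 */
#if 0
static int cp_rewrap_sketch(struct cnode *cp, hfsmount_t *hfsmp,
							cp_key_class_t newclass)
{
	cprotect_t new_entry = NULL;

	int error = cp_rewrap(cp, hfsmp, &newclass, &cp->c_cpentry->cp_keys,
						  cp->c_cpentry, (cp_new_alloc_fn)cp_entry_alloc,
						  (void **)&new_entry);
	if (error)
		return error;	// AKS refused: leave the file in its current class

	new_entry->cp_pclass = newclass;	// class AKS actually wrapped to
	cp_replace_entry(hfsmp, cp, new_entry);
	return 0;
}
#endif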
static int cpkp_unwrap(cnode_t *cp, cp_key_class_t key_class, cp_key_pair_t *cpkp)
{
	int error = 0;
	uint8_t iv_key[CP_IV_KEYSIZE];
	cpx_t cpx = cpkp_cpx(cpkp);

	/* Structures passed between HFS and AKS */
	struct aks_cred_s access_in;
	struct aks_wrapped_key_s wrapped_key_in;
	struct aks_raw_key_s key_out;

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = cpkp_pers_key(cpkp);
	wrapped_key_in.key_len = cpkp_max_pers_key_len(cpkp);
	/* Use the persistent class when talking to AKS */
	wrapped_key_in.dp_class = key_class;

	bzero(&key_out, sizeof(key_out));
	key_out.iv_key = iv_key;
	key_out.key = cpx_key(cpx);
	/*
	 * The unwrapper should validate/set the key length for
	 * the IV key length and the cache key length, however we need
	 * to supply the correct buffer length so that AKS knows how
	 * many bytes it has to work with.
	 */
	key_out.iv_key_len = CP_IV_KEYSIZE;
	key_out.key_len = cpx_max_key_len(cpx);

	error = hfs_unwrap_key(&access_in, &wrapped_key_in, &key_out);
	if (!error) {
		if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
			panic ("cp_unwrap: invalid key length! (%ul)\n", key_out.key_len);
		}

		if (key_out.iv_key_len != CP_IV_KEYSIZE)
			panic ("cp_unwrap: invalid iv key length! (%ul)\n", key_out.iv_key_len);

		cpx_set_key_len(cpx, key_out.key_len);

		cpx_set_aes_iv_key(cpx, iv_key);
		cpx_set_is_sep_wrapped_key(cpx, ISSET(key_out.flags, AKS_RAW_KEY_WRAPPEDKEY));
	}
	else {
		error = EPERM;
	}

	return error;
}
static int
cp_unwrap(__unused struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
{
	/*
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open.  There is no
	 * wrapped key, so there isn't anything to unwrap.
	 */
	if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
		return EPERM;
	}

	int error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_keys);

#if HFS_CONFIG_KEY_ROLL
	if (!error && entry->cp_key_roll_ctx) {
		error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_key_roll_ctx->ckr_keys);
		if (error)
			cpx_flush(cpkp_cpx(&entry->cp_keys));
	}
#endif

	return error;
}
/*
 * Take a cnode that has already been initialized and establish persistent and
 * cache keys for it at this time.  Note that at the time this is called, the
 * directory entry has already been created and we are holding the cnode lock
 * on 'cp'.
 */
int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, cp_key_class_t targetclass,
		uint32_t keyflags, struct cprotect **newentry)
{
	int error = 0;
	struct cprotect *newcp = NULL;
	*newentry = NULL;

	/* Target class must be an effective class only */
	targetclass = CP_CLASS(targetclass);

	/* Validate that it has a cprotect already */
	if (cp->c_cpentry == NULL) {
		/* We can't do anything if it shouldn't be protected. */
		return 0;
	}

	/* Asserts for the underlying cprotect */
	if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
		/* Should already have an xattr by this point. */
		error = EINVAL;
		goto out;
	}

	if (S_ISREG(cp->c_mode)) {
		if (!cp_needs_pers_key(cp->c_cpentry)) {
			error = EINVAL;
			goto out;
		}
	}

	cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

	error = cp_new (&targetclass, hfsmp, cp, cp->c_mode, keyflags, key_revision,
			(cp_new_alloc_fn)cp_entry_alloc, (void **)&newcp);
	if (error) {
		/*
		 * Key generation failed.  This is not necessarily fatal
		 * since the device could have transitioned into the lock
		 * state before we called this.
		 */
		error = EPERM;
		goto out;
	}

	newcp->cp_pclass = targetclass;
	newcp->cp_key_os_version = cp_os_version();
	newcp->cp_key_revision = key_revision;

	/*
	 * If we got here, then we have a new cprotect.
	 * Attempt to write the new one out.
	 */
	error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);

	if (error) {
		/* Tear down the new cprotect; tell MKB that it's invalid. Bail out. */
		/* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
		if (newcp) {
			cp_entry_destroy(hfsmp, newcp);
		}
		goto out;
	}

	/*
	 * If we get here then we can assert that:
	 * 1) we generated wrapped/unwrapped keys.
	 * 2) we wrote the new keys to disk.
	 * 3) the cprotect is ready to go.
	 */
	*newentry = newcp;

out:
	return error;
}
void cp_replace_entry (hfsmount_t *hfsmp, struct cnode *cp, struct cprotect *newentry)
{
	if (cp->c_cpentry) {
#if HFS_CONFIG_KEY_ROLL
		// Transfer the tentative reservation
		if (cp->c_cpentry->cp_key_roll_ctx && newentry->cp_key_roll_ctx) {
			newentry->cp_key_roll_ctx->ckr_tentative_reservation
				= cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation;

			cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation = NULL;
		}
#endif

		cp_entry_destroy (hfsmp, cp->c_cpentry);
	}

	cp->c_cpentry = newentry;
	newentry->cp_backing_cnode = cp;
}
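
/*
 * Illustrative sketch (not part of the original source, guarded out so it
 * does not compile into the kernel): how a caller might pair
 * cp_generate_keys() with cp_replace_entry().  Per the comment above
 * cp_generate_keys(), the cnode lock is assumed to be held exclusive for
 * the duration; error handling is reduced to the minimum and the function
 * name is hypothetical.
 */
#if 0 /* example only */
static int
example_generate_and_install(struct hfsmount *hfsmp, struct cnode *cp,
		uint32_t keyflags)
{
	struct cprotect *newentry = NULL;

	/* cnode lock must already be held exclusive here */
	int error = cp_generate_keys(hfsmp, cp,
			CP_CLASS(cp->c_cpentry->cp_pclass), keyflags, &newentry);
	if (error == 0 && newentry) {
		/* Tears down the old entry and points cp at the new one */
		cp_replace_entry(hfsmp, cp, newentry);
	}
	return error;
}
#endif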
/*
 * cp_new
 *
 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
 * allocate a cprotect, and vend it back to the caller.
 *
 * Additionally, decide if keys are even needed -- directories get cprotect data structures
 * but they do not have keys.
 */
int
cp_new(cp_key_class_t *newclass_eff, __unused struct hfsmount *hfsmp, struct cnode *cp,
		mode_t cmode, int32_t keyflags, cp_key_revision_t key_revision,
		cp_new_alloc_fn alloc_fn, void **pholder)
{
	int error = 0;
	int iswrapped = 0;
	uint8_t new_key[CP_MAX_CACHEBUFLEN];
	unsigned new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tells us the proper key length; this tracks how much of the buffer is used */
	uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
	unsigned new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
	uint8_t iv_key[CP_IV_KEYSIZE];
	unsigned iv_key_len = CP_IV_KEYSIZE;
	cp_key_class_t key_class = CP_CLASS(*newclass_eff);

	/* Structures passed between HFS and AKS */
	struct aks_cred_s access_in;
	struct aks_wrapped_key_s wrapped_key_out;
	struct aks_raw_key_s key_out;

	/* Sanity check that it's a file or directory here */
	if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
		return EPERM;
	}

	/*
	 * Step 1: Generate Keys if needed.
	 *
	 * For class F files, the kernel provides the key.
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open.  There is no
	 * wrapped key, so there isn't anything to wrap.
	 *
	 * For class A->D files, the key store provides the key.
	 *
	 * For directories, we only give them a class; no keys.
	 */
	if (S_ISDIR (cmode)) {
		/* Directories */
		new_persistent_len = 0;
		new_key_len = 0;
	}
	else {
		/* Must be a file */
		if (key_class == PROTECTION_CLASS_F) {
			/* Class F files are not wrapped; they can still use the max key size */
			new_key_len = CP_MAX_KEYSIZE;
			read_random (&new_key[0], new_key_len);
			new_persistent_len = 0;
		}
		else {
			/*
			 * The keystore is provided the file ID so that it can associate
			 * the wrapped backup blob with this key from userspace.  This
			 * lookup occurs after successful file creation.  Beyond this, the
			 * file ID is not used.  Note that there is a potential race here if
			 * the file ID is re-used.
			 */
			cp_init_access(&access_in, cp);

			bzero(&key_out, sizeof(key_out));
			key_out.key = new_key;
			key_out.iv_key = iv_key;

			/*
			 * AKS will override our key length fields, but we need to supply
			 * the length of the buffer in those length fields so that
			 * AKS knows how many bytes it has to work with.
			 */
			key_out.key_len = new_key_len;
			key_out.iv_key_len = iv_key_len;

			bzero(&wrapped_key_out, sizeof(wrapped_key_out));
			wrapped_key_out.key = new_persistent_key;
			wrapped_key_out.key_len = new_persistent_len;

			access_in.key_revision = key_revision;

			error = hfs_new_key(&access_in,
					key_class,
					&key_out,
					&wrapped_key_out);
			if (error) {
				/* keybag returned failure */
				error = EPERM;
				goto cpnew_fail;
			}

			/* Now sanity-check the output from new_key */
			if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
				panic ("cp_new: invalid key length! (%ul)\n", key_out.key_len);
			}

			if (key_out.iv_key_len != CP_IV_KEYSIZE) {
				panic ("cp_new: invalid iv key length! (%ul)\n", key_out.iv_key_len);
			}

			/*
			 * AKS is allowed to override our preferences and wrap with a
			 * different class key for policy reasons.  If we were told that
			 * any class other than the one specified is unacceptable, then
			 * error out if that occurred.  Check that the effective class
			 * returned by AKS is the same as our effective new class.
			 */
			if (CP_CLASS(wrapped_key_out.dp_class) != key_class) {
				if (!ISSET(keyflags, CP_KEYWRAP_DIFFCLASS)) {
					error = EPERM;
					/* TODO: When 12170074 fixed, release/invalidate the key! */
					goto cpnew_fail;
				}
			}

			*newclass_eff = wrapped_key_out.dp_class;
			new_key_len = key_out.key_len;
			iv_key_len = key_out.iv_key_len;
			new_persistent_len = wrapped_key_out.key_len;

			/* Is the key a SEP wrapped key? */
			if (key_out.flags & AKS_RAW_KEY_WRAPPEDKEY) {
				iswrapped = 1;
			}
		}
	}

	/*
	 * Step 2: allocate cprotect and initialize it.
	 */
	cp_key_pair_t *cpkp;
	*pholder = alloc_fn(NULL, new_persistent_len, new_key_len, &cpkp);
	if (*pholder == NULL) {
		return ENOMEM;
	}

	/* Copy the cache key & IV keys into place if needed. */
	if (new_key_len > 0) {
		cpx_t cpx = cpkp_cpx(cpkp);

		cpx_set_key_len(cpx, new_key_len);
		memcpy(cpx_key(cpx), new_key, new_key_len);

		/* Initialize the IV key */
		if (key_class != PROTECTION_CLASS_F)
			cpx_set_aes_iv_key(cpx, iv_key);

		cpx_set_is_sep_wrapped_key(cpx, iswrapped);
	}
	if (new_persistent_len > 0) {
		cpkp_set_pers_key_len(cpkp, new_persistent_len);
		memcpy(cpkp_pers_key(cpkp), new_persistent_key, new_persistent_len);
	}

cpnew_fail:
	if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
		/* Only introspect the data fork */
		cp_log_eperm (cp->c_vp, *newclass_eff, true);
	}

	return error;
}
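
/*
 * Illustrative sketch (not part of the original source, guarded out so it
 * does not compile into the kernel): what CP_KEYWRAP_DIFFCLASS means to a
 * cp_new() caller.  Without the flag, AKS wrapping with a different class
 * than requested fails with EPERM; with it, the call succeeds and
 * *newclass_eff reports the class that was actually used.  The function
 * name is hypothetical.
 */
#if 0 /* example only */
static int
example_new_keys_tolerate_reclass(struct hfsmount *hfsmp, struct cnode *cp,
		cp_key_class_t wanted, void **pholder)
{
	cp_key_class_t eff = wanted;

	int error = cp_new(&eff, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS,
			cp_initial_key_revision(hfsmp),
			(cp_new_alloc_fn)cp_entry_alloc, pholder);

	if (error == 0 && CP_CLASS(eff) != CP_CLASS(wanted)) {
		/* AKS overrode our preference; 'eff' is authoritative */
	}
	return error;
}
#endif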
/* Initialize the aks_cred_t structure passed to AKS */
static void cp_init_access(aks_cred_t access, struct cnode *cp)
{
	vfs_context_t context = vfs_context_current();
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t proc = vfs_context_proc(context);
	struct hfsmount *hfsmp;
	struct vnode *vp;
	uuid_t hfs_uuid;

	bzero(access, sizeof(*access));

	vp = CTOV(cp, 0);
	if (vp == NULL) {
		/* Is it the resource fork? */
		vp = CTOV(cp, 1);
		if (vp == NULL) {
			// leave the struct bzeroed.
			return;
		}
	}

	hfsmp = VTOHFS(vp);
	hfs_getvoluuid(hfsmp, hfs_uuid);

	/* Note: HFS uses a 32-bit fileID, even though inode is a 64-bit value */
	access->inode = cp->c_fileid;
	access->pid = proc_pid(proc);
	access->uid = kauth_cred_getuid(cred);
	uuid_copy (access->volume_uuid, hfs_uuid);

	if (cp->c_cpentry)
		access->key_revision = cp->c_cpentry->cp_key_revision;

	return;
}
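
/*
 * Illustrative sketch (not part of the original source, guarded out so it
 * does not compile into the kernel): the credential fields
 * cp_init_access() populates for AKS.  Field names match the usage above;
 * the debug helper itself and the casts are assumptions.
 */
#if 0 /* example only */
static void
example_dump_access(aks_cred_t access)
{
	printf("cp access: inode %llu pid %d uid %d key_revision %u\n",
			(unsigned long long)access->inode,
			(int)access->pid, (int)access->uid,
			(unsigned)access->key_revision);
}
#endif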
#if HFS_CONFIG_KEY_ROLL

errno_t cp_set_auto_roll(hfsmount_t *hfsmp,
		const hfs_key_auto_roll_args_t *args)
{
	// 64 bytes should be OK on the stack
	_Static_assert(sizeof(struct cp_root_xattr) < 64, "cp_root_xattr too big!");

	struct cp_root_xattr xattr;
	errno_t ret;

	ret = cp_getrootxattr(hfsmp, &xattr);
	if (ret)
		return ret;

	ret = hfs_start_transaction(hfsmp);
	if (ret)
		return ret;

	xattr.auto_roll_min_version = args->min_key_os_version;
	xattr.auto_roll_max_version = args->max_key_os_version;

	bool roll_old_class_gen = ISSET(args->flags, HFS_KEY_AUTO_ROLL_OLD_CLASS_GENERATION);

	if (roll_old_class_gen)
		SET(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
	else
		CLR(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);

	ret = cp_setrootxattr(hfsmp, &xattr);

	errno_t ret2 = hfs_end_transaction(hfsmp);
	if (!ret)
		ret = ret2;
	if (ret)
		return ret;

	hfs_lock_mount(hfsmp);
	hfsmp->hfs_auto_roll_min_key_os_version = args->min_key_os_version;
	hfsmp->hfs_auto_roll_max_key_os_version = args->max_key_os_version;
	hfs_unlock_mount(hfsmp);

	return ret;
}
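
/*
 * Illustrative sketch (not part of the original source, guarded out so it
 * does not compile into the kernel): enabling auto-roll for keys whose
 * cp_key_os_version falls inside [min, max), which is the window
 * cp_should_auto_roll() checks below.  Only the fields referenced by
 * cp_set_auto_roll() are initialized; the function name and version
 * values are placeholders.
 */
#if 0 /* example only */
static errno_t
example_enable_auto_roll(hfsmount_t *hfsmp, uint32_t min_ver, uint32_t max_ver)
{
	hfs_key_auto_roll_args_t args = {
		.flags = 0,	/* or HFS_KEY_AUTO_ROLL_OLD_CLASS_GENERATION */
		.min_key_os_version = min_ver,
		.max_key_os_version = max_ver,	/* 0 == no upper bound */
	};

	return cp_set_auto_roll(hfsmp, &args);
}
#endif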
bool cp_should_auto_roll(hfsmount_t *hfsmp, cprotect_t cpr)
{
	if (cpr->cp_key_roll_ctx) {
		// Already rolling
		return false;
	}

	// Only automatically roll class A, B & C
	if (CP_CLASS(cpr->cp_pclass) < PROTECTION_CLASS_A
		|| CP_CLASS(cpr->cp_pclass) > PROTECTION_CLASS_C) {
		return false;
	}

	if (!cpkp_has_pers_key(&cpr->cp_keys))
		return false;

	/*
	 * Remember, the class generation stored in HFS+ is updated at the *end*,
	 * so it's old if it matches the generation we have stored.
	 */
	if (ISSET(hfsmp->cproot_flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION)
		&& cp_get_crypto_generation(cpr->cp_pclass) == hfsmp->cp_crypto_generation) {
		return true;
	}

	if (!hfsmp->hfs_auto_roll_min_key_os_version
		&& !hfsmp->hfs_auto_roll_max_key_os_version) {
		// No minimum or maximum set
		return false;
	}

	if (hfsmp->hfs_auto_roll_min_key_os_version
		&& cpr->cp_key_os_version < hfsmp->hfs_auto_roll_min_key_os_version) {
		// Before minimum
		return false;
	}

	if (hfsmp->hfs_auto_roll_max_key_os_version
		&& cpr->cp_key_os_version >= hfsmp->hfs_auto_roll_max_key_os_version) {
		// Greater than maximum
		return false;
	}

	return true;
}
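
/*
 * Illustrative sketch (not part of the original source, guarded out so it
 * does not compile into the kernel): the version window the min/max checks
 * above implement, isolated as a standalone predicate.  With min = 100 and
 * max = 200 (placeholder values), keys minted at versions 100..199 roll;
 * 99 and 200 do not.
 */
#if 0 /* example only */
static bool
example_in_roll_window(uint32_t key_os_version, uint32_t min, uint32_t max)
{
	if (!min && !max)
		return false;		/* no window configured */
	if (min && key_os_version < min)
		return false;		/* before minimum */
	if (max && key_os_version >= max)
		return false;		/* at or after maximum */
	return true;
}
#endif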
#endif // HFS_CONFIG_KEY_ROLL
errno_t cp_handle_strategy(buf_t bp)
{
	vnode_t vp = buf_vnode(bp);
	cnode_t *cp = NULL;

	if (bufattr_rawencrypted(buf_attr(bp))
		|| !(cp = cp_get_protected_cnode(vp))
		|| !cp->c_cpentry) {
		// Nothing to do
		return 0;
	}

	/*
	 * For filesystem resize, we may not have access to the underlying
	 * file's cache key for whatever reason (device may be locked).
	 * However, we do not need it since we are going to use the
	 * temporary HFS-wide resize key which is generated once we start
	 * relocating file content.  If this file's I/O should be done
	 * using the resize key, it will have been supplied already, so do
	 * not attach the file's cp blob to the buffer.
	 */
	if (ISSET(cp->c_cpentry->cp_flags, CP_RELOCATION_INFLIGHT))
		return 0;

#if HFS_CONFIG_KEY_ROLL
	/*
	 * We don't require any locks here.  Pages will be locked so no
	 * key rolling can take place until this I/O has completed.
	 */
	if (!cp->c_cpentry->cp_key_roll_ctx)
#endif
	{
		// Fast path
		cpx_t cpx = cpkp_cpx(&cp->c_cpentry->cp_keys);

		if (cpx_has_key(cpx)) {
			bufattr_setcpx(buf_attr(bp), cpx);
			return 0;
		}
	}

	/*
	 * We rely mostly (see note below) upon the truncate lock to
	 * protect the CP cache key from getting tossed prior to our IO
	 * finishing here.  Nearly all cluster io calls to manipulate file
	 * payload from HFS take the truncate lock before calling into the
	 * cluster layer to ensure the file size does not change, or that
	 * they have exclusive right to change the EOF of the file.  That
	 * same guarantee protects us here since the code that deals with
	 * CP lock events must now take the truncate lock before doing
	 * anything.
	 *
	 * If you want to change content protection structures, then the
	 * truncate lock is not sufficient; you must take the truncate
	 * lock and then wait for outstanding writes to complete.  This is
	 * necessary because asynchronous I/O only holds the truncate lock
	 * whilst I/O is being queued.
	 *
	 * One exception should be the VM swapfile IO, because HFS will
	 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call
	 * for the swapfile code only without holding the truncate lock.
	 * This is because individual swapfiles are maintained at
	 * fixed-length sizes by the VM code.  In non-swapfile IO we use
	 * PAGEOUT_V2 semantics which allow us to create our own UPL and
	 * thus take the truncate lock before calling into the cluster
	 * layer.  In that case, however, we are not concerned with the CP
	 * blob being wiped out in the middle of the IO because there
	 * isn't anything to toss; the VM swapfile key stays in-core as
	 * long as the file is open.
	 */

	off_rsrc_t off_rsrc = off_rsrc_make(buf_lblkno(bp) * GetLogicalBlockSize(vp),
			VNODE_IS_RSRC(vp));
	cp_io_params_t io_params;

	/*
	 * We want to take the cnode lock here and because the vnode write
	 * count is a pseudo-lock, we need to do something to preserve
	 * lock ordering; the cnode lock comes before the write count.
	 * Ideally, the write count would be incremented after the
	 * strategy routine returns, but that becomes complicated if the
	 * strategy routine were to call buf_iodone before returning.
	 * For now, we drop the write count here and then pick it up again
	 * later.
	 */
	if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
		vnode_writedone(vp);

	hfs_lock_always(cp, HFS_SHARED_LOCK);
	cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
			ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
			&io_params);
	hfs_unlock(cp);

	/*
	 * Last chance: If this data protected I/O does not have unwrapped
	 * keys present, then try to get them.  We already know that it
	 * should, by this point.
	 */
	if (!cpx_has_key(io_params.cpx)) {
		int io_op = ((buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
		errno_t error = cp_handle_vnop(vp, io_op, 0);
		if (error) {
			/*
			 * We have to be careful here.  By this point in the I/O
			 * path, VM or the cluster engine has prepared a buf_t
			 * with the proper file offsets and all the rest, so
			 * simply erroring out will result in us leaking this
			 * particular buf_t.  We need to properly decorate the
			 * buf_t just as buf_strategy would so as to make it
			 * appear that the I/O errored out with the particular
			 * error code.
			 */
			if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
				vnode_startwrite(vp);
			buf_seterror (bp, error);
			buf_biodone(bp);
			return error;
		}

		hfs_lock_always(cp, HFS_SHARED_LOCK);
		cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
				ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
				&io_params);
		hfs_unlock(cp);
	}

	hfs_assert(buf_count(bp) <= io_params.max_len);
	bufattr_setcpx(buf_attr(bp), io_params.cpx);

	if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
		vnode_startwrite(vp);

	return 0;
}
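
/*
 * Illustrative sketch (not part of the original source, guarded out so it
 * does not compile into the kernel): where cp_handle_strategy() sits in
 * the I/O path.  A strategy VNOP calls it to decorate the buf_t with the
 * correct cpx before handing the buffer to the device; on error the buf_t
 * has already been completed via buf_biodone(), so the caller simply
 * returns.  This is condensed from the shape of hfs_vnop_strategy(), not
 * copied from it, and the function name is hypothetical.
 */
#if 0 /* example only */
static int
example_vnop_strategy(struct vnop_strategy_args *ap)
{
	buf_t bp = ap->a_bp;

	errno_t error = cp_handle_strategy(bp);
	if (error)
		return error;	/* bp was already errored out and completed */

	return buf_strategy(VNODE_TO_DEVVP(buf_vnode(bp)), ap);
}
#endif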
#endif /* CONFIG_PROTECT */