/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#if CONFIG_PROTECT

#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/vnode_if.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <libkern/crypto/sha1.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>

#include "hfs.h"
#include "hfs_cnode.h"
#include "hfs_fsctl.h"
#include "hfs_cprotect.h"
#include "hfs_iokit.h"

#if HFS_CONFIG_KEY_ROLL
#include "hfs_key_roll.h"
#endif

#define PTR_ADD(type, base, offset)	(type)((uintptr_t)(base) + (offset))

extern int (**hfs_vnodeop_p) (void *);

/*
 * CP private functions
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static void cp_entry_dealloc(hfsmount_t *hfsmp, struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static void cp_init_access(aks_cred_t access, struct cnode *cp);

// -- cp_key_pair accessors --

void cpkp_init(cp_key_pair_t *cpkp, uint16_t max_pers_key_len,
			   uint16_t max_cached_key_len)
{
	cpkp->cpkp_max_pers_key_len = max_pers_key_len;
	cpkp->cpkp_pers_key_len = 0;
	cpx_init(cpkp_cpx(cpkp), max_cached_key_len);

	// Default to using offsets
	cpx_set_use_offset_for_iv(cpkp_cpx(cpkp), true);
}

uint16_t cpkp_max_pers_key_len(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_max_pers_key_len;
}

uint16_t cpkp_pers_key_len(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len;
}

static bool cpkp_has_pers_key(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len > 0;
}

static void *cpkp_pers_key(const cp_key_pair_t *cpkp)
{
	return PTR_ADD(void *, &cpkp->cpkp_cpx, cpx_sizex(cpkp_cpx(cpkp)));
}

static void cpkp_set_pers_key_len(cp_key_pair_t *cpkp, uint16_t key_len)
{
	if (key_len > cpkp->cpkp_max_pers_key_len)
		panic("hfs_cprotect: key too big!");
	cpkp->cpkp_pers_key_len = key_len;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
cpx_t cpkp_cpx(const cp_key_pair_t *cpkp)
{
	// Cast to remove const qualifier
	return (cpx_t)&cpkp->cpkp_cpx;
}
#pragma clang diagnostic pop

size_t cpkp_size(uint16_t pers_key_len, uint16_t cached_key_len)
{
	return sizeof(cp_key_pair_t) + pers_key_len + cpx_size(cached_key_len);
}
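
/*
 * Layout note (derived from the accessors above): a cp_key_pair_t is a
 * variable-length structure. The fixed header is followed immediately by
 * the embedded cpx (whose size depends on the maximum cached-key length),
 * and the wrapped/persistent key bytes live after the cpx, which is why
 * cpkp_pers_key() computes its address as cpkp_cpx + cpx_sizex():
 *
 *   +-----------------+--------------------+---------------------------+
 *   | cp_key_pair_t   | cpx (cached key)   | persistent (wrapped) key  |
 *   | header          | cpx_sizex() bytes  | max_pers_key_len bytes    |
 *   +-----------------+--------------------+---------------------------+
 */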

size_t cpkp_sizex(const cp_key_pair_t *cpkp)
{
	return cpkp_size(cpkp->cpkp_max_pers_key_len, cpx_max_key_len(cpkp_cpx(cpkp)));
}

void cpkp_flush(cp_key_pair_t *cpkp)
{
	cpx_flush(cpkp_cpx(cpkp));
	cpkp->cpkp_pers_key_len = 0;
	bzero(cpkp_pers_key(cpkp), cpkp->cpkp_max_pers_key_len);
}

bool cpkp_can_copy(const cp_key_pair_t *src, const cp_key_pair_t *dst)
{
	return (cpkp_pers_key_len(src) <= dst->cpkp_max_pers_key_len
			&& cpx_can_copy(cpkp_cpx(src), cpkp_cpx(dst)));
}

void cpkp_copy(const cp_key_pair_t *src, cp_key_pair_t *dst)
{
	const uint16_t key_len = cpkp_pers_key_len(src);
	cpkp_set_pers_key_len(dst, key_len);
	memcpy(cpkp_pers_key(dst), cpkp_pers_key(src), key_len);
	cpx_copy(cpkp_cpx(src), cpkp_cpx(dst));
}

// --

bool cp_is_supported_version(uint16_t vers)
{
	return vers == CP_VERS_4 || vers == CP_VERS_5;
}

/*
 * Return the appropriate key and, if requested, the physical offset and
 * maximum length for a particular I/O operation.
 */
void cp_io_params(__unused hfsmount_t *hfsmp, cprotect_t cpr,
				  __unused off_rsrc_t off_rsrc,
				  __unused int direction, cp_io_params_t *io_params)
{
#if HFS_CONFIG_KEY_ROLL
	hfs_cp_key_roll_ctx_t *ckr = cpr->cp_key_roll_ctx;

	if (ckr && off_rsrc < ckr->ckr_off_rsrc) {
		/*
		 * When we're in the process of rolling an extent, ckr_off_rsrc will
		 * indicate the end of the extent.
		 */
		const off_rsrc_t roll_loc = ckr->ckr_off_rsrc
			- hfs_blk_to_bytes(ckr->ckr_roll_extent.blockCount,
							   hfsmp->blockSize);

		if (off_rsrc < roll_loc) {
			io_params->max_len = roll_loc - off_rsrc;
			io_params->phys_offset = -1;
		} else {
			/*
			 * We should never get reads to the extent we're rolling
			 * because the pages should be locked in the UBC. If we
			 * did get reads it's not obvious what the right thing to
			 * do is either: we could read from the old location, but
			 * we might have written later data to the new location,
			 * or we could read from the new location, but data might
			 * not have been written there yet.
			 *
			 * Note that whilst raw encrypted reads don't lock any
			 * pages, or take a cluster_read_direct lock, the call to
			 * hfs_key_roll_up_to in hfs_vnop_read will have ensured
			 * that the file has been rolled beyond the offset being
			 * read so this path should never be taken in that case.
			 */
			hfs_assert(direction == VNODE_WRITE);

			// For release builds, just in case...
			if (direction == VNODE_READ) {
				// Use the old key and offset
				goto old_key;
			}

			io_params->max_len = ckr->ckr_off_rsrc - off_rsrc;
			io_params->phys_offset = hfs_blk_to_bytes(ckr->ckr_roll_extent.startBlock,
													  hfsmp->blockSize) + off_rsrc - roll_loc;
		}

		// Use new key
		io_params->cpx = cpkp_cpx(&ckr->ckr_keys);
		return;
	}
old_key:
	// Use old key...
#endif

	io_params->max_len = INT64_MAX;
	io_params->phys_offset = -1;
	io_params->cpx = cpkp_cpx(&cpr->cp_keys);
}
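
/*
 * Illustrative caller pattern (a sketch, not a verbatim copy of the
 * strategy code): an I/O path is expected to clamp its transfer to
 * io_params.max_len, redirect the physical location whenever phys_offset
 * is not -1 (i.e. while the extent is being rolled), and hand
 * io_params.cpx to the crypto layer for the actual encryption or
 * decryption of that range.
 */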

static void cp_flush_cached_keys(cprotect_t cpr)
{
	cpx_flush(cpkp_cpx(&cpr->cp_keys));
#if HFS_CONFIG_KEY_ROLL
	if (cpr->cp_key_roll_ctx)
		cpx_flush(cpkp_cpx(&cpr->cp_key_roll_ctx->ckr_keys));
#endif
}

static bool cp_needs_pers_key(cprotect_t cpr)
{
	if (CP_CLASS(cpr->cp_pclass) == PROTECTION_CLASS_F)
		return !cpx_has_key(cpkp_cpx(&cpr->cp_keys));
	else
		return !cpkp_has_pers_key(&cpr->cp_keys);
}

static cp_key_revision_t cp_initial_key_revision(__unused hfsmount_t *hfsmp)
{
	return 1;
}

cp_key_revision_t cp_next_key_revision(cp_key_revision_t rev)
{
	rev = (rev + 0x0100) ^ (mach_absolute_time() & 0xff);
	if (!rev)
		rev = 1;
	return rev;
}
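
/*
 * Worked example for cp_next_key_revision: the upper byte(s) of the
 * revision act as a monotonically increasing counter while the low byte
 * is scrambled with the low bits of mach_absolute_time(). Starting from
 * revision 0x0001, one possible successor is
 *
 *   (0x0001 + 0x0100) ^ 0x3a == 0x013b
 *
 * (the low byte is timing-dependent). A result of zero is reserved, so
 * the function substitutes 1 in that case.
 */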

/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
	struct cprotect *entry = NULL;
	int error = 0;
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/*
	 * The cnode should be locked at this point, regardless of whether or not
	 * we are creating a new item in the namespace or vending a vnode on behalf
	 * of lookup. The only time we tell getnewvnode to skip the lock is when
	 * constructing a resource fork vnode. But a resource fork vnode must come
	 * after the regular data fork cnode has already been constructed.
	 */
	if (!cp_fs_protected (mp)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("hfs cp: no running mount point version!");
	}

	hfs_assert(cp->c_cpentry == NULL);

	error = cp_getxattr(cp, hfsmp, &entry);
	if (error == ENOATTR) {
		/*
		 * Normally, we should always have a CP EA for a file or directory that
		 * we are initializing here. However, there are some extenuating circumstances,
		 * such as the root directory immediately following a newfs_hfs.
		 *
		 * As a result, we leave code here to deal with an ENOATTR which will always
		 * default to a 'D/NONE' key, though we don't expect to use it much.
		 */
		cp_key_class_t target_class = PROTECTION_CLASS_D;

		if (S_ISDIR(cp->c_mode)) {
			target_class = PROTECTION_CLASS_DIR_NONE;
		}

		cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

		/* allow keybag to override our class preferences */
		error = cp_new (&target_class, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS,
						key_revision, (cp_new_alloc_fn)cp_entry_alloc, (void **)&entry);
		if (error == 0) {
			entry->cp_pclass = target_class;
			entry->cp_key_os_version = cp_os_version();
			entry->cp_key_revision = key_revision;
			error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
		}
	}

	/*
	 * Bail out if:
	 * a) error was not ENOATTR (we got something bad from the getxattr call)
	 * b) we encountered an error setting the xattr above.
	 * c) we failed to generate a new cprotect data structure.
	 */
	if (error) {
		goto out;
	}

	cp->c_cpentry = entry;

out:
	if (error == 0) {
		entry->cp_backing_cnode = cp;
	}
	else {
		if (entry) {
			cp_entry_destroy(hfsmp, entry);
		}
		cp->c_cpentry = NULL;
	}

	return error;
}

/*
 * cp_setup_newentry
 *
 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
 * Since the kext is now responsible for vending us both wrapped and unwrapped keys,
 * we need to create a keyless xattr upon file/directory creation. Once we have the
 * inode value and the file/directory is established, we can ask it to generate keys.
 * Note that this introduces a potential race: if the device is locked and the
 * wrapping keys are purged between the time we call this function and the time we
 * ask it to generate keys for us, we may have to fail the open(2) call and back out
 * the entry.
 */

int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp,
					   cp_key_class_t suppliedclass, mode_t cmode,
					   struct cprotect **tmpentry)
{
	int isdir = 0;
	struct cprotect *entry = NULL;
	uint32_t target_class = hfsmp->default_cp_class;
	suppliedclass = CP_CLASS(suppliedclass);

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("CP: major vers not set in mount!");
	}

	if (S_ISDIR (cmode)) {
		isdir = 1;
	}

	/* Decide the target class. Input argument takes priority. */
	if (cp_is_valid_class (isdir, suppliedclass)) {
		/* caller supplies -1 if it was not specified so we will default to the mount point value */
		target_class = suppliedclass;
		/*
		 * One exception, F is never valid for a directory
		 * because its children may inherit and userland will be
		 * unable to read/write to the files.
		 */
		if (isdir) {
			if (target_class == PROTECTION_CLASS_F) {
				*tmpentry = NULL;
				return EINVAL;
			}
		}
	}
	else {
		/*
		 * If no valid class was supplied, behave differently depending on whether or not
		 * the item being created is a file or directory.
		 *
		 * for FILE:
		 *		If parent directory has a non-zero class, use that.
		 *		If parent directory has a zero class (not set), then attempt to
		 *		apply the mount point default.
		 *
		 * for DIRECTORY:
		 *		Directories always inherit from the parent; if the parent
		 *		has a NONE class set, then we can continue to use that.
		 */
		if ((dcp) && (dcp->c_cpentry)) {
			uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
			/* If the parent class is not valid, default to the mount point value */
			if (cp_is_valid_class(1, parentclass)) {
				if (isdir) {
					target_class = parentclass;
				}
				else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
					/* files can inherit so long as it's not NONE */
					target_class = parentclass;
				}
			}
			/* Otherwise, we already defaulted to the mount point's default */
		}
	}

	/* Generate the cprotect to vend out */
	entry = cp_entry_alloc(NULL, 0, 0, NULL);
	if (entry == NULL) {
		*tmpentry = NULL;
		return ENOMEM;
	}

	/*
	 * We don't have keys yet, so fill in what we can. At this point
	 * this blob has no keys and it has no backing xattr. We just know the
	 * target class.
	 */
	entry->cp_flags = CP_NO_XATTR;
	/* Note this is only the effective class */
	entry->cp_pclass = target_class;
	*tmpentry = entry;

	return 0;
}
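
/*
 * Typical creation sequence (a summary of the flow described above, not
 * new behavior): the create path obtains a keyless blob from
 * cp_setup_newentry(), the file or directory is established in the
 * catalog, and only then are real keys requested (see the cp_generate_keys
 * and cp_setxattr callers elsewhere in this file). If the device locks
 * in between, the create path must fail the open(2) and back out.
 */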

/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk. We use class F for this since it provides the exact
 * semantics that are needed here. Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 */
int cpx_gentempkeys(cpx_t *pcpx, __unused struct hfsmount *hfsmp)
{
	cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE);

	cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
	read_random(cpx_key(cpx), CP_MAX_KEYSIZE);
	cpx_set_use_offset_for_iv(cpx, true);

	*pcpx = cpx;

	return 0;
}
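
/*
 * Note: the key generated above is random and never persisted, so
 * anything encrypted with it is only meaningful for the lifetime of the
 * operation at hand -- the class F semantic referred to in the comment
 * above.
 */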

/*
 * Tear down and clear a cprotect blob for a closing file.
 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
 */
void
cp_entry_destroy(hfsmount_t *hfsmp, struct cprotect *entry_ptr)
{
	if (entry_ptr == NULL) {
		/* nothing to clean up */
		return;
	}
	cp_entry_dealloc(hfsmp, entry_ptr);
}


int
cp_fs_protected (mount_t mnt)
{
	return (vfs_flags(mnt) & MNT_CPROTECT);
}


/*
 * Return a pointer to underlying cnode if there is one for this vnode.
 * Done without taking cnode lock, inspecting only vnode state.
 */
struct cnode *
cp_get_protected_cnode(struct vnode *vp)
{
	if (!cp_vnode_is_eligible(vp)) {
		return NULL;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		/* mount point doesn't support it */
		return NULL;
	}

	return vnode_fsnode(vp);
}


/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 */
int
cp_vnode_getclass(struct vnode *vp, cp_key_class_t *class)
{
	struct cprotect *entry;
	int error = 0;
	struct cnode *cp;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible (vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the truncate lock up-front in shared mode because we may need
	 * to manipulate the CP blob. Pend lock events until we're done here.
	 */
	hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * We take only the shared cnode lock up-front. If it turns out that
	 * we need to manipulate the CP blob to write a key out, drop the
	 * shared cnode lock and acquire an exclusive lock.
	 */
	error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	if (error) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return error;
	}

	/* pull the class from the live entry */
	entry = cp->c_cpentry;

	if (entry == NULL) {
		panic("Content Protection: uninitialized cnode %p", cp);
	}

	/* Note that we may not have keys yet, but we know the target class. */

	if (error == 0) {
		*class = CP_CLASS(entry->cp_pclass);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	return error;
}

/*
 * Sets persistent class for this file or directory.
 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
 * If the new class can't be accessed now, EPERM.
 * Otherwise, record class and re-wrap key if the mount point is content-protected.
 */
int
cp_vnode_setclass(struct vnode *vp, cp_key_class_t newclass)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;
	int isdir = 0;

	if (vnode_isdir (vp)) {
		isdir = 1;
	}

	/* Ensure we only use the effective class here */
	newclass = CP_CLASS(newclass);

	if (!cp_is_valid_class(isdir, newclass)) {
		printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
		return EINVAL;
	}

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same. This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	cp = VTOC(vp);
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * The truncate lock is not sufficient to guarantee the CP blob
	 * isn't being used. We must wait for existing writes to finish.
	 */
	vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * re-wrap per-file key with new class.
	 * Generate an entirely new key if switching to F.
	 */
	if (vnode_isreg(vp)) {
		/*
		 * The vnode is a file. Before proceeding with the re-wrap, we need
		 * to unwrap the keys. This ensures the cached key survives the class
		 * change and keeps the destination class's properties intact (for
		 * example, class B allows I/O while locked, but an unwrap prior to
		 * the next unlock would not be allowed).
		 */
		if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			error = cp_restore_keys (entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}

		if (newclass == PROTECTION_CLASS_F) {
			/* Verify that file is blockless if switching to class F */
			if (cp->c_datafork->ff_size > 0) {
				error = EINVAL;
				goto out;
			}

			cp_key_pair_t *cpkp;
			cprotect_t new_entry = cp_entry_alloc(NULL, 0, CP_MAX_KEYSIZE, &cpkp);

			if (!new_entry) {
				error = ENOMEM;
				goto out;
			}

			/* newclass is only the effective class */
			new_entry->cp_pclass = newclass;
			new_entry->cp_key_os_version = cp_os_version();
			new_entry->cp_key_revision = cp_next_key_revision(entry->cp_key_revision);

			cpx_t cpx = cpkp_cpx(cpkp);

			/* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */
			cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
			read_random (cpx_key(cpx), CP_MAX_KEYSIZE);

			cp_replace_entry(hfsmp, cp, new_entry);

			error = 0;
			goto out;
		}

		/* Deny the setclass if file is to be moved from F to something else */
		if (entry->cp_pclass == PROTECTION_CLASS_F) {
			error = EPERM;
			goto out;
		}

		if (!cpkp_has_pers_key(&entry->cp_keys)) {
			struct cprotect *new_entry = NULL;
			/*
			 * We want to fail if we can't wrap to the target class. By not setting
			 * CP_KEYWRAP_DIFFCLASS, we tell keygeneration that if it can't wrap
			 * to 'newclass' then error out.
			 */
			uint32_t flags = 0;
			error = cp_generate_keys (hfsmp, cp, newclass, flags, &new_entry);
			if (error == 0) {
				cp_replace_entry (hfsmp, cp, new_entry);
			}
			/* Bypass the setxattr code below since generate_keys does it for us */
			goto out;
		}

		cprotect_t new_entry;
		error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys, entry,
						  (cp_new_alloc_fn)cp_entry_alloc, (void **)&new_entry);
		if (error) {
			/* we didn't have perms to set this class. leave file as-is and error out */
			goto out;
		}

#if HFS_CONFIG_KEY_ROLL
		hfs_cp_key_roll_ctx_t *new_key_roll_ctx = NULL;
		if (entry->cp_key_roll_ctx) {
			error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_key_roll_ctx->ckr_keys,
							  entry->cp_key_roll_ctx,
							  (cp_new_alloc_fn)hfs_key_roll_ctx_alloc,
							  (void **)&new_key_roll_ctx);

			if (error) {
				cp_entry_dealloc(hfsmp, new_entry);
				goto out;
			}

			new_entry->cp_key_roll_ctx = new_key_roll_ctx;
		}
#endif

		new_entry->cp_pclass = newclass;

		cp_replace_entry(hfsmp, cp, new_entry);
		entry = new_entry;
	}
	else if (vnode_isdir(vp)) {
		/* For directories, just update the pclass. newclass is only effective class */
		entry->cp_pclass = newclass;
		error = 0;
	}
	else {
		/* anything else, just error out */
		error = EINVAL;
		goto out;
	}

	/*
	 * We get here if the new class was F, or if we were re-wrapping a cprotect that already
	 * existed. If the keys were never generated, then they'll skip the setxattr calls.
	 */

	error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
	if (error == ENOATTR) {
		error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
	}

out:

	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}
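
/*
 * Summary of the setclass rules implemented above (files only; directories
 * simply record the new effective class):
 *
 *   any class -> F        : allowed only for zero-length files; a fresh
 *                           random key is generated and nothing is wrapped.
 *   F -> anything         : denied (EPERM).
 *   no persistent key yet : generate keys directly in the target class,
 *                           failing if that class cannot wrap right now.
 *   otherwise             : re-wrap the existing per-file key (and any
 *                           key-rolling keys) with the new class.
 */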


int cp_vnode_transcode(vnode_t vp, cp_key_t *k)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Structures passed between HFS and AKS */
	struct aks_cred_s access_in;
	struct aks_wrapped_key_s wrapped_key_in, wrapped_key_out;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same. This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Send the per-file key in wrapped form for re-wrap with the current class information.
	 * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
	 * No outputs need processing, so just clear the locks and pass along the error.
	 */
	if (vnode_isreg(vp)) {

		/*
		 * Picked up the following from cp_wrap().
		 * If needed, more comments are available there.
		 */

		if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
			error = EINVAL;
			goto out;
		}

		cp_init_access(&access_in, cp);

		bzero(&wrapped_key_in, sizeof(wrapped_key_in));
		bzero(&wrapped_key_out, sizeof(wrapped_key_out));

		cp_key_pair_t *cpkp = &entry->cp_keys;

#if HFS_CONFIG_KEY_ROLL
		if (entry->cp_key_roll_ctx)
			cpkp = &entry->cp_key_roll_ctx->ckr_keys;
#endif

		wrapped_key_in.key = cpkp_pers_key(cpkp);
		wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);

		if (!wrapped_key_in.key_len) {
			error = EINVAL;
			goto out;
		}

		/* Use the actual persistent class when talking to AKS */
		wrapped_key_in.dp_class = entry->cp_pclass;
		wrapped_key_out.key = k->key;
		wrapped_key_out.key_len = k->len;

		error = hfs_backup_key(&access_in,
							   &wrapped_key_in,
							   &wrapped_key_out);

		if (error)
			error = EPERM;
		else
			k->len = wrapped_key_out.key_len;
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}


/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock. This is because
 * the thread calling us may already have the truncate lock. It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys. We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
	struct cprotect *entry;
	int error = 0;
	struct hfsmount *hfsmp = NULL;
	struct cnode *cp = NULL;

	/*
	 * First, do validation against the vnode before proceeding any further:
	 * Is this vnode originating from a valid content-protected filesystem?
	 */
	if (cp_vnode_is_eligible(vp) == 0) {
		/*
		 * It is either not HFS or not a file/dir. Just return success. This is a valid
		 * case if servicing i/o against another filesystem type from VFS.
		 */
		return 0;
	}

	if (cp_fs_protected (VTOVFS(vp)) == 0) {
		/*
		 * The underlying filesystem does not support content protection. This is also
		 * a valid case. Simply return success.
		 */
		return 0;
	}

	/*
	 * At this point, we know we have a HFS vnode that backs a file or directory on a
	 * filesystem that supports content protection.
	 */
	cp = VTOC(vp);

	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;

	if (entry == NULL) {
		/*
		 * If this cnode is not content protected, simply return success.
		 * Note that this function is called by all I/O-based call sites
		 * when CONFIG_PROTECT is enabled in the XNU build.
		 *
		 * All files should have cprotect structs. It's possible to encounter
		 * a directory from a v2.0 CP system, but all files should have
		 * protection EAs, so treat a missing entry on a regular file as an
		 * error.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}

		goto out;
	}

	vp = CTOV(cp, 0);
	if (vp == NULL) {
		/* is it a rsrc */
		vp = CTOV(cp,1);
		if (vp == NULL) {
			error = EINVAL;
			goto out;
		}
	}
	hfsmp = VTOHFS(vp);

	if ((error = cp_check_access(cp, hfsmp, vnop))) {
		/* check for raw encrypted access before bailing out */
		if ((ioflag & IO_ENCRYPTED)
#if HFS_CONFIG_KEY_ROLL
			// If we're rolling, we need the keys
			&& !hfs_is_key_rolling(cp)
#endif
			&& (vnop == CP_READ_ACCESS)) {
			/*
			 * read access only + asking for the raw encrypted bytes
			 * is legitimate, so reset the error value to 0
			 */
			error = 0;
		}
		else {
			goto out;
		}
	}

	if (!ISSET(entry->cp_flags, CP_NO_XATTR)) {
		if (!S_ISREG(cp->c_mode))
			goto out;

		// If we have a persistent key and the cached key, we're done
		if (!cp_needs_pers_key(entry)
			&& cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			goto out;
		}
	}

	/* upgrade to exclusive lock */
	if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
			return error;
		}
	} else {
		cp->c_lockowner = current_thread();
	}

	/* generate new keys if none have ever been saved */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/*
		 * It's ok if this ends up being wrapped in a different class than 'pclass'.
		 * class modification is OK here.
		 */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;

		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/* unwrap keys if needed */
	if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/* no need to try to restore keys; they are not going to be used */
			error = 0;
		}
		else {
			error = cp_restore_keys(entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
	}

	/* write out the xattr if it's new */
	if (entry->cp_flags & CP_NO_XATTR)
		error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:

	hfs_unlock(cp);
	return error;
}

#if HFS_TMPDBG
#if !SECURE_KERNEL
static void cp_log_eperm (struct vnode* vp, int pclass, boolean_t create) {
	char procname[256] = {};
	const char *fname = "unknown";
	const char *dbgop = "open";

	int ppid = proc_selfpid();
	/* selfname does a strlcpy so we're OK */
	proc_selfname(procname, sizeof(procname));
	if (vp && vp->v_name) {
		/* steal from the namecache */
		fname = vp->v_name;
	}

	if (create) {
		dbgop = "create";
	}

	printf("proc %s (pid %d) class %d, op: %s failure @ file %s\n", procname, ppid, pclass, dbgop, fname);
}
#endif
#endif


int
cp_handle_open(struct vnode *vp, int mode)
{
	struct cnode *cp = NULL;
	struct cprotect *entry = NULL;
	struct hfsmount *hfsmp;
	int error = 0;

	/* If vnode not eligible, just return success */
	if (!cp_vnode_is_eligible(vp)) {
		return 0;
	}

	/* If mount point not properly set up, then also return success */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return 0;
	}

	cp = VTOC(vp);

	// Allow if raw encrypted mode requested
	if (ISSET(mode, FENCRYPTED)) {
#if HFS_CONFIG_KEY_ROLL
		// If we're rolling, we need the keys
		hfs_lock_always(cp, HFS_SHARED_LOCK);
		bool rolling = hfs_is_key_rolling(cp);
		hfs_unlock(cp);
		if (!rolling)
			return 0;
#else
		return 0;
#endif
	}
	if (ISSET(mode, FUNENCRYPTED)) {
		return 0;
	}

	/* We know the vnode is in a valid state. Acquire cnode and validate */
	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/*
		 * If the mount is protected and we couldn't get a cprotect for this vnode,
		 * then it's not valid for opening.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	if (!S_ISREG(cp->c_mode))
		goto out;

	/*
	 * Does the cnode have keys yet? If not, then generate them.
	 */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/* Allow the keybag to override our class preferences */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;
		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/*
	 * We want to minimize the number of unwraps that we'll have to do since
	 * the cost can vary, depending on the platform we're running.
	 */
	switch (CP_CLASS(entry->cp_pclass)) {
		case PROTECTION_CLASS_B:
			if (mode & O_CREAT) {
				/*
				 * Class B always allows creation. Since O_CREAT was passed through
				 * we infer that this was a newly created vnode/cnode. Even though a potential
				 * race exists when multiple threads attempt to create/open a particular
				 * file, only one can "win" and actually create it. VFS will unset the
				 * O_CREAT bit on the loser.
				 *
				 * Note that skipping the unwrap check here is not a security issue --
				 * we have to unwrap the key permanently upon the first I/O.
				 */
				break;
			}

			if (cpx_has_key(cpkp_cpx(&entry->cp_keys)) && !ISSET(mode, FENCRYPTED)) {
				/*
				 * For a class B file, attempt the unwrap if we have the key in
				 * core already.
				 * The device could have just transitioned into the lock state, and
				 * this vnode may not yet have been purged from the vnode cache (which would
				 * remove the keys).
				 */
				struct aks_cred_s access_in;
				struct aks_wrapped_key_s wrapped_key_in;

				cp_init_access(&access_in, cp);
				bzero(&wrapped_key_in, sizeof(wrapped_key_in));
				wrapped_key_in.key = cpkp_pers_key(&entry->cp_keys);
				wrapped_key_in.key_len = cpkp_pers_key_len(&entry->cp_keys);
				/* Use the persistent class when talking to AKS */
				wrapped_key_in.dp_class = entry->cp_pclass;
				error = hfs_unwrap_key(&access_in, &wrapped_key_in, NULL);
				if (error) {
					error = EPERM;
				}
				break;
			}
			/* otherwise, fall through to attempt the unwrap/restore */
		case PROTECTION_CLASS_A:
		case PROTECTION_CLASS_C:
			/*
			 * At this point, we know that we may need to attempt an unwrap; we want
			 * to make sure that open(2) fails properly if the device is either just-locked
			 * or never made it past first unlock. Since the keybag serializes access to the
			 * unwrapping keys for us and only calls our VFS callback once they've been purged,
			 * we will get here in two cases:
			 *
			 * A) we're in a window before the wrapping keys are purged; this is OK since when they get
			 * purged, the vnode will get flushed if needed.
			 *
			 * B) The keys are already gone. In this case, the restore_keys call below will fail.
			 *
			 * Since this function is bypassed entirely if we're opening a raw encrypted file,
			 * we can always attempt the restore.
			 */
			if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
				error = cp_restore_keys(entry, hfsmp, cp);
			}

			if (error) {
				error = EPERM;
			}

			break;

		case PROTECTION_CLASS_D:
		default:
			break;
	}

out:

#if HFS_TMPDBG
#if !SECURE_KERNEL
	if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
		cp_log_eperm (vp, CP_CLASS(entry->cp_pclass), false);
	}
#endif
#endif

	hfs_unlock(cp);
	return error;
}
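
/*
 * Per-class behavior of cp_handle_open, as implemented above:
 *
 *   class A/C : always attempt to restore (unwrap) the key; EPERM if the
 *               keybag refuses, e.g. while the device is locked (A) or
 *               before first unlock (C).
 *   class B   : creation is always allowed; otherwise verify with AKS that
 *               the key is still usable, falling back to a full restore if
 *               no cached key is present.
 *   class D   : no key check is needed at open time.
 *
 * Raw encrypted opens (FENCRYPTED/FUNENCRYPTED) bypass these checks
 * entirely unless a key roll is in progress.
 */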


/*
 * cp_getrootxattr:
 * Gets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to native endian-ness as needed.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
	void *buf;

	/*
	 * We allow for an extra 64 bytes to cater for upgrades. This wouldn't
	 * be necessary if the xattr routines just returned what we asked for.
	 */
	size_t bufsize = roundup(sizeof(struct cp_root_xattr) + 64, 64);

	int error = 0;

	hfs_assert(outxattr);

	buf = hfs_malloc(bufsize);

	uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

	uio_addiov(uio, CAST_USER_ADDR_T(buf), bufsize);

	size_t attrsize = bufsize;

	struct vnop_getxattr_args args = {
		.a_uio = uio,
		.a_name = CONTENT_PROTECTION_XATTR_NAME,
		.a_size = &attrsize
	};

	error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);

	uio_free(uio);

	if (error != 0) {
		goto out;
	}

	if (attrsize < CP_ROOT_XATTR_MIN_LEN) {
		error = HFS_EINCONSISTENT;
		goto out;
	}

	const struct cp_root_xattr *xattr = buf;

	bzero(outxattr, sizeof(*outxattr));

	/* Now convert the multi-byte fields to native endianness */
	outxattr->major_version = OSSwapLittleToHostInt16(xattr->major_version);
	outxattr->minor_version = OSSwapLittleToHostInt16(xattr->minor_version);
	outxattr->flags = OSSwapLittleToHostInt64(xattr->flags);

	if (outxattr->major_version >= CP_VERS_5) {
		if (attrsize < sizeof(struct cp_root_xattr)) {
			error = HFS_EINCONSISTENT;
			goto out;
		}
#if HFS_CONFIG_KEY_ROLL
		outxattr->auto_roll_min_version = OSSwapLittleToHostInt32(xattr->auto_roll_min_version);
		outxattr->auto_roll_max_version = OSSwapLittleToHostInt32(xattr->auto_roll_max_version);
#endif
	}

out:
	hfs_free(buf, bufsize);
	return error;
}
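
/*
 * On-disk shape of the root xattr handled above (all fields little
 * endian): a 16-bit major version, a 16-bit minor version and a 64-bit
 * flags word, optionally followed (from v5 on, with HFS_CONFIG_KEY_ROLL)
 * by the 32-bit auto-roll min/max version fields.
 */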

/*
 * cp_setrootxattr:
 * Sets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to little endian as needed.
 *
 * This will be written to the disk when it detects the EA is not there, or when we need
 * to make a modification to the on-disk version that can be done in-place.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
	int error = 0;
	struct vnop_setxattr_args args;

	args.a_desc = NULL;
	args.a_vp = NULL;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL;		// pass data ptr instead
	args.a_options = 0;
	args.a_context = NULL;	// no context needed, only done from mount.

	const uint64_t flags = newxattr->flags;

	/* Now convert the multi-byte fields to little endian before writing to disk. */
	newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

	int xattr_size = sizeof(struct cp_root_xattr);

#if HFS_CONFIG_KEY_ROLL
	bool upgraded = false;

	if (newxattr->auto_roll_min_version || newxattr->auto_roll_max_version) {
		if (newxattr->major_version < CP_VERS_5) {
			printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

			newxattr->major_version = CP_CURRENT_VERS;
			newxattr->minor_version = CP_MINOR_VERS;

			upgraded = true;
		}

		newxattr->auto_roll_min_version = OSSwapHostToLittleInt32(newxattr->auto_roll_min_version);
		newxattr->auto_roll_max_version = OSSwapHostToLittleInt32(newxattr->auto_roll_max_version);
	} else if (newxattr->major_version == CP_VERS_4)
		xattr_size = offsetof(struct cp_root_xattr, auto_roll_min_version);
#endif

	newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
	newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);

	error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
								  xattr_size, &args, hfsmp, 1);

	if (!error) {
		hfsmp->cproot_flags = flags;
#if HFS_CONFIG_KEY_ROLL
		if (upgraded)
			hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
#endif
	}

	return error;
}


/*
 * Stores new xattr data on the cnode.
 * cnode lock held exclusive (if available).
 *
 * This function is also invoked during file creation.
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp,
				uint32_t fileid, int options)
{
	int error = 0;
	cp_key_pair_t *cpkp = &entry->cp_keys;
#if HFS_CONFIG_KEY_ROLL
	bool rolling = entry->cp_key_roll_ctx != NULL;

	if (rolling && entry->cp_key_roll_ctx->ckr_off_rsrc == INT64_MAX) {
		// We've finished rolling, but we still have the context
		rolling = false;
		cpkp = &entry->cp_key_roll_ctx->ckr_keys;
	}
#endif

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	if (hfsmp->hfs_running_cp_major_vers < CP_CURRENT_VERS) {
		// Upgrade
		printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

		struct cp_root_xattr root_xattr;

		error = cp_getrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		root_xattr.major_version = CP_CURRENT_VERS;
		root_xattr.minor_version = CP_MINOR_VERS;

		error = cp_setrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
	}

	struct cp_xattr_v5 *xattr;
	xattr = hfs_malloc(sizeof(*xattr));

	xattr->xattr_major_version = OSSwapHostToLittleConstInt16(CP_VERS_5);
	xattr->xattr_minor_version = OSSwapHostToLittleConstInt16(CP_MINOR_VERS);
	xattr->flags = 0;
#if HFS_CONFIG_KEY_ROLL
	if (rolling)
		xattr->flags |= CP_XAF_KEY_ROLLING;
#endif
	xattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
	xattr->key_os_version = OSSwapHostToLittleInt32(entry->cp_key_os_version);
	xattr->key_revision = OSSwapHostToLittleInt16(entry->cp_key_revision);

	uint16_t key_len = cpkp_pers_key_len(cpkp);
	xattr->key_len = OSSwapHostToLittleInt16(key_len);
	memcpy(xattr->persistent_key, cpkp_pers_key(cpkp), key_len);

	size_t xattr_len = offsetof(struct cp_xattr_v5, persistent_key) + key_len;

#if HFS_CONFIG_KEY_ROLL
	if (rolling) {
		struct cp_roll_info *roll_info = PTR_ADD(struct cp_roll_info *, xattr, xattr_len);

		roll_info->off_rsrc = OSSwapHostToLittleInt64(entry->cp_key_roll_ctx->ckr_off_rsrc);

		key_len = cpkp_pers_key_len(&entry->cp_key_roll_ctx->ckr_keys);
		roll_info->key_len = OSSwapHostToLittleInt16(key_len);

		memcpy(roll_info->key, cpkp_pers_key(&entry->cp_key_roll_ctx->ckr_keys), key_len);

		xattr_len += offsetof(struct cp_roll_info, key) + key_len;
	}
#endif

	struct vnop_setxattr_args args = {
		.a_vp = cp ? cp->c_vp : NULL,
		.a_name = CONTENT_PROTECTION_XATTR_NAME,
		.a_options = options,
		.a_context = vfs_context_current(),
	};

	error = hfs_setxattr_internal(cp, xattr, xattr_len, &args, hfsmp, fileid);

	hfs_free(xattr, sizeof(*xattr));

	if (error == 0) {
		entry->cp_flags &= ~CP_NO_XATTR;
	}

	return error;
}
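
/*
 * Serialized v5 xattr layout, as written above (all integers little
 * endian): the fixed cp_xattr_v5 header fields up to persistent_key,
 * followed by key_len bytes of wrapped key and, while a key roll is in
 * flight, a trailing cp_roll_info record (rolled resource offset,
 * rolled-key length, rolled key bytes).
 */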

/*
 * Used by an fcntl to query the underlying FS for its content protection version #
 */
int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the level */

	err = cp_root_major_vers(mp);

	if (err == 0) {
		*level = hfsmp->hfs_running_cp_major_vers;
	}
	/* in error case, cp_root_major_vers will just return EINVAL. Use that */

	return err;
}

/* Used by fcntl to query default protection level of FS */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the default */

	*level = hfsmp->default_cp_class;
	return err;
}

/********************
 * Private Functions
 *******************/

static int
cp_root_major_vers(mount_t mp)
{
	int err = 0;
	struct cp_root_xattr xattr;
	struct hfsmount *hfsmp = NULL;

	hfsmp = vfs_fsprivate(mp);
	err = cp_getrootxattr (hfsmp, &xattr);

	if (err == 0) {
		hfsmp->hfs_running_cp_major_vers = xattr.major_version;
	}
	else {
		return EINVAL;
	}

	return 0;
}

static int
cp_vnode_is_eligible(struct vnode *vp)
{
	return !vnode_issystem(vp) && (vnode_isreg(vp) || vnode_isdir(vp));
}

#if DEBUG
static const uint32_t cp_magic1 = 0x7b727063;	// cpr{
static const uint32_t cp_magic2 = 0x7270637d;	// }cpr
#endif

struct cprotect *
cp_entry_alloc(cprotect_t old, uint16_t pers_key_len,
			   uint16_t cached_key_len, cp_key_pair_t **pcpkp)
{
	struct cprotect *cp_entry;

	if (pers_key_len > CP_MAX_WRAPPEDKEYSIZE)
		return (NULL);

	size_t size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
				   + cpkp_size(pers_key_len, cached_key_len));

#if DEBUG
	size += 4;		// Extra for magic2
#endif

	cp_entry = hfs_malloc(size);

	if (old) {
		memcpy(cp_entry, old, offsetof(struct cprotect, cp_keys));

#if HFS_CONFIG_KEY_ROLL
		// We don't copy the key roll context
		cp_entry->cp_key_roll_ctx = NULL;
#endif
	} else {
		bzero(cp_entry, offsetof(struct cprotect, cp_keys));
	}

#if DEBUG
	cp_entry->cp_magic1 = cp_magic1;
	*PTR_ADD(uint32_t *, cp_entry, size - 4) = cp_magic2;
#endif

	cpkp_init(&cp_entry->cp_keys, pers_key_len, cached_key_len);

	/*
	 * If we've been passed the old entry, then we are in the process of
	 * rewrapping in which case we need to copy the cached key. This is
	 * important for class B files when the device is locked because we
	 * won't be able to unwrap whilst in this state, yet we still need the
	 * unwrapped key.
	 */
	if (old)
		cpx_copy(cpkp_cpx(&old->cp_keys), cpkp_cpx(&cp_entry->cp_keys));

	if (pcpkp)
		*pcpkp = &cp_entry->cp_keys;

	return cp_entry;
}
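
/*
 * In DEBUG builds the allocation above is bracketed by guard words:
 * cp_magic1 lives in the fixed header and cp_magic2 occupies the four
 * bytes appended past the variable-length key pair, so cp_entry_dealloc
 * can assert that neither end of the blob was overrun.
 */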

static void
cp_entry_dealloc(__unused hfsmount_t *hfsmp, struct cprotect *entry)
{
#if HFS_CONFIG_KEY_ROLL
	hfs_release_key_roll_ctx(hfsmp, entry);
#endif

	cpkp_flush(&entry->cp_keys);

	size_t entry_size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
						 + cpkp_sizex(&entry->cp_keys));

#if DEBUG
	hfs_assert(entry->cp_magic1 == cp_magic1);
	/*
	 * Check the trailing guard word. (The original code mistakenly placed
	 * the == cp_magic2 comparison inside the PTR_ADD offset expression;
	 * the comparison belongs on the dereferenced guard word.)
	 */
	hfs_assert(*PTR_ADD(uint32_t *, entry, sizeof(struct cprotect) - sizeof(cp_key_pair_t)
						+ cpkp_sizex(&entry->cp_keys)) == cp_magic2);

	entry_size += 4;	// Extra for magic2
#endif

	hfs_free(entry, entry_size);
}
1630
1631 static int cp_read_xattr_v4(__unused hfsmount_t *hfsmp, struct cp_xattr_v4 *xattr,
1632 size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
1633 {
1634 /* Endian swap the multi-byte fields into host endianness from L.E. */
1635 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1636 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1637 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1638 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1639 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1640 xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
1641
1642 /*
1643 * Prevent a buffer overflow, and validate the key length obtained from the
1644 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1645 * point.
1646 */
1647 if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE)
1648 return HFS_EINCONSISTENT;
1649
1650 size_t min_len = offsetof(struct cp_xattr_v4, persistent_key) + xattr->key_size;
1651 if (xattr_len < min_len)
1652 return HFS_EINCONSISTENT;
1653
1654 /*
1655 * Class F files have no backing key; their keylength should be 0,
1656 * though they should have the proper flags set.
1657 *
1658 * A request to instantiate a CP for a class F file should result
1659 * in a bzero'd cp that just says class F, with key_flushed set.
1660 */
1661 if (CP_CLASS(xattr->persistent_class) == PROTECTION_CLASS_F
1662 || ISSET(xattr->flags, CP_XAF_NEEDS_KEYS)) {
1663 xattr->key_size = 0;
1664 }
1665
1666 /* set up entry with information from xattr */
1667 cp_key_pair_t *cpkp;
1668 cprotect_t entry;
1669
1670 if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1671 /* caller passed in a pre-allocated structure to get the basic info */
1672 entry = *pcpr;
1673 bzero(entry, offsetof(struct cprotect, cp_keys));
1674 }
1675 else {
1676 entry = cp_entry_alloc(NULL, xattr->key_size, CP_MAX_CACHEBUFLEN, &cpkp);
1677 }
1678
1679 entry->cp_pclass = xattr->persistent_class;
1680 entry->cp_key_os_version = xattr->key_os_version;
1681
1682
1683 if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1684 if (xattr->key_size) {
1685 cpkp_set_pers_key_len(cpkp, xattr->key_size);
1686 memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_size);
1687 }
1688
1689 *pcpr = entry;
1690 }
1691 else if (xattr->key_size) {
1692 SET(entry->cp_flags, CP_HAS_A_KEY);
1693 }
1694
1695 return 0;
1696 }
1697
1698 int cp_read_xattr_v5(hfsmount_t *hfsmp, struct cp_xattr_v5 *xattr,
1699 size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
1700 {
1701 if (xattr->xattr_major_version == OSSwapHostToLittleConstInt16(CP_VERS_4)) {
1702 return cp_read_xattr_v4(hfsmp, (struct cp_xattr_v4 *)xattr, xattr_len, pcpr, options);
1703 }
1704
1705 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1706
1707 if (xattr->xattr_major_version != CP_VERS_5) {
1708 printf("hfs: cp_getxattr: unsupported xattr version %d\n",
1709 xattr->xattr_major_version);
1710 return ENOTSUP;
1711 }
1712
1713 size_t min_len = offsetof(struct cp_xattr_v5, persistent_key);
1714
1715 if (xattr_len < min_len)
1716 return HFS_EINCONSISTENT;
1717
1718 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1719 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1720 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1721 xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
1722 xattr->key_revision = OSSwapLittleToHostInt16(xattr->key_revision);
1723 xattr->key_len = OSSwapLittleToHostInt16(xattr->key_len);
1724
1725 uint16_t pers_key_len = xattr->key_len;
1726
1727 min_len += pers_key_len;
1728 if (xattr_len < min_len)
1729 return HFS_EINCONSISTENT;
1730
1731 #if HFS_CONFIG_KEY_ROLL
1732 struct cp_roll_info *roll_info = NULL;
1733
1734 if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
1735 roll_info = PTR_ADD(struct cp_roll_info *, xattr, min_len);
1736
1737 min_len += offsetof(struct cp_roll_info, key);
1738
1739 if (xattr_len < min_len)
1740 return HFS_EINCONSISTENT;
1741
1742 roll_info->off_rsrc = OSSwapLittleToHostInt64(roll_info->off_rsrc);
1743
1744 if (roll_info->off_rsrc % hfsmp->blockSize)
1745 return HFS_EINCONSISTENT;
1746
1747 roll_info->key_len = OSSwapLittleToHostInt16(roll_info->key_len);
1748
1749 min_len += roll_info->key_len;
1750 if (xattr_len < min_len)
1751 return HFS_EINCONSISTENT;
1752 }
1753 #endif
1754
1755 cp_key_pair_t *cpkp;
1756 cprotect_t entry;
1757
1758 /*
1759 * If option CP_GET_XATTR_BASIC_INFO is set, we only return basic
1760 * information about the file's protection (and not the key) and
1761 * we store the result in the structure the caller passed to us.
1762 */
1763 if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1764 entry = *pcpr;
1765 bzero(entry, offsetof(struct cprotect, cp_keys));
1766 #if HFS_CONFIG_KEY_ROLL
1767 if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
1768 SET(entry->cp_flags, CP_KEY_IS_ROLLING);
1769 }
1770 #endif
1771 } else {
1772 entry = cp_entry_alloc(NULL, xattr->key_len, CP_MAX_CACHEBUFLEN, &cpkp);
1773 }
1774
1775 entry->cp_pclass = xattr->persistent_class;
1776 entry->cp_key_os_version = xattr->key_os_version;
1777 entry->cp_key_revision = xattr->key_revision;
1778
1779 if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1780 if (xattr->key_len) {
1781 cpkp_set_pers_key_len(cpkp, xattr->key_len);
1782 memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_len);
1783 }
1784
1785 #if HFS_CONFIG_KEY_ROLL
1786 if (roll_info) {
1787 entry->cp_key_roll_ctx = hfs_key_roll_ctx_alloc(NULL, roll_info->key_len,
1788 CP_MAX_CACHEBUFLEN, &cpkp);
1789
1790 entry->cp_key_roll_ctx->ckr_off_rsrc = roll_info->off_rsrc;
1791
1792 if (roll_info->key_len) {
1793 cpkp_set_pers_key_len(cpkp, roll_info->key_len);
1794 memcpy(cpkp_pers_key(cpkp), roll_info->key, roll_info->key_len);
1795 }
1796 }
1797 #endif
1798
1799 *pcpr = entry;
1800 }
1801 else if (xattr->key_len) {
1802 SET(entry->cp_flags, CP_HAS_A_KEY);
1803 }
1804
1805 return 0;
1806 }
1807
1808 /*
1809 * Initializes a new cprotect entry with xattr data from the cnode.
1810 * cnode lock held shared
1811 */
1812 static int
1813 cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, cprotect_t *outentry)
1814 {
1815 size_t xattr_len;
1816 struct cp_xattr_v5 *xattr;
1817
1818 xattr = hfs_malloc(xattr_len = sizeof(*xattr));
1819
1820 int error = hfs_xattr_read(cp->c_vp, CONTENT_PROTECTION_XATTR_NAME,
1821 xattr, &xattr_len);
1822
1823 if (!error) {
1824 if (xattr_len < CP_XATTR_MIN_LEN)
1825 error = HFS_EINCONSISTENT;
1826 else
1827 error = cp_read_xattr_v5(hfsmp, xattr, xattr_len, outentry, 0);
1828 }
1829
1830 #if DEBUG
1831 if (error && error != ENOATTR) {
1832 printf("cp_getxattr: bad cp xattr (%d):\n", error);
1833 for (size_t i = 0; i < xattr_len; ++i)
1834 printf("%02x ", ((uint8_t *)xattr)[i]);
1835 printf("\n");
1836 }
1837 #endif
1838
1839 hfs_free(xattr, sizeof(*xattr));
1840
1841 return error;
1842 }
1843
1844 /*
1845 * If permitted, restore entry's unwrapped key from the persistent key.
1846 * If not, clear key and set CP_KEY_FLUSHED.
1847 * cnode lock held exclusive
1848 */
1849 static int
1850 cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
1851 {
1852 int error = 0;
1853
1854 error = cp_unwrap(hfsmp, entry, cp);
1855 if (error) {
1856 cp_flush_cached_keys(entry);
1857 error = EPERM;
1858 }
1859 return error;
1860 }
1861
1862 void cp_device_locked_callback(mount_t mp, cp_lock_state_t state)
1863 {
1864 struct hfsmount *hfsmp;
1865
1866 /*
1867 * When iterating the various mount points that may
1868 * be present on a content-protected device, we need to skip
1869 * those that do not have it enabled.
1870 */
1871 if (!cp_fs_protected(mp)) {
1872 return;
1873 }
1874
1875 hfsmp = VFSTOHFS(mp);
1876
1877 hfsmp->hfs_cp_lock_state = state;
1878
1879 if (state == CP_LOCKED_STATE) {
1880 /*
1881 * We respond only to lock events. Since cprotect structs
1882 * decrypt/restore keys lazily, the unlock events don't
1883 * actually cause anything to happen.
1884 */
1885 vnode_iterate(mp, 0, cp_lock_vnode_callback, (void *)(uintptr_t)state);
1886 }
1887 }
1888
1889 /*
1890 * Deny access to protected files if keys have been locked.
1891 */
1892 static int
1893 cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
1894 {
1895 int error = 0;
1896
1897 /*
1898 * For now it's OK to examine the state variable here without
1899 * holding the HFS lock. This is only a short-circuit; if the state
1900 * transitions (or is in transition) after we examine this field, we'd
1901 * have to handle that anyway.
1902 */
1903 if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
1904 return 0;
1905 }
1906
1907 if (!cp->c_cpentry) {
1908 /* unprotected node */
1909 return 0;
1910 }
1911
1912 if (!S_ISREG(cp->c_mode)) {
1913 return 0;
1914 }
1915
1916 /* Deny all access for class A files */
1917 switch (CP_CLASS(cp->c_cpentry->cp_pclass)) {
1918 case PROTECTION_CLASS_A: {
1919 error = EPERM;
1920 break;
1921 }
1922 default:
1923 error = 0;
1924 break;
1925 }
1926
1927 return error;
1928 }
1929
1930 /*
1931 * Respond to a lock or unlock event.
1932 * On lock: flush file contents, then clear out keys from memory.
1933 * On unlock: nothing (function not called).
1934 */
1935 static int
1936 cp_lock_vnode_callback(struct vnode *vp, void *arg)
1937 {
1938 cnode_t *cp = NULL;
1939 struct cprotect *entry = NULL;
1940 int error = 0;
1941 int locked = 1;
1942 unsigned long action = 0;
1943 int took_truncate_lock = 0;
1944
1945 error = vnode_getwithref (vp);
1946 if (error) {
1947 return error;
1948 }
1949
1950 cp = VTOC(vp);
1951
1952 /*
1953 * When cleaning cnodes due to a lock event, we must
1954 * take the truncate lock AND the cnode lock. Taking
1955 * the truncate lock here forces (nearly) all pending I/Os
1956 * to drain before we proceed, since all HFS cluster
1957 * I/O calls except swapfile I/O must acquire the truncate lock
1958 * prior to calling into the cluster layer.
1959 */
1960 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1961 took_truncate_lock = 1;
1962
1963 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
1964
1965 entry = cp->c_cpentry;
1966 if (!entry) {
1967 /* unprotected vnode: not a regular file */
1968 goto out;
1969 }
1970
1971 action = (unsigned long) arg;
1972 switch (action) {
1973 case CP_LOCKED_STATE: {
1974 vfs_context_t ctx;
1975 if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
1976 vnode_isdir(vp)) {
1977 /*
1978 * Nothing changes at lock time for classes other than A:
1979 * class B keys are kept in memory for writing, and class F
1980 * (for VM) does not have a wrapped key, so there is no
1981 * wrapping/unwrapping work needed.
1982 *
1983 * Note that class F is relevant here because hfs_vnop_strategy
1984 * does not take the cnode lock to protect the cp blob
1985 * across I/O operations; we rely implicitly on the
1986 * truncate lock being held when doing I/O.
1987 * The only case where the truncate lock is not held is during
1988 * swapfile I/O, because HFS just funnels the VNOP_PAGEOUT
1989 * directly to cluster_pageout.
1990 */
1991 goto out;
1992 }
1993
1994 /* Before doing anything else, zero-fill sparse ranges as needed */
1995 ctx = vfs_context_current();
1996 (void) hfs_filedone (vp, ctx, 0);
1997
1998 /* first, sync back dirty pages */
1999 hfs_unlock (cp);
2000 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
2001 hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2002
2003 /* Flush keys:
2004 * There was a concern here (9206856) about flushing keys before the NAND layer is done using them.
2005 * But since we are using ubc_msync with UBC_SYNC, it blocks until all I/O is completed.
2006 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF,
2007 * which in turn calls buf_biodone() and eventually unblocks ubc_msync().
2008 * It was also verified that the cached data in IOFS is overwritten by other data, and there
2009 * is no key leakage in that layer.
2010 */
2011
2012 cp_flush_cached_keys(entry);
2013
2014 /* Some writes may have arrived in the meantime; dump those pages. */
2015 hfs_unlock(cp);
2016 locked = 0;
2017
2018 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
2019 break;
2020 }
2021 case CP_UNLOCKED_STATE: {
2022 /* no-op */
2023 break;
2024 }
2025 default:
2026 panic("Content Protection: unknown lock action %lu\n", action);
2027 }
2028
2029 out:
2030 if (locked) {
2031 hfs_unlock(cp);
2032 }
2033
2034 if (took_truncate_lock) {
2035 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
2036 }
2037
2038 vnode_put (vp);
2039 return error;
2040 }
2041
2042
2043 /*
2044 * cp_rewrap:
2045 *
2046 * Generate a new wrapped key for the target class from the existing persistent (wrapped) key.
2047 */
2048
2049 int
2050 cp_rewrap(struct cnode *cp, __unused hfsmount_t *hfsmp,
2051 cp_key_class_t *newclass, cp_key_pair_t *cpkp, const void *old_holder,
2052 cp_new_alloc_fn alloc_fn, void **pholder)
2053 {
2054 struct cprotect *entry = cp->c_cpentry;
2055
2056 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2057 unsigned keylen = CP_MAX_WRAPPEDKEYSIZE;
2058 int error = 0;
2059 const cp_key_class_t key_class = CP_CLASS(*newclass);
2060
2061 /* Structures passed between HFS and AKS */
2062 struct aks_cred_s access_in;
2063 struct aks_wrapped_key_s wrapped_key_in;
2064 struct aks_wrapped_key_s wrapped_key_out;
2065
2066 /*
2067 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2068 * key that is only good as long as the file is open. There is no
2069 * wrapped key, so there isn't anything to wrap.
2070 */
2071 if (key_class == PROTECTION_CLASS_F) {
2072 return EINVAL;
2073 }
2074
2075 cp_init_access(&access_in, cp);
2076
2077 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
2078 wrapped_key_in.key = cpkp_pers_key(cpkp);
2079 wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);
2080 /* Use the persistent class when talking to AKS */
2081 wrapped_key_in.dp_class = entry->cp_pclass;
2082
2083 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2084 wrapped_key_out.key = new_persistent_key;
2085 wrapped_key_out.key_len = keylen;
2086
2087 /*
2088 * The inode is passed here to find the backup bag wrapped blob
2089 * from userspace. This lookup will occur shortly after creation
2090 * and only if the file still exists. Beyond this lookup the
2091 * inode is not used. Technically there is a race, but in
2092 * practice we don't lose.
2093 */
2094 error = hfs_rewrap_key(&access_in,
2095 key_class, /* new class */
2096 &wrapped_key_in,
2097 &wrapped_key_out);
2098
2099 keylen = wrapped_key_out.key_len;
2100
2101 if (error == 0) {
2102 /*
2103 * Verify that AKS returned to us a wrapped key of the
2104 * target class requested.
2105 */
2106 /* Get the effective class here */
2107 cp_key_class_t effective = CP_CLASS(wrapped_key_out.dp_class);
2108 if (effective != key_class) {
2109 /*
2110 * Fail the operation if defaults or some other enforcement
2111 * dictated that the class be wrapped differently.
2112 */
2113
2114 /* TODO: Invalidate the key when 12170074 is unblocked */
2115 return EPERM;
2116 }
2117
2118 /* Allocate a new cpentry */
2119 cp_key_pair_t *new_cpkp;
2120 *pholder = alloc_fn(old_holder, keylen, CP_MAX_CACHEBUFLEN, &new_cpkp);
2121
2122 /* copy the new key into the entry */
2123 cpkp_set_pers_key_len(new_cpkp, keylen);
2124 memcpy(cpkp_pers_key(new_cpkp), new_persistent_key, keylen);
2125
2126 /* Actually record/store what AKS reported back, not the effective class stored in newclass */
2127 *newclass = wrapped_key_out.dp_class;
2128 }
2129 else {
2130 error = EPERM;
2131 }
2132
2133 return error;
2134 }
2135
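/*
 * Ask AKS to unwrap the persistent key in 'cpkp' and cache the
 * resulting raw key and IV key in the associated cpx.  Any failure
 * from AKS is reported as EPERM.
 */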
2136 static int cpkp_unwrap(cnode_t *cp, cp_key_class_t key_class, cp_key_pair_t *cpkp)
2137 {
2138 int error = 0;
2139 uint8_t iv_key[CP_IV_KEYSIZE];
2140 cpx_t cpx = cpkp_cpx(cpkp);
2141
2142 /* Structures passed between HFS and AKS */
2143 struct aks_cred_s access_in;
2144 struct aks_wrapped_key_s wrapped_key_in;
2145 struct aks_raw_key_s key_out;
2146
2147 cp_init_access(&access_in, cp);
2148
2149 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
2150 wrapped_key_in.key = cpkp_pers_key(cpkp);
2151 wrapped_key_in.key_len = cpkp_max_pers_key_len(cpkp);
2152 /* Use the persistent class when talking to AKS */
2153 wrapped_key_in.dp_class = key_class;
2154
2155 bzero(&key_out, sizeof(key_out));
2156 key_out.iv_key = iv_key;
2157 key_out.key = cpx_key(cpx);
2158 /*
2159 * The unwrapper should validate/set both the IV key length
2160 * and the cache key length; however, we need
2161 * to supply the correct buffer lengths so that AKS knows how
2162 * many bytes it has to work with.
2163 */
2164 key_out.iv_key_len = CP_IV_KEYSIZE;
2165 key_out.key_len = cpx_max_key_len(cpx);
2166
2167 error = hfs_unwrap_key(&access_in, &wrapped_key_in, &key_out);
2168 if (!error) {
2169 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2170 panic ("cp_unwrap: invalid key length! (%u)\n", key_out.key_len);
2171 }
2172
2173 if (key_out.iv_key_len != CP_IV_KEYSIZE)
2174 panic ("cp_unwrap: invalid iv key length! (%u)\n", key_out.iv_key_len);
2175
2176 cpx_set_key_len(cpx, key_out.key_len);
2177
2178 cpx_set_aes_iv_key(cpx, iv_key);
2179 cpx_set_is_sep_wrapped_key(cpx, ISSET(key_out.flags, AKS_RAW_KEY_WRAPPEDKEY));
2180 } else {
2181 error = EPERM;
2182 }
2183
2184 return error;
2185 }
2186
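/*
 * Unwrap the primary key pair for 'entry' and, if a key roll is in
 * progress, the rolling key pair as well.  If the rolling pair fails
 * to unwrap, the primary cached key is flushed so we never hold a
 * half-unwrapped set.
 */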
2187 static int
2188 cp_unwrap(__unused struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
2189 {
2190 /*
2191 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2192 * key that is only good as long as the file is open. There is no
2193 * wrapped key, so there isn't anything to unwrap.
2194 */
2195 if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
2196 return EPERM;
2197 }
2198
2199 int error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_keys);
2200
2201 #if HFS_CONFIG_KEY_ROLL
2202 if (!error && entry->cp_key_roll_ctx) {
2203 error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_key_roll_ctx->ckr_keys);
2204 if (error)
2205 cpx_flush(cpkp_cpx(&entry->cp_keys));
2206 }
2207 #endif
2208
2209 return error;
2210 }
2211
2212 /*
2213 * cp_generate_keys
2214 *
2215 * Take a cnode that has already been initialized and establish persistent and
2216 * cache keys for it at this time. Note that at the time this is called, the
2217 * directory entry has already been created and we are holding the cnode lock
2218 * on 'cp'.
2219 *
2220 */
2221 int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, cp_key_class_t targetclass,
2222 uint32_t keyflags, struct cprotect **newentry)
2223 {
2224
2225 int error = 0;
2226 struct cprotect *newcp = NULL;
2227 *newentry = NULL;
2228
2229 /* Target class must be an effective class only */
2230 targetclass = CP_CLASS(targetclass);
2231
2232 /* Validate that it has a cprotect already */
2233 if (cp->c_cpentry == NULL) {
2234 /* We can't do anything if it shouldn't be protected. */
2235 return 0;
2236 }
2237
2238 /* Asserts for the underlying cprotect */
2239 if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
2240 /* should already have an xattr by this point. */
2241 error = EINVAL;
2242 goto out;
2243 }
2244
2245 if (S_ISREG(cp->c_mode)) {
2246 if (!cp_needs_pers_key(cp->c_cpentry)) {
2247 error = EINVAL;
2248 goto out;
2249 }
2250 }
2251
2252 cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);
2253
2254 error = cp_new (&targetclass, hfsmp, cp, cp->c_mode, keyflags, key_revision,
2255 (cp_new_alloc_fn)cp_entry_alloc, (void **)&newcp);
2256 if (error) {
2257 /*
2258 * Key generation failed. This is not necessarily fatal
2259 * since the device could have transitioned into the lock
2260 * state before we called this.
2261 */
2262 error = EPERM;
2263 goto out;
2264 }
2265
2266 newcp->cp_pclass = targetclass;
2267 newcp->cp_key_os_version = cp_os_version();
2268 newcp->cp_key_revision = key_revision;
2269
2270 /*
2271 * If we got here, then we have a new cprotect.
2272 * Attempt to write the new one out.
2273 */
2274 error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);
2275
2276 if (error) {
2277 /* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
2278 /* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
2279 if (newcp) {
2280 cp_entry_destroy(hfsmp, newcp);
2281 }
2282 goto out;
2283 }
2284
2285 /*
2286 * If we get here then we can assert that:
2287 * 1) generated wrapped/unwrapped keys.
2288 * 2) wrote the new keys to disk.
2289 * 3) cprotect is ready to go.
2290 */
2291
2292 *newentry = newcp;
2293
2294 out:
2295 return error;
2296
2297 }
2298
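/*
 * Replace a cnode's cprotect with 'newentry': transfer any tentative
 * key-roll reservation, destroy the old entry, and point the new
 * entry back at the cnode.
 */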
2299 void cp_replace_entry (hfsmount_t *hfsmp, struct cnode *cp, struct cprotect *newentry)
2300 {
2301 if (cp->c_cpentry) {
2302 #if HFS_CONFIG_KEY_ROLL
2303 // Transfer the tentative reservation
2304 if (cp->c_cpentry->cp_key_roll_ctx && newentry->cp_key_roll_ctx) {
2305 newentry->cp_key_roll_ctx->ckr_tentative_reservation
2306 = cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation;
2307
2308 cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation = NULL;
2309 }
2310 #endif
2311
2312 cp_entry_destroy (hfsmp, cp->c_cpentry);
2313 }
2314 cp->c_cpentry = newentry;
2315 newentry->cp_backing_cnode = cp;
2316
2317 return;
2318 }
2319
2320
2321 /*
2322 * cp_new
2323 *
2324 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
2325 * allocate a cprotect, and vend it back to the caller.
2326 *
2327 * Additionally, decide if keys are even needed -- directories get cprotect data structures
2328 * but they do not have keys.
2329 *
2330 */
2331
2332 int
2333 cp_new(cp_key_class_t *newclass_eff, __unused struct hfsmount *hfsmp, struct cnode *cp,
2334 mode_t cmode, int32_t keyflags, cp_key_revision_t key_revision,
2335 cp_new_alloc_fn alloc_fn, void **pholder)
2336 {
2337 int error = 0;
2338 uint8_t new_key[CP_MAX_CACHEBUFLEN];
2339 unsigned new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tells us the proper key length; this tracks how much of the buffer is used */
2340 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2341 unsigned new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
2342 uint8_t iv_key[CP_IV_KEYSIZE];
2343 unsigned iv_key_len = CP_IV_KEYSIZE;
2344 int iswrapped = 0;
2345 cp_key_class_t key_class = CP_CLASS(*newclass_eff);
2346
2347 /* Structures passed between HFS and AKS */
2348 struct aks_cred_s access_in;
2349 struct aks_wrapped_key_s wrapped_key_out;
2350 struct aks_raw_key_s key_out;
2351
2352 /* Sanity check that it's a file or directory here */
2353 if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
2354 return EPERM;
2355 }
2356
2357 /*
2358 * Step 1: Generate Keys if needed.
2359 *
2360 * For class F files, the kernel provides the key.
2361 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2362 * key that is only good as long as the file is open. There is no
2363 * wrapped key, so there isn't anything to wrap.
2364 *
2365 * For class A-D files, the keystore provides the key.
2366 *
2367 * Directories only get a class; they have no keys.
2368 */
2369 if (S_ISDIR (cmode)) {
2370 /* Directories */
2371 new_persistent_len = 0;
2372 new_key_len = 0;
2373
2374 error = 0;
2375 }
2376 else {
2377 /* Must be a file */
2378 if (key_class == PROTECTION_CLASS_F) {
2379 /* class F files are not wrapped; they can still use the max key size */
2380 new_key_len = CP_MAX_KEYSIZE;
2381 read_random (&new_key[0], new_key_len);
2382 new_persistent_len = 0;
2383
2384 error = 0;
2385 }
2386 else {
2387 /*
2388 * The keystore is provided the file ID so that it can associate
2389 * the wrapped backup blob with this key from userspace. This
2390 * lookup occurs after successful file creation. Beyond this, the
2391 * file ID is not used. Note that there is a potential race here if
2392 * the file ID is re-used.
2393 */
2394 cp_init_access(&access_in, cp);
2395
2396 bzero(&key_out, sizeof(key_out));
2397 key_out.key = new_key;
2398 key_out.iv_key = iv_key;
2399 /*
2400 * AKS will override our key length fields, but we need to supply
2401 * the length of the buffer in those length fields so that
2402 * AKS knows how many bytes it has to work with.
2403 */
2404 key_out.key_len = new_key_len;
2405 key_out.iv_key_len = iv_key_len;
2406
2407 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2408 wrapped_key_out.key = new_persistent_key;
2409 wrapped_key_out.key_len = new_persistent_len;
2410
2411 access_in.key_revision = key_revision;
2412
2413 error = hfs_new_key(&access_in,
2414 key_class,
2415 &key_out,
2416 &wrapped_key_out);
2417
2418 if (error) {
2419 /* keybag returned failure */
2420 error = EPERM;
2421 goto cpnew_fail;
2422 }
2423
2424 /* Now sanity-check the output from new_key */
2425 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2426 panic ("cp_new: invalid key length! (%u)\n", key_out.key_len);
2427 }
2428
2429 if (key_out.iv_key_len != CP_IV_KEYSIZE) {
2430 panic ("cp_new: invalid iv key length! (%u)\n", key_out.iv_key_len);
2431 }
2432
2433 /*
2434 * AKS is allowed to override our preferences and wrap with a
2435 * different class key for policy reasons. If the caller indicated
2436 * that any class other than the one specified is unacceptable,
2437 * error out when that occurs. Check that the effective class
2438 * returned by AKS is the same as our effective new class.
2439 */
2440 if (CP_CLASS(wrapped_key_out.dp_class) != key_class) {
2441 if (!ISSET(keyflags, CP_KEYWRAP_DIFFCLASS)) {
2442 error = EPERM;
2443 /* TODO: When 12170074 is fixed, release/invalidate the key! */
2444 goto cpnew_fail;
2445 }
2446 }
2447
2448 *newclass_eff = wrapped_key_out.dp_class;
2449 new_key_len = key_out.key_len;
2450 iv_key_len = key_out.iv_key_len;
2451 new_persistent_len = wrapped_key_out.key_len;
2452
2453 /* Is the key a SEP wrapped key? */
2454 if (key_out.flags & AKS_RAW_KEY_WRAPPEDKEY) {
2455 iswrapped = 1;
2456 }
2457 }
2458 }
2459
2460 /*
2461 * Step 2: allocate cprotect and initialize it.
2462 */
2463
2464 cp_key_pair_t *cpkp;
2465 *pholder = alloc_fn(NULL, new_persistent_len, new_key_len, &cpkp);
2466 if (*pholder == NULL) {
2467 return ENOMEM;
2468 }
2469
2470 /* Copy the cache key & IV keys into place if needed. */
2471 if (new_key_len > 0) {
2472 cpx_t cpx = cpkp_cpx(cpkp);
2473
2474 cpx_set_key_len(cpx, new_key_len);
2475 memcpy(cpx_key(cpx), new_key, new_key_len);
2476
2477 /* Initialize the IV key */
2478 if (key_class != PROTECTION_CLASS_F)
2479 cpx_set_aes_iv_key(cpx, iv_key);
2480
2481 cpx_set_is_sep_wrapped_key(cpx, iswrapped);
2482 }
2483 if (new_persistent_len > 0) {
2484 cpkp_set_pers_key_len(cpkp, new_persistent_len);
2485 memcpy(cpkp_pers_key(cpkp), new_persistent_key, new_persistent_len);
2486 }
2487
2488 cpnew_fail:
2489
2490 #if HFS_TMPDBG
2491 #if !SECURE_KERNEL
2492 if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
2493 /* Only introspect the data fork */
2494 cp_log_eperm (cp->c_vp, *newclass_eff, true);
2495 }
2496 #endif
2497 #endif
2498
2499 return error;
2500 }
2501
2502 /* Initialize the aks_cred_t structure passed to AKS */
2503 static void cp_init_access(aks_cred_t access, struct cnode *cp)
2504 {
2505 vfs_context_t context = vfs_context_current();
2506 kauth_cred_t cred = vfs_context_ucred(context);
2507 proc_t proc = vfs_context_proc(context);
2508
2509 bzero(access, sizeof(*access));
2510
2511 /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
2512 access->inode = cp->c_fileid;
2513 access->pid = proc_pid(proc);
2514 access->uid = kauth_cred_getuid(cred);
2515
2516 if (cp->c_cpentry)
2517 access->key_revision = cp->c_cpentry->cp_key_revision;
2518
2519 return;
2520 }
2521
2522 #if HFS_CONFIG_KEY_ROLL
2523
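/*
 * Persist the auto-roll policy (min/max key OS versions and the
 * old-class-generation flag) in the root xattr, then mirror it into
 * the in-memory mount state under the mount lock.  A sketch of a
 * caller (hypothetical; 'target_vers' is invented, but the field and
 * flag names are the ones this function consumes):
 *
 *	hfs_key_auto_roll_args_t args = {
 *		.min_key_os_version = 0,            // no lower bound
 *		.max_key_os_version = target_vers,  // roll keys older than this
 *		.flags = HFS_KEY_AUTO_ROLL_OLD_CLASS_GENERATION,
 *	};
 *	errno_t err = cp_set_auto_roll(hfsmp, &args);
 */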
2524 errno_t cp_set_auto_roll(hfsmount_t *hfsmp,
2525 const hfs_key_auto_roll_args_t *args)
2526 {
2527 // 64 bytes should be OK on the stack
2528 _Static_assert(sizeof(struct cp_root_xattr) < 64, "cp_root_xattr too big!");
2529
2530 struct cp_root_xattr xattr;
2531 errno_t ret;
2532
2533 ret = cp_getrootxattr(hfsmp, &xattr);
2534 if (ret)
2535 return ret;
2536
2537 ret = hfs_start_transaction(hfsmp);
2538 if (ret)
2539 return ret;
2540
2541 xattr.auto_roll_min_version = args->min_key_os_version;
2542 xattr.auto_roll_max_version = args->max_key_os_version;
2543
2544 bool roll_old_class_gen = ISSET(args->flags, HFS_KEY_AUTO_ROLL_OLD_CLASS_GENERATION);
2545
2546 if (roll_old_class_gen)
2547 SET(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
2548 else
2549 CLR(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
2550
2551 ret = cp_setrootxattr(hfsmp, &xattr);
2552
2553 errno_t ret2 = hfs_end_transaction(hfsmp);
2554
2555 if (!ret)
2556 ret = ret2;
2557
2558 if (ret)
2559 return ret;
2560
2561 hfs_lock_mount(hfsmp);
2562 hfsmp->hfs_auto_roll_min_key_os_version = args->min_key_os_version;
2563 hfsmp->hfs_auto_roll_max_key_os_version = args->max_key_os_version;
2564 hfs_unlock_mount(hfsmp);
2565
2566 return ret;
2567 }
2568
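/*
 * Decide whether 'cpr' should be transparently rolled to a new key:
 * skip entries already rolling, classes outside A-C, and entries with
 * no persistent key; then roll if the entry's class generation is
 * stale, or if its key OS version falls inside the configured
 * [min, max) auto-roll window.
 */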
2569 bool cp_should_auto_roll(hfsmount_t *hfsmp, cprotect_t cpr)
2570 {
2571 if (cpr->cp_key_roll_ctx) {
2572 // Already rolling
2573 return false;
2574 }
2575
2576 // Only automatically roll class A, B & C
2577 if (CP_CLASS(cpr->cp_pclass) < PROTECTION_CLASS_A
2578 || CP_CLASS(cpr->cp_pclass) > PROTECTION_CLASS_C) {
2579 return false;
2580 }
2581
2582 if (!cpkp_has_pers_key(&cpr->cp_keys))
2583 return false;
2584
2585 /*
2586 * Remember, the class generation stored in HFS+ is updated at the *end*,
2587 * so it's old if it matches the generation we have stored.
2588 */
2589 if (ISSET(hfsmp->cproot_flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION)
2590 && cp_get_crypto_generation(cpr->cp_pclass) == hfsmp->cp_crypto_generation) {
2591 return true;
2592 }
2593
2594 if (!hfsmp->hfs_auto_roll_min_key_os_version
2595 && !hfsmp->hfs_auto_roll_max_key_os_version) {
2596 // No minimum or maximum set
2597 return false;
2598 }
2599
2600 if (hfsmp->hfs_auto_roll_min_key_os_version
2601 && cpr->cp_key_os_version < hfsmp->hfs_auto_roll_min_key_os_version) {
2602 // Before minimum
2603 return false;
2604 }
2605
2606 if (hfsmp->hfs_auto_roll_max_key_os_version
2607 && cpr->cp_key_os_version >= hfsmp->hfs_auto_roll_max_key_os_version) {
2608 // Greater than maximum
2609 return false;
2610 }
2611
2612 return true;
2613 }
2614
2615 #endif // HFS_CONFIG_KEY_ROLL
2616
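/*
 * Called on the buf_strategy path to attach the correct cpx (key
 * material) to a buffer before it reaches the device layer.  Returns 0
 * with the buffer decorated, or an errno after the buffer has been
 * completed with that error.
 *
 * A sketch of the intended call site (hypothetical; the real
 * hfs_vnop_strategy lives elsewhere in HFS):
 *
 *	int hfs_vnop_strategy(struct vnop_strategy_args *ap)
 *	{
 *		buf_t bp = ap->a_bp;
 *		errno_t error = cp_handle_strategy(bp);
 *		if (error)
 *			return error;	// bp already completed via buf_biodone()
 *		return buf_strategy(VTOHFS(buf_vnode(bp))->hfs_devvp, ap);
 *	}
 */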
2617 errno_t cp_handle_strategy(buf_t bp)
2618 {
2619 vnode_t vp = buf_vnode(bp);
2620 cnode_t *cp = NULL;
2621
2622 if (bufattr_rawencrypted(buf_attr(bp))
2623 || !(cp = cp_get_protected_cnode(vp))
2624 || !cp->c_cpentry) {
2625 // Nothing to do
2626 return 0;
2627 }
2628
2629 /*
2630 * For filesystem resize, we may not have access to the underlying
2631 * file's cache key for whatever reason (device may be locked).
2632 * However, we do not need it since we are going to use the
2633 * temporary HFS-wide resize key which is generated once we start
2634 * relocating file content. If this file's I/O should be done
2635 * using the resize key, it will have been supplied already, so do
2636 * not attach the file's cp blob to the buffer.
2637 */
2638 if (ISSET(cp->c_cpentry->cp_flags, CP_RELOCATION_INFLIGHT))
2639 return 0;
2640
2641 #if HFS_CONFIG_KEY_ROLL
2642 /*
2643 * We don't require any locks here. Pages will be locked so no
2644 * key rolling can take place until this I/O has completed.
2645 */
2646 if (!cp->c_cpentry->cp_key_roll_ctx)
2647 #endif
2648 {
2649 // Fast path
2650 cpx_t cpx = cpkp_cpx(&cp->c_cpentry->cp_keys);
2651
2652 if (cpx_has_key(cpx)) {
2653 bufattr_setcpx(buf_attr(bp), cpx);
2654 return 0;
2655 }
2656 }
2657
2658 /*
2659 * We rely mostly (see note below) upon the truncate lock to
2660 * protect the CP cache key from getting tossed prior to our IO
2661 * finishing here. Nearly all cluster io calls to manipulate file
2662 * payload from HFS take the truncate lock before calling into the
2663 * cluster layer to ensure the file size does not change, or that
2664 * they have exclusive right to change the EOF of the file. That
2665 * same guarantee protects us here since the code that deals with
2666 * CP lock events must now take the truncate lock before doing
2667 * anything.
2668 *
2669 * If you want to change content protection structures, then the
2670 * truncate lock is not sufficient; you must take the truncate
2671 * lock and then wait for outstanding writes to complete. This is
2672 * necessary because asynchronous I/O only holds the truncate lock
2673 * whilst I/O is being queued.
2674 *
2675 * One exception should be the VM swapfile IO, because HFS will
2676 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call
2677 * for the swapfile code only without holding the truncate lock.
2678 * This is because individual swapfiles are maintained at
2679 * fixed-length sizes by the VM code. In non-swapfile IO we use
2680 * PAGEOUT_V2 semantics which allow us to create our own UPL and
2681 * thus take the truncate lock before calling into the cluster
2682 * layer. In that case, however, we are not concerned with the CP
2683 * blob being wiped out in the middle of the IO because there
2684 * isn't anything to toss; the VM swapfile key stays in-core as
2685 * long as the file is open.
2686 */
2687
2688 off_rsrc_t off_rsrc = off_rsrc_make(buf_lblkno(bp) * GetLogicalBlockSize(vp),
2689 VNODE_IS_RSRC(vp));
2690 cp_io_params_t io_params;
2691
2692
2693 /*
2694 * We want to take the cnode lock here, and because the vnode write
2695 * count is a pseudo-lock, we need to do something to preserve
2696 * lock ordering; the cnode lock comes before the write count.
2697 * Ideally, the write count would be incremented after the
2698 * strategy routine returns, but that becomes complicated if the
2699 * strategy routine were to call buf_iodone before returning.
2700 * For now, we drop the write count here and then pick it up again
2701 * later.
2702 */
2703 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2704 vnode_writedone(vp);
2705
2706 hfs_lock_always(cp, HFS_SHARED_LOCK);
2707 cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
2708 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
2709 &io_params);
2710 hfs_unlock(cp);
2711
2712 /*
2713 * Last chance: if this data-protected I/O does not have unwrapped
2714 * keys present, then try to get them. We already know that it
2715 * should, by this point.
2716 */
2717 if (!cpx_has_key(io_params.cpx)) {
2718 int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
2719 errno_t error = cp_handle_vnop(vp, io_op, 0);
2720 if (error) {
2721 /*
2722 * We have to be careful here. By this point in the I/O
2723 * path, VM or the cluster engine has prepared a buf_t
2724 * with the proper file offsets and all the rest, so
2725 * simply erroring out will result in us leaking this
2726 * particular buf_t. We need to properly decorate the
2727 * buf_t just as buf_strategy would so as to make it
2728 * appear that the I/O errored out with the particular
2729 * error code.
2730 */
2731 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2732 vnode_startwrite(vp);
2733 buf_seterror (bp, error);
2734 buf_biodone(bp);
2735 return error;
2736 }
2737
2738 hfs_lock_always(cp, HFS_SHARED_LOCK);
2739 cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
2740 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
2741 &io_params);
2742 hfs_unlock(cp);
2743 }
2744
2745 hfs_assert(buf_count(bp) <= io_params.max_len);
2746 bufattr_setcpx(buf_attr(bp), io_params.cpx);
2747
2748 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2749 vnode_startwrite(vp);
2750
2751 return 0;
2752 }
2753
2754 #endif /* CONFIG_PROTECT */