/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#if CONFIG_PROTECT

#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/vnode_if.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <libkern/crypto/sha1.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <uuid/uuid.h>

#include "hfs.h"
#include "hfs_cnode.h"
#include "hfs_fsctl.h"
#include "hfs_cprotect.h"
#include "hfs_iokit.h"

#if HFS_CONFIG_KEY_ROLL
#include "hfs_key_roll.h"
#endif

#define PTR_ADD(type, base, offset)		(type)((uintptr_t)(base) + (offset))
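/*
 * e.g. PTR_ADD(uint32_t *, entry, 16) yields a uint32_t pointer 16 bytes
 * past 'entry'; it is used below to address the variable-length regions
 * that trail the fixed-size structures.
 */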

extern int (**hfs_vnodeop_p) (void *);

/*
 * CP private functions
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static void cp_entry_dealloc(hfsmount_t *hfsmp, struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static void cp_init_access(aks_cred_t access, struct cnode *cp);

// -- cp_key_pair accessors --

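/*
 * Layout sketch (informal; the authoritative sizes come from cpkp_size()
 * and cpkp_pers_key() below): a cp_key_pair_t embeds the cpx (the cached,
 * unwrapped key) and stores the wrapped persistent key immediately after
 * it in the same allocation:
 *
 *   +-- cp_key_pair_t header --+-- cpx (cpx_sizex bytes) --+-- wrapped key --+
 *                              ^ cpkp_cpx()                ^ cpkp_pers_key()
 *
 * The wrapped-key region is cpkp_max_pers_key_len bytes long, of which
 * cpkp_pers_key_len are valid.
 */
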
void cpkp_init(cp_key_pair_t *cpkp, uint16_t max_pers_key_len,
			   uint16_t max_cached_key_len)
{
	cpkp->cpkp_max_pers_key_len = max_pers_key_len;
	cpkp->cpkp_pers_key_len = 0;
	cpx_init(cpkp_cpx(cpkp), max_cached_key_len);

	// Default to using offsets
	cpx_set_use_offset_for_iv(cpkp_cpx(cpkp), true);
}

uint16_t cpkp_max_pers_key_len(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_max_pers_key_len;
}

uint16_t cpkp_pers_key_len(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len;
}

static bool cpkp_has_pers_key(const cp_key_pair_t *cpkp)
{
	return cpkp->cpkp_pers_key_len > 0;
}

static void *cpkp_pers_key(const cp_key_pair_t *cpkp)
{
	return PTR_ADD(void *, &cpkp->cpkp_cpx, cpx_sizex(cpkp_cpx(cpkp)));
}

static void cpkp_set_pers_key_len(cp_key_pair_t *cpkp, uint16_t key_len)
{
	if (key_len > cpkp->cpkp_max_pers_key_len)
		panic("hfs_cprotect: key too big!");
	cpkp->cpkp_pers_key_len = key_len;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
cpx_t cpkp_cpx(const cp_key_pair_t *cpkp)
{
	// Cast to remove const qualifier
	return (cpx_t)&cpkp->cpkp_cpx;
}
#pragma clang diagnostic pop

size_t cpkp_size(uint16_t pers_key_len, uint16_t cached_key_len)
{
	return sizeof(cp_key_pair_t) + pers_key_len + cpx_size(cached_key_len);
}

size_t cpkp_sizex(const cp_key_pair_t *cpkp)
{
	return cpkp_size(cpkp->cpkp_max_pers_key_len, cpx_max_key_len(cpkp_cpx(cpkp)));
}

void cpkp_flush(cp_key_pair_t *cpkp)
{
	cpx_flush(cpkp_cpx(cpkp));
	cpkp->cpkp_pers_key_len = 0;
	bzero(cpkp_pers_key(cpkp), cpkp->cpkp_max_pers_key_len);
}

bool cpkp_can_copy(const cp_key_pair_t *src, const cp_key_pair_t *dst)
{
	return (cpkp_pers_key_len(src) <= dst->cpkp_max_pers_key_len
			&& cpx_can_copy(cpkp_cpx(src), cpkp_cpx(dst)));
}

void cpkp_copy(const cp_key_pair_t *src, cp_key_pair_t *dst)
{
	const uint16_t key_len = cpkp_pers_key_len(src);
	cpkp_set_pers_key_len(dst, key_len);
	memcpy(cpkp_pers_key(dst), cpkp_pers_key(src), key_len);
	cpx_copy(cpkp_cpx(src), cpkp_cpx(dst));
}

// --

bool cp_is_supported_version(uint16_t vers)
{
	return vers == CP_VERS_4 || vers == CP_VERS_5;
}

/*
 * Return the appropriate key and, if requested, the physical offset and
 * maximum length for a particular I/O operation.
 */
void cp_io_params(__unused hfsmount_t *hfsmp, cprotect_t cpr,
				  __unused off_rsrc_t off_rsrc,
				  __unused int direction, cp_io_params_t *io_params)
{
#if HFS_CONFIG_KEY_ROLL
	hfs_cp_key_roll_ctx_t *ckr = cpr->cp_key_roll_ctx;

	if (ckr && off_rsrc < ckr->ckr_off_rsrc) {
		/*
		 * When we're in the process of rolling an extent, ckr_off_rsrc will
		 * indicate the end of the extent.
		 */
		const off_rsrc_t roll_loc = ckr->ckr_off_rsrc
			- hfs_blk_to_bytes(ckr->ckr_roll_extent.blockCount,
							   hfsmp->blockSize);

		if (off_rsrc < roll_loc) {
			io_params->max_len = roll_loc - off_rsrc;
			io_params->phys_offset = -1;
		} else {
			/*
			 * We should never get reads to the extent we're rolling
			 * because the pages should be locked in the UBC. If we
			 * did get reads it's not obvious what the right thing to
			 * do is either: we could read from the old location, but
			 * we might have written later data to the new location,
			 * or we could read from the new location, but data might
			 * not have been written there yet.
			 *
			 * Note that whilst raw encrypted reads don't lock any
			 * pages, or take a cluster_read_direct lock, the call to
			 * hfs_key_roll_up_to in hfs_vnop_read will have ensured
			 * that the file has been rolled beyond the offset being
			 * read so this path should never be taken in that case.
			 */
			hfs_assert(direction == VNODE_WRITE);

			// For release builds, just in case...
			if (direction == VNODE_READ) {
				// Use the old key and offset
				goto old_key;
			}

			io_params->max_len = ckr->ckr_off_rsrc - off_rsrc;
			io_params->phys_offset = hfs_blk_to_bytes(ckr->ckr_roll_extent.startBlock,
													  hfsmp->blockSize) + off_rsrc - roll_loc;
		}

		// Use new key
		io_params->cpx = cpkp_cpx(&ckr->ckr_keys);
		return;
	}
old_key:
	// Use old key...
#endif

	io_params->max_len = INT64_MAX;
	io_params->phys_offset = -1;
	io_params->cpx = cpkp_cpx(&cpr->cp_keys);
}
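
/*
 * Illustrative caller sketch (not compiled; 'start', 'len' and issue_io()
 * are hypothetical): callers are expected to invoke cp_io_params() per
 * chunk and honour max_len/phys_offset so that a single I/O never
 * straddles the key-roll boundary.
 */
#if 0
	cp_io_params_t io_params;
	off_rsrc_t off = start;
	int64_t remaining = len;
	while (remaining > 0) {
		cp_io_params(hfsmp, cpr, off, VNODE_READ, &io_params);
		/* phys_offset == -1 means "use the file's normal mapping" */
		int64_t this_len = MIN(remaining, io_params.max_len);
		issue_io(vp, off, this_len, io_params.cpx, io_params.phys_offset);
		off += this_len;
		remaining -= this_len;
	}
#endif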

static void cp_flush_cached_keys(cprotect_t cpr)
{
	cpx_flush(cpkp_cpx(&cpr->cp_keys));
#if HFS_CONFIG_KEY_ROLL
	if (cpr->cp_key_roll_ctx)
		cpx_flush(cpkp_cpx(&cpr->cp_key_roll_ctx->ckr_keys));
#endif
}

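/*
 * Note: class F files intentionally have no persistent (wrapped) key --
 * the key lives only in memory -- so for class F "needs a key" means the
 * cached cpx is empty, rather than that a wrapped key is missing.
 */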
static bool cp_needs_pers_key(cprotect_t cpr)
{
	if (CP_CLASS(cpr->cp_pclass) == PROTECTION_CLASS_F)
		return !cpx_has_key(cpkp_cpx(&cpr->cp_keys));
	else
		return !cpkp_has_pers_key(&cpr->cp_keys);
}

static cp_key_revision_t cp_initial_key_revision(__unused hfsmount_t *hfsmp)
{
	return 1;
}

cp_key_revision_t cp_next_key_revision(cp_key_revision_t rev)
{
	rev = (rev + 0x0100) ^ (mach_absolute_time() & 0xff);
	if (!rev)
		rev = 1;
	return rev;
}
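
/*
 * Worked example (illustrative): if rev == 0x0203, cp_next_key_revision()
 * returns 0x03xx -- the high byte increments deterministically while the
 * low byte is perturbed by the low byte of mach_absolute_time(); a result
 * of 0 is remapped to 1, so 0 is never a valid revision.
 */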

/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
	struct cprotect *entry = NULL;
	int error = 0;
	struct hfsmount *hfsmp = VFSTOHFS(mp);

	/*
	 * The cnode should be locked at this point, regardless of whether or not
	 * we are creating a new item in the namespace or vending a vnode on behalf
	 * of lookup. The only time we tell getnewvnode to skip the lock is when
	 * constructing a resource fork vnode. But a resource fork vnode must come
	 * after the regular data fork cnode has already been constructed.
	 */
	if (!cp_fs_protected (mp)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
		cp->c_cpentry = NULL;
		return 0;
	}

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("hfs cp: no running mount point version!");
	}

	hfs_assert(cp->c_cpentry == NULL);

	error = cp_getxattr(cp, hfsmp, &entry);
	if (error == ENOATTR) {
		/*
		 * Normally, we should always have a CP EA for a file or directory that
		 * we are initializing here. However, there are some extenuating circumstances,
		 * such as the root directory immediately following a newfs_hfs.
		 *
		 * As a result, we leave code here to deal with an ENOATTR which will always
		 * default to a 'D/NONE' key, though we don't expect to use it much.
		 */
		cp_key_class_t target_class = PROTECTION_CLASS_D;

		if (S_ISDIR(cp->c_mode)) {
			target_class = PROTECTION_CLASS_DIR_NONE;
		}

		cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

		/* allow keybag to override our class preferences */
		error = cp_new (&target_class, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS,
						key_revision, (cp_new_alloc_fn)cp_entry_alloc, (void **)&entry);
		if (error == 0) {
			entry->cp_pclass = target_class;
			entry->cp_key_os_version = cp_os_version();
			entry->cp_key_revision = key_revision;
			error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
		}
	}

	/*
	 * Bail out if:
	 * a) error was not ENOATTR (we got something bad from the getxattr call)
	 * b) we encountered an error setting the xattr above.
	 * c) we failed to generate a new cprotect data structure.
	 */
	if (error) {
		goto out;
	}

	cp->c_cpentry = entry;

out:
	if (error == 0) {
		entry->cp_backing_cnode = cp;
	}
	else {
		if (entry) {
			cp_entry_destroy(hfsmp, entry);
		}
		cp->c_cpentry = NULL;
	}

	return error;
}

/*
 * cp_setup_newentry
 *
 * Generate a keyless cprotect structure for use with the new AppleKeyStore kext.
 * Since the kext is now responsible for vending us both wrapped/unwrapped keys,
 * we need to create a keyless xattr upon file/directory creation. Once the inode
 * is established and we know its value, we can ask the kext to generate keys.
 * Note that this introduces a potential race: if the device is locked and the
 * wrapping keys are purged between the time we call this function and the time
 * we ask for keys, we may have to fail the open(2) call and back out the entry.
 */

int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp,
					   cp_key_class_t suppliedclass, mode_t cmode,
					   struct cprotect **tmpentry)
{
	int isdir = 0;
	struct cprotect *entry = NULL;
	uint32_t target_class = hfsmp->default_cp_class;
	suppliedclass = CP_CLASS(suppliedclass);

	if (hfsmp->hfs_running_cp_major_vers == 0) {
		panic ("CP: major vers not set in mount!");
	}

	if (S_ISDIR (cmode)) {
		isdir = 1;
	}

	/* Decide the target class. Input argument takes priority. */
	if (cp_is_valid_class (isdir, suppliedclass)) {
		/* caller supplies -1 if it was not specified so we will default to the mount point value */
		target_class = suppliedclass;
		/*
		 * One exception, F is never valid for a directory
		 * because its children may inherit and userland will be
		 * unable to read/write to the files.
		 */
		if (isdir) {
			if (target_class == PROTECTION_CLASS_F) {
				*tmpentry = NULL;
				return EINVAL;
			}
		}
	}
	else {
		/*
		 * If no valid class was supplied, behave differently depending on whether or not
		 * the item being created is a file or directory.
		 *
		 * for FILE:
		 *		If parent directory has a non-zero class, use that.
		 *		If parent directory has a zero class (not set), then attempt to
		 *		apply the mount point default.
		 *
		 * for DIRECTORY:
		 *		Directories always inherit from the parent; if the parent
		 *		has a NONE class set, then we can continue to use that.
		 */
		if ((dcp) && (dcp->c_cpentry)) {
			uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
			/* If the parent class is not valid, default to the mount point value */
			if (cp_is_valid_class(1, parentclass)) {
				if (isdir) {
					target_class = parentclass;
				}
				else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
					/* files can inherit so long as it's not NONE */
					target_class = parentclass;
				}
			}
			/* Otherwise, we already defaulted to the mount point's default */
		}
	}

	/* Generate the cprotect to vend out */
	entry = cp_entry_alloc(NULL, 0, 0, NULL);
	if (entry == NULL) {
		*tmpentry = NULL;
		return ENOMEM;
	}

	/*
	 * We don't have keys yet, so fill in what we can. At this point
	 * this blob has no keys and it has no backing xattr. We just know the
	 * target class.
	 */
	entry->cp_flags = CP_NO_XATTR;
	/* Note this is only the effective class */
	entry->cp_pclass = target_class;
	*tmpentry = entry;

	return 0;
}
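
/*
 * Informal summary of the class-selection rules above:
 *
 *   supplied class valid?   item   result
 *   ---------------------   ----   ----------------------------------
 *   yes                     file   supplied class
 *   yes, but class F        dir    EINVAL
 *   yes                     dir    supplied class
 *   no                      file   parent's class unless DIR_NONE,
 *                                  else mount default
 *   no                      dir    parent's class, else mount default
 */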

/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk. We use class F for this since it provides the exact
 * semantics that are needed here. Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 */
int cpx_gentempkeys(cpx_t *pcpx, __unused struct hfsmount *hfsmp)
{
	cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE);

	cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
	read_random(cpx_key(cpx), CP_MAX_KEYSIZE);
	cpx_set_use_offset_for_iv(cpx, true);

	*pcpx = cpx;

	return 0;
}
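
/*
 * Illustrative sketch (not compiled; cpx_free() is assumed to be the
 * matching release routine): a relocation path generates a transient
 * class-F style key, uses it for the in-flight copy, and discards it.
 */
#if 0
	cpx_t tmp_cpx;
	if (cpx_gentempkeys(&tmp_cpx, hfsmp) == 0) {
		/* ... encrypt/decrypt the relocated blocks with tmp_cpx ... */
		cpx_free(tmp_cpx);	/* the key is never persisted */
	}
#endif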

/*
 * Tear down and clear a cprotect blob for a closing file.
 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
 */
void
cp_entry_destroy(hfsmount_t *hfsmp, struct cprotect *entry_ptr)
{
	if (entry_ptr == NULL) {
		/* nothing to clean up */
		return;
	}
	cp_entry_dealloc(hfsmp, entry_ptr);
}


int
cp_fs_protected (mount_t mnt)
{
	return (vfs_flags(mnt) & MNT_CPROTECT);
}


/*
 * Return a pointer to underlying cnode if there is one for this vnode.
 * Done without taking cnode lock, inspecting only vnode state.
 */
struct cnode *
cp_get_protected_cnode(struct vnode *vp)
{
	if (!cp_vnode_is_eligible(vp)) {
		return NULL;
	}

	if (!cp_fs_protected(VTOVFS(vp))) {
		/* mount point doesn't support it */
		return NULL;
	}

	return vnode_fsnode(vp);
}


/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 */
int
cp_vnode_getclass(struct vnode *vp, cp_key_class_t *class)
{
	struct cprotect *entry;
	int error = 0;
	struct cnode *cp;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible (vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the truncate lock up-front in shared mode because we may need
	 * to manipulate the CP blob. Pend lock events until we're done here.
	 */
	hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * We take only the shared cnode lock up-front. If it turns out that
	 * we need to manipulate the CP blob to write a key out, drop the
	 * shared cnode lock and acquire an exclusive lock.
	 */
	error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	if (error) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return error;
	}

	/* pull the class from the live entry */
	entry = cp->c_cpentry;

	if (entry == NULL) {
		panic("Content Protection: uninitialized cnode %p", cp);
	}

	/* Note that we may not have keys yet, but we know the target class. */

	if (error == 0) {
		*class = CP_CLASS(entry->cp_pclass);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	hfs_unlock(cp);
	return error;
}

/*
 * Sets persistent class for this file or directory.
 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
 * If the new class can't be accessed now, EPERM.
 * Otherwise, record class and re-wrap key if the mount point is content-protected.
 */
int
cp_vnode_setclass(struct vnode *vp, cp_key_class_t newclass)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;
	int isdir = 0;

	if (vnode_isdir (vp)) {
		isdir = 1;
	}

	/* Ensure we only use the effective class here */
	newclass = CP_CLASS(newclass);

	if (!cp_is_valid_class(isdir, newclass)) {
		printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
		return EINVAL;
	}

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same. This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	cp = VTOC(vp);
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	/*
	 * The truncate lock is not sufficient to guarantee the CP blob
	 * isn't being used. We must wait for existing writes to finish.
	 */
	vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		/* don't leak the truncate lock taken above */
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * re-wrap per-file key with new class.
	 * Generate an entirely new key if switching to F.
	 */
	if (vnode_isreg(vp)) {
		/*
		 * The vnode is a file. Before proceeding with the re-wrap, we need
		 * to unwrap the existing keys. This ensures the destination class's
		 * semantics still work for the target class (class B permits I/O
		 * while locked, but an unwrap prior to the next unlock will not
		 * be allowed).
		 */
		if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			error = cp_restore_keys (entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}

		if (newclass == PROTECTION_CLASS_F) {
			/* Verify that file is blockless if switching to class F */
			if (cp->c_datafork->ff_size > 0) {
				error = EINVAL;
				goto out;
			}

			cp_key_pair_t *cpkp;
			cprotect_t new_entry = cp_entry_alloc(NULL, 0, CP_MAX_KEYSIZE, &cpkp);

			if (!new_entry) {
				error = ENOMEM;
				goto out;
			}

			/* newclass is only the effective class */
			new_entry->cp_pclass = newclass;
			new_entry->cp_key_os_version = cp_os_version();
			new_entry->cp_key_revision = cp_next_key_revision(entry->cp_key_revision);

			cpx_t cpx = cpkp_cpx(cpkp);

			/* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */
			cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
			read_random (cpx_key(cpx), CP_MAX_KEYSIZE);

			cp_replace_entry(hfsmp, cp, new_entry);

			error = 0;
			goto out;
		}

		/* Deny the setclass if file is to be moved from F to something else */
		if (entry->cp_pclass == PROTECTION_CLASS_F) {
			error = EPERM;
			goto out;
		}

		if (!cpkp_has_pers_key(&entry->cp_keys)) {
			struct cprotect *new_entry = NULL;
			/*
			 * We want to fail if we can't wrap to the target class. By not setting
			 * CP_KEYWRAP_DIFFCLASS, we tell keygeneration that if it can't wrap
			 * to 'newclass' then error out.
			 */
			uint32_t flags = 0;
			error = cp_generate_keys (hfsmp, cp, newclass, flags, &new_entry);
			if (error == 0) {
				cp_replace_entry (hfsmp, cp, new_entry);
			}
			/* Bypass the setxattr code below since generate_keys does it for us */
			goto out;
		}

		cprotect_t new_entry;
		error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys, entry,
						  (cp_new_alloc_fn)cp_entry_alloc, (void **)&new_entry);
		if (error) {
			/* we didn't have perms to set this class. leave file as-is and error out */
			goto out;
		}

#if HFS_CONFIG_KEY_ROLL
		hfs_cp_key_roll_ctx_t *new_key_roll_ctx = NULL;
		if (entry->cp_key_roll_ctx) {
			error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_key_roll_ctx->ckr_keys,
							  entry->cp_key_roll_ctx,
							  (cp_new_alloc_fn)hfs_key_roll_ctx_alloc,
							  (void **)&new_key_roll_ctx);

			if (error) {
				cp_entry_dealloc(hfsmp, new_entry);
				goto out;
			}

			new_entry->cp_key_roll_ctx = new_key_roll_ctx;
		}
#endif

		new_entry->cp_pclass = newclass;

		cp_replace_entry(hfsmp, cp, new_entry);
		entry = new_entry;
	}
	else if (vnode_isdir(vp)) {
		/* For directories, just update the pclass. newclass is only effective class */
		entry->cp_pclass = newclass;
		error = 0;
	}
	else {
		/* anything else, just error out */
		error = EINVAL;
		goto out;
	}

	/*
	 * We get here if the new class was F, or if we were re-wrapping a cprotect that already
	 * existed. If the keys were never generated, then they'll skip the setxattr calls.
	 */

	error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
	if (error == ENOATTR) {
		error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
	}

out:

	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}


int cp_vnode_transcode(vnode_t vp, cp_key_t *k)
{
	struct cnode *cp;
	struct cprotect *entry = 0;
	int error = 0;
	int took_truncate_lock = 0;
	struct hfsmount *hfsmp = NULL;

	/* Structures passed between HFS and AKS */
	struct aks_cred_s access_in;
	struct aks_wrapped_key_s wrapped_key_in, wrapped_key_out;

	/* Is this an interesting vp? */
	if (!cp_vnode_is_eligible(vp)) {
		return EBADF;
	}

	/* Is the mount point formatted for content protection? */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return ENOTSUP;
	}

	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * Take the cnode truncate lock exclusive because we want to manipulate the
	 * CP blob. The lock-event handling code is doing the same. This also forces
	 * all pending IOs to drain before we can re-write the persistent and cache keys.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
		/* don't leak the truncate lock taken above */
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return EINVAL;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Send the per-file key in wrapped form for re-wrap with the current class information.
	 * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
	 * Don't need to process any outputs, so just clear the locks and pass along the error.
	 */
	if (vnode_isreg(vp)) {

		/*
		 * Picked up the following from cp_wrap().
		 * If needed, more comments available there.
		 */

		if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
			error = EINVAL;
			goto out;
		}

		cp_init_access(&access_in, cp);

		bzero(&wrapped_key_in, sizeof(wrapped_key_in));
		bzero(&wrapped_key_out, sizeof(wrapped_key_out));

		cp_key_pair_t *cpkp = &entry->cp_keys;

#if HFS_CONFIG_KEY_ROLL
		if (entry->cp_key_roll_ctx)
			cpkp = &entry->cp_key_roll_ctx->ckr_keys;
#endif

		wrapped_key_in.key = cpkp_pers_key(cpkp);
		wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);

		if (!wrapped_key_in.key_len) {
			error = EINVAL;
			goto out;
		}

		/* Use the actual persistent class when talking to AKS */
		wrapped_key_in.dp_class = entry->cp_pclass;
		wrapped_key_out.key = k->key;
		wrapped_key_out.key_len = k->len;

		error = hfs_backup_key(&access_in,
							   &wrapped_key_in,
							   &wrapped_key_out);

		if (error)
			error = EPERM;
		else
			k->len = wrapped_key_out.key_len;
	}

out:
	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}
	hfs_unlock(cp);
	return error;
}


/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock. This is because
 * the thread calling us may already have the truncate lock. It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys. We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
	struct cprotect *entry;
	int error = 0;
	struct hfsmount *hfsmp = NULL;
	struct cnode *cp = NULL;

	/*
	 * First, do validation against the vnode before proceeding any further:
	 * Is this vnode originating from a valid content-protected filesystem?
	 */
	if (cp_vnode_is_eligible(vp) == 0) {
		/*
		 * It is either not HFS or not a file/dir. Just return success. This is a valid
		 * case if servicing i/o against another filesystem type from VFS
		 */
		return 0;
	}

	if (cp_fs_protected (VTOVFS(vp)) == 0) {
		/*
		 * The underlying filesystem does not support content protection. This is also
		 * a valid case. Simply return success.
		 */
		return 0;
	}

	/*
	 * At this point, we know we have a HFS vnode that backs a file or directory on a
	 * filesystem that supports content protection
	 */
	cp = VTOC(vp);

	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;

	if (entry == NULL) {
		/*
		 * If this cnode is not content protected, simply return success.
		 * Note that this function is called by all I/O-based call sites
		 * when CONFIG_PROTECT is enabled during XNU building.
		 */

		/*
		 * All files should have cprotect structs. It's possible to encounter
		 * a directory from a V2.0 CP system but all files should have protection
		 * EAs
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}

		goto out;
	}

	vp = CTOV(cp, 0);
	if (vp == NULL) {
		/* is it a rsrc */
		vp = CTOV(cp,1);
		if (vp == NULL) {
			error = EINVAL;
			goto out;
		}
	}
	hfsmp = VTOHFS(vp);

	if ((error = cp_check_access(cp, hfsmp, vnop))) {
		/* check for raw encrypted access before bailing out */
		if ((ioflag & IO_ENCRYPTED)
#if HFS_CONFIG_KEY_ROLL
			// If we're rolling, we need the keys
			&& !hfs_is_key_rolling(cp)
#endif
			&& (vnop == CP_READ_ACCESS)) {
			/*
			 * read access only + asking for the raw encrypted bytes
			 * is legitimate, so reset the error value to 0
			 */
			error = 0;
		}
		else {
			goto out;
		}
	}

	if (!ISSET(entry->cp_flags, CP_NO_XATTR)) {
		if (!S_ISREG(cp->c_mode))
			goto out;

		// If we have a persistent key and the cached key, we're done
		if (!cp_needs_pers_key(entry)
			&& cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
			goto out;
		}
	}

	/* upgrade to exclusive lock */
	if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
			return error;
		}
	} else {
		cp->c_lockowner = current_thread();
	}

	/* generate new keys if none have ever been saved */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/*
		 * It's ok if this ends up being wrapped in a different class than 'pclass'.
		 * class modification is OK here.
		 */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;

		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/* unwrap keys if needed */
	if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
		if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
			/* no need to try to restore keys; they are not going to be used */
			error = 0;
		}
		else {
			error = cp_restore_keys(entry, hfsmp, cp);
			if (error) {
				goto out;
			}
		}
	}

	/* write out the xattr if it's new */
	if (entry->cp_flags & CP_NO_XATTR)
		error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:

	hfs_unlock(cp);
	return error;
}

#if HFS_TMPDBG
#if !SECURE_KERNEL
static void cp_log_eperm (struct vnode* vp, int pclass, boolean_t create) {
	char procname[256] = {};
	const char *fname = "unknown";
	const char *dbgop = "open";

	int ppid = proc_selfpid();
	/* selfname does a strlcpy so we're OK */
	proc_selfname(procname, sizeof(procname));
	if (vp && vp->v_name) {
		/* steal from the namecache */
		fname = vp->v_name;
	}

	if (create) {
		dbgop = "create";
	}

	printf("proc %s (pid %d) class %d, op: %s failure @ file %s\n", procname, ppid, pclass, dbgop, fname);
}
#endif
#endif


int
cp_handle_open(struct vnode *vp, int mode)
{
	struct cnode *cp = NULL;
	struct cprotect *entry = NULL;
	struct hfsmount *hfsmp;
	int error = 0;

	/* If vnode not eligible, just return success */
	if (!cp_vnode_is_eligible(vp)) {
		return 0;
	}

	/* If mount point not properly set up, then also return success */
	if (!cp_fs_protected(VTOVFS(vp))) {
		return 0;
	}

	cp = VTOC(vp);

	// Allow if raw encrypted mode requested
	if (ISSET(mode, FENCRYPTED)) {
#if HFS_CONFIG_KEY_ROLL
		// If we're rolling, we need the keys
		hfs_lock_always(cp, HFS_SHARED_LOCK);
		bool rolling = hfs_is_key_rolling(cp);
		hfs_unlock(cp);
		if (!rolling)
			return 0;
#else
		return 0;
#endif
	}
	if (ISSET(mode, FUNENCRYPTED)) {
		return 0;
	}

	/* We know the vnode is in a valid state. Acquire cnode and validate */
	hfsmp = VTOHFS(vp);

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		return error;
	}

	entry = cp->c_cpentry;
	if (entry == NULL) {
		/*
		 * If the mount is protected and we couldn't get a cprotect for this vnode,
		 * then it's not valid for opening.
		 */
		if (vnode_isreg(vp)) {
			error = EPERM;
		}
		goto out;
	}

	if (!S_ISREG(cp->c_mode))
		goto out;

	/*
	 * Does the cnode have keys yet? If not, then generate them.
	 */
	if (cp_needs_pers_key(entry)) {
		struct cprotect *newentry = NULL;
		/* Allow the keybag to override our class preferences */
		uint32_t flags = CP_KEYWRAP_DIFFCLASS;
		error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
		if (error == 0) {
			cp_replace_entry (hfsmp, cp, newentry);
			entry = newentry;
		}
		else {
			goto out;
		}
	}

	/*
	 * We want to minimize the number of unwraps that we'll have to do since
	 * the cost can vary, depending on the platform we're running.
	 */
	switch (CP_CLASS(entry->cp_pclass)) {
		case PROTECTION_CLASS_B:
			if (mode & O_CREAT) {
				/*
				 * Class B always allows creation. Since O_CREAT was passed through
				 * we infer that this was a newly created vnode/cnode. Even though a potential
				 * race exists when multiple threads attempt to create/open a particular
				 * file, only one can "win" and actually create it. VFS will unset the
				 * O_CREAT bit on the loser.
				 *
				 * Note that skipping the unwrap check here is not a security issue --
				 * we have to unwrap the key permanently upon the first I/O.
				 */
				break;
			}

			if (cpx_has_key(cpkp_cpx(&entry->cp_keys)) && !ISSET(mode, FENCRYPTED)) {
				/*
				 * For a class B file, attempt the unwrap if we have the key in
				 * core already.
				 * The device could have just transitioned into the lock state, and
				 * this vnode may not yet have been purged from the vnode cache (which would
				 * remove the keys).
				 */
				struct aks_cred_s access_in;
				struct aks_wrapped_key_s wrapped_key_in;

				cp_init_access(&access_in, cp);
				bzero(&wrapped_key_in, sizeof(wrapped_key_in));
				wrapped_key_in.key = cpkp_pers_key(&entry->cp_keys);
				wrapped_key_in.key_len = cpkp_pers_key_len(&entry->cp_keys);
				/* Use the persistent class when talking to AKS */
				wrapped_key_in.dp_class = entry->cp_pclass;
				error = hfs_unwrap_key(&access_in, &wrapped_key_in, NULL);
				if (error) {
					error = EPERM;
				}
				break;
			}
			/* otherwise, fall through to attempt the unwrap/restore */
		case PROTECTION_CLASS_A:
		case PROTECTION_CLASS_C:
			/*
			 * At this point we may need to attempt an unwrap; we want to
			 * make sure that open(2) fails properly if the device is either
			 * just-locked or never made it past first unlock. Since the
			 * keybag serializes access to the unwrapping keys for us and
			 * only calls our VFS callback once they've been purged, we will
			 * get here in two cases:
			 *
			 * A) we're in a window before the wrapping keys are purged; this is
			 *    OK since when they get purged, the vnode will get flushed if
			 *    needed.
			 *
			 * B) The keys are already gone. In this case, the restore_keys call
			 *    below will fail.
			 *
			 * Since this function is bypassed entirely if we're opening a raw
			 * encrypted file, we can always attempt the restore.
			 */
			if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
				error = cp_restore_keys(entry, hfsmp, cp);
			}

			if (error) {
				error = EPERM;
			}

			break;

		case PROTECTION_CLASS_D:
		default:
			break;
	}

out:

#if HFS_TMPDBG
#if !SECURE_KERNEL
	if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
		cp_log_eperm (vp, CP_CLASS(entry->cp_pclass), false);
	}
#endif
#endif

	hfs_unlock(cp);
	return error;
}


/*
 * cp_getrootxattr:
 * Reads the EA on the root folder (fileid 1) that records which version of
 * Content Protection was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to native endianness as needed.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
	void *buf;

	/*
	 * We allow for an extra 64 bytes to cater for upgrades. This wouldn't
	 * be necessary if the xattr routines just returned what we asked for.
	 */
	size_t bufsize = roundup(sizeof(struct cp_root_xattr) + 64, 64);

	int error = 0;

	hfs_assert(outxattr);

	buf = hfs_malloc(bufsize);

	uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

	uio_addiov(uio, CAST_USER_ADDR_T(buf), bufsize);

	size_t attrsize = bufsize;

	struct vnop_getxattr_args args = {
		.a_uio = uio,
		.a_name = CONTENT_PROTECTION_XATTR_NAME,
		.a_size = &attrsize
	};

	error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);

	uio_free(uio);

	if (error != 0) {
		goto out;
	}

	if (attrsize < CP_ROOT_XATTR_MIN_LEN) {
		error = HFS_EINCONSISTENT;
		goto out;
	}

	const struct cp_root_xattr *xattr = buf;

	bzero(outxattr, sizeof(*outxattr));

	/* Now convert the multi-byte fields to native endianness */
	outxattr->major_version = OSSwapLittleToHostInt16(xattr->major_version);
	outxattr->minor_version = OSSwapLittleToHostInt16(xattr->minor_version);
	outxattr->flags = OSSwapLittleToHostInt64(xattr->flags);

	if (outxattr->major_version >= CP_VERS_5) {
		if (attrsize < sizeof(struct cp_root_xattr)) {
			error = HFS_EINCONSISTENT;
			goto out;
		}
#if HFS_CONFIG_KEY_ROLL
		outxattr->auto_roll_min_version = OSSwapLittleToHostInt32(xattr->auto_roll_min_version);
		outxattr->auto_roll_max_version = OSSwapLittleToHostInt32(xattr->auto_roll_max_version);
#endif
	}

out:
	hfs_free(buf, bufsize);
	return error;
}

/*
 * cp_setrootxattr:
 * Writes the EA on the root folder (fileid 1) that records which version of
 * Content Protection was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to little endian as needed.
 *
 * This will be written to the disk when it detects the EA is not there, or when we need
 * to make a modification to the on-disk version that can be done in-place.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
	int error = 0;
	struct vnop_setxattr_args args;

	args.a_desc = NULL;
	args.a_vp = NULL;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL;		// pass data ptr instead
	args.a_options = 0;
	args.a_context = NULL;	// no context needed, only done from mount.

	const uint64_t flags = newxattr->flags;

	/* Now convert the multi-byte fields to little endian before writing to disk. */
	newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

	int xattr_size = sizeof(struct cp_root_xattr);

#if HFS_CONFIG_KEY_ROLL
	bool upgraded = false;

	if (newxattr->auto_roll_min_version || newxattr->auto_roll_max_version) {
		if (newxattr->major_version < CP_VERS_5) {
			printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

			newxattr->major_version = CP_CURRENT_VERS;
			newxattr->minor_version = CP_MINOR_VERS;

			upgraded = true;
		}

		newxattr->auto_roll_min_version = OSSwapHostToLittleInt32(newxattr->auto_roll_min_version);
		newxattr->auto_roll_max_version = OSSwapHostToLittleInt32(newxattr->auto_roll_max_version);
	} else if (newxattr->major_version == CP_VERS_4)
		xattr_size = offsetof(struct cp_root_xattr, auto_roll_min_version);
#endif

	newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
	newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);

	error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
								  xattr_size, &args, hfsmp, 1);

	if (!error) {
		hfsmp->cproot_flags = flags;
#if HFS_CONFIG_KEY_ROLL
		if (upgraded)
			hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
#endif
	}

	return error;
}


/*
 * Stores new xattr data on the cnode.
 * cnode lock held exclusive (if available).
 *
 * This function is also invoked during file creation.
 */
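/*
 * On-disk layout sketch of the v5 CP xattr written below (field order and
 * widths as implied by the OSSwap* calls in this function):
 *
 *   u16 xattr_major_version | u16 xattr_minor_version | u32 flags
 *   u32 persistent_class    | u32 key_os_version
 *   u16 key_revision        | u16 key_len | u8 persistent_key[key_len]
 *   -- only if CP_XAF_KEY_ROLLING --
 *   u64 off_rsrc | u16 key_len | u8 key[key_len]   (struct cp_roll_info)
 */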
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp,
				uint32_t fileid, int options)
{
	int error = 0;
	cp_key_pair_t *cpkp = &entry->cp_keys;
#if HFS_CONFIG_KEY_ROLL
	bool rolling = entry->cp_key_roll_ctx != NULL;

	if (rolling && entry->cp_key_roll_ctx->ckr_off_rsrc == INT64_MAX) {
		// We've finished rolling, but we still have the context
		rolling = false;
		cpkp = &entry->cp_key_roll_ctx->ckr_keys;
	}
#endif

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	if (hfsmp->hfs_running_cp_major_vers < CP_CURRENT_VERS) {
		// Upgrade
		printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

		struct cp_root_xattr root_xattr;

		error = cp_getrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		root_xattr.major_version = CP_CURRENT_VERS;
		root_xattr.minor_version = CP_MINOR_VERS;

		error = cp_setrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
	}

	struct cp_xattr_v5 *xattr;
	xattr = hfs_malloc(sizeof(*xattr));

	xattr->xattr_major_version = OSSwapHostToLittleConstInt16(CP_VERS_5);
	xattr->xattr_minor_version = OSSwapHostToLittleConstInt16(CP_MINOR_VERS);
	xattr->flags = 0;
#if HFS_CONFIG_KEY_ROLL
	if (rolling)
		xattr->flags |= CP_XAF_KEY_ROLLING;
#endif
	xattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
	xattr->key_os_version = OSSwapHostToLittleInt32(entry->cp_key_os_version);
	xattr->key_revision = OSSwapHostToLittleInt16(entry->cp_key_revision);

	uint16_t key_len = cpkp_pers_key_len(cpkp);
	xattr->key_len = OSSwapHostToLittleInt16(key_len);
	memcpy(xattr->persistent_key, cpkp_pers_key(cpkp), key_len);

	size_t xattr_len = offsetof(struct cp_xattr_v5, persistent_key) + key_len;

#if HFS_CONFIG_KEY_ROLL
	if (rolling) {
		struct cp_roll_info *roll_info = PTR_ADD(struct cp_roll_info *, xattr, xattr_len);

		roll_info->off_rsrc = OSSwapHostToLittleInt64(entry->cp_key_roll_ctx->ckr_off_rsrc);

		key_len = cpkp_pers_key_len(&entry->cp_key_roll_ctx->ckr_keys);
		roll_info->key_len = OSSwapHostToLittleInt16(key_len);

		memcpy(roll_info->key, cpkp_pers_key(&entry->cp_key_roll_ctx->ckr_keys), key_len);

		xattr_len += offsetof(struct cp_roll_info, key) + key_len;
	}
#endif

	struct vnop_setxattr_args args = {
		.a_vp = cp ? cp->c_vp : NULL,
		.a_name = CONTENT_PROTECTION_XATTR_NAME,
		.a_options = options,
		.a_context = vfs_context_current(),
	};

	error = hfs_setxattr_internal(cp, xattr, xattr_len, &args, hfsmp, fileid);

	hfs_free(xattr, sizeof(*xattr));

	if (error == 0) {
		entry->cp_flags &= ~CP_NO_XATTR;
	}

	return error;
}

/*
 * Used by an fcntl to query the underlying FS for its content protection version #
 */

int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the level */

	err = cp_root_major_vers(mp);

	if (err == 0) {
		*level = hfsmp->hfs_running_cp_major_vers;
	}
	/* in error case, cp_root_major_vers will just return EINVAL. Use that */

	return err;
}

/* Used by fcntl to query default protection level of FS */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
	int err = 0;
	struct hfsmount *hfsmp = NULL;
	struct mount *mp = NULL;

	mp = VTOVFS(vp);

	/* check if it supports content protection */
	if (cp_fs_protected(mp) == 0) {
		return ENOTSUP;
	}

	hfsmp = VFSTOHFS(mp);
	/* figure out the default */

	*level = hfsmp->default_cp_class;
	return err;
}

/********************
 * Private Functions
 *******************/

static int
cp_root_major_vers(mount_t mp)
{
	int err = 0;
	struct cp_root_xattr xattr;
	struct hfsmount *hfsmp = NULL;

	hfsmp = vfs_fsprivate(mp);
	err = cp_getrootxattr (hfsmp, &xattr);

	if (err == 0) {
		hfsmp->hfs_running_cp_major_vers = xattr.major_version;
	}
	else {
		return EINVAL;
	}

	return 0;
}

static int
cp_vnode_is_eligible(struct vnode *vp)
{
	return !vnode_issystem(vp) && (vnode_isreg(vp) || vnode_isdir(vp));
}

#if DEBUG
static const uint32_t cp_magic1 = 0x7b727063;	// cpr{
static const uint32_t cp_magic2 = 0x7270637d;	// }cpr
#endif

struct cprotect *
cp_entry_alloc(cprotect_t old, uint16_t pers_key_len,
			   uint16_t cached_key_len, cp_key_pair_t **pcpkp)
{
	struct cprotect *cp_entry;

	if (pers_key_len > CP_MAX_WRAPPEDKEYSIZE)
		return (NULL);

	size_t size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
				   + cpkp_size(pers_key_len, cached_key_len));

#if DEBUG
	size += 4;		// Extra for magic2
#endif

	cp_entry = hfs_malloc(size);

	if (old) {
		memcpy(cp_entry, old, offsetof(struct cprotect, cp_keys));

#if HFS_CONFIG_KEY_ROLL
		// We don't copy the key roll context
		cp_entry->cp_key_roll_ctx = NULL;
#endif
	} else {
		bzero(cp_entry, offsetof(struct cprotect, cp_keys));
	}

#if DEBUG
	cp_entry->cp_magic1 = cp_magic1;
	*PTR_ADD(uint32_t *, cp_entry, size - 4) = cp_magic2;
#endif

	cpkp_init(&cp_entry->cp_keys, pers_key_len, cached_key_len);

	/*
	 * If we've been passed the old entry, then we are in the process of
	 * rewrapping in which case we need to copy the cached key. This is
	 * important for class B files when the device is locked because we
	 * won't be able to unwrap whilst in this state, yet we still need the
	 * unwrapped key.
	 */
	if (old)
		cpx_copy(cpkp_cpx(&old->cp_keys), cpkp_cpx(&cp_entry->cp_keys));

	if (pcpkp)
		*pcpkp = &cp_entry->cp_keys;

	return cp_entry;
}
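
/*
 * Allocation layout sketch for a cprotect entry (DEBUG build), as implied
 * by cp_entry_alloc()/cp_entry_dealloc():
 *
 *   [ struct cprotect fields ... cp_magic1 | cp_keys (variable length:
 *     cpx + wrapped key) | cp_magic2 (4 bytes, DEBUG only) ]
 *
 * The trailing magic lets cp_entry_dealloc() assert that the
 * variable-length tail was not overrun.
 */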

static void
cp_entry_dealloc(__unused hfsmount_t *hfsmp, struct cprotect *entry)
{
#if HFS_CONFIG_KEY_ROLL
	hfs_release_key_roll_ctx(hfsmp, entry);
#endif

	cpkp_flush(&entry->cp_keys);

	size_t entry_size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
						 + cpkp_sizex(&entry->cp_keys));

#if DEBUG
	hfs_assert(entry->cp_magic1 == cp_magic1);
	hfs_assert(*PTR_ADD(uint32_t *, entry, sizeof(struct cprotect) - sizeof(cp_key_pair_t)
						+ cpkp_sizex(&entry->cp_keys)) == cp_magic2);
1625
1626 entry_size += 4; // Extra for magic2
1627 #endif
1628
1629 hfs_free(entry, entry_size);
1630 }
1631
1632 static int cp_read_xattr_v4(__unused hfsmount_t *hfsmp, struct cp_xattr_v4 *xattr,
1633 size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
1634 {
1635 /* Endian swap the multi-byte fields into host endianness from L.E. */
1636 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1637 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1638 xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
1639 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1640 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1641 xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
1642
1643 /*
1644 * Prevent a buffer overflow, and validate the key length obtained from the
1645 * EA. If it's too big, then bail out, because the EA can't be trusted at this
1646 * point.
1647 */
1648 if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE)
1649 return HFS_EINCONSISTENT;
1650
1651 size_t min_len = offsetof(struct cp_xattr_v4, persistent_key) + xattr->key_size;
1652 if (xattr_len < min_len)
1653 return HFS_EINCONSISTENT;
1654
1655 /*
1656 * Class F files have no backing key; their keylength should be 0,
1657 * though they should have the proper flags set.
1658 *
1659 * A request to instantiate a CP for a class F file should result
1660 * in a bzero'd cp that just says class F, with key_flushed set.
1661 */
1662 if (CP_CLASS(xattr->persistent_class) == PROTECTION_CLASS_F
1663 || ISSET(xattr->flags, CP_XAF_NEEDS_KEYS)) {
1664 xattr->key_size = 0;
1665 }
1666
1667 /* set up entry with information from xattr */
1668 cp_key_pair_t *cpkp;
1669 cprotect_t entry;
1670
1671 if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1672 /* caller passed in a pre-allocated structure to get the basic info */
1673 entry = *pcpr;
1674 bzero(entry, offsetof(struct cprotect, cp_keys));
1675 }
1676 else {
1677 entry = cp_entry_alloc(NULL, xattr->key_size, CP_MAX_CACHEBUFLEN, &cpkp);
1678 }
1679
1680 entry->cp_pclass = xattr->persistent_class;
1681 entry->cp_key_os_version = xattr->key_os_version;
1682
1683
1684 if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1685 if (xattr->key_size) {
1686 cpkp_set_pers_key_len(cpkp, xattr->key_size);
1687 memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_size);
1688 }
1689
1690 *pcpr = entry;
1691 }
1692 else if (xattr->key_size) {
1693 SET(entry->cp_flags, CP_HAS_A_KEY);
1694 }
1695
1696 return 0;
1697 }
1698
1699 int cp_read_xattr_v5(hfsmount_t *hfsmp, struct cp_xattr_v5 *xattr,
1700 size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
1701 {
1702 if (xattr->xattr_major_version == OSSwapHostToLittleConstInt16(CP_VERS_4)) {
1703 return cp_read_xattr_v4(hfsmp, (struct cp_xattr_v4 *)xattr, xattr_len, pcpr, options);
1704 }
1705
1706 xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
1707
1708 if (xattr->xattr_major_version != CP_VERS_5) {
1709 printf("hfs: cp_getxattr: unsupported xattr version %d\n",
1710 xattr->xattr_major_version);
1711 return ENOTSUP;
1712 }
1713
1714 size_t min_len = offsetof(struct cp_xattr_v5, persistent_key);
1715
1716 if (xattr_len < min_len)
1717 return HFS_EINCONSISTENT;
1718
1719 xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
1720 xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
1721 xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
1722 xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
1723 xattr->key_revision = OSSwapLittleToHostInt16(xattr->key_revision);
1724 xattr->key_len = OSSwapLittleToHostInt16(xattr->key_len);
1725
1726 uint16_t pers_key_len = xattr->key_len;
1727
1728 min_len += pers_key_len;
1729 if (xattr_len < min_len)
1730 return HFS_EINCONSISTENT;
1731
1732 #if HFS_CONFIG_KEY_ROLL
1733 struct cp_roll_info *roll_info = NULL;
1734
1735 if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
1736 roll_info = PTR_ADD(struct cp_roll_info *, xattr, min_len);
1737
1738 min_len += offsetof(struct cp_roll_info, key);
1739
1740 if (xattr_len < min_len)
1741 return HFS_EINCONSISTENT;
1742
1743 roll_info->off_rsrc = OSSwapLittleToHostInt64(roll_info->off_rsrc);
1744
1745 if (roll_info->off_rsrc % hfsmp->blockSize)
1746 return HFS_EINCONSISTENT;
1747
1748 roll_info->key_len = OSSwapLittleToHostInt16(roll_info->key_len);
1749
1750 min_len += roll_info->key_len;
1751 if (xattr_len < min_len)
1752 return HFS_EINCONSISTENT;
1753 }
1754 #endif
1755
1756 cp_key_pair_t *cpkp;
1757 cprotect_t entry;
1758
1759 /*
1760 * If option CP_GET_XATTR_BASIC_INFO is set, we only return basic
1761 * information about the file's protection (and not the key) and
1762 * we store the result in the structure the caller passed to us.
1763 */
1764 if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1765 entry = *pcpr;
1766 bzero(entry, offsetof(struct cprotect, cp_keys));
1767 #if HFS_CONFIG_KEY_ROLL
1768 if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
1769 SET(entry->cp_flags, CP_KEY_IS_ROLLING);
1770 }
1771 #endif
1772 } else {
1773 entry = cp_entry_alloc(NULL, xattr->key_len, CP_MAX_CACHEBUFLEN, &cpkp);
1774 }
1775
1776 entry->cp_pclass = xattr->persistent_class;
1777 entry->cp_key_os_version = xattr->key_os_version;
1778 entry->cp_key_revision = xattr->key_revision;
1779
1780 if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
1781 if (xattr->key_len) {
1782 cpkp_set_pers_key_len(cpkp, xattr->key_len);
1783 memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_len);
1784 }
1785
1786 #if HFS_CONFIG_KEY_ROLL
1787 if (roll_info) {
1788 entry->cp_key_roll_ctx = hfs_key_roll_ctx_alloc(NULL, roll_info->key_len,
1789 CP_MAX_CACHEBUFLEN, &cpkp);
1790
1791 entry->cp_key_roll_ctx->ckr_off_rsrc = roll_info->off_rsrc;
1792
1793 if (roll_info->key_len) {
1794 cpkp_set_pers_key_len(cpkp, roll_info->key_len);
1795 memcpy(cpkp_pers_key(cpkp), roll_info->key, roll_info->key_len);
1796 }
1797 }
1798 #endif
1799
1800 *pcpr = entry;
1801 }
1802 else if (xattr->key_len) {
1803 SET(entry->cp_flags, CP_HAS_A_KEY);
1804 }
1805
1806 return 0;
1807 }
1808
1809 /*
1810 * Initializes a new cprotect entry with xattr data from the cnode.
1811 * cnode lock held shared
1812 */
1813 static int
1814 cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, cprotect_t *outentry)
1815 {
1816 size_t xattr_len;
1817 struct cp_xattr_v5 *xattr;
1818
1819 xattr = hfs_malloc(xattr_len = sizeof(*xattr));
1820
1821 int error = hfs_xattr_read(cp->c_vp, CONTENT_PROTECTION_XATTR_NAME,
1822 xattr, &xattr_len);
1823
1824 if (!error) {
1825 if (xattr_len < CP_XATTR_MIN_LEN)
1826 error = HFS_EINCONSISTENT;
1827 else
1828 error = cp_read_xattr_v5(hfsmp, xattr, xattr_len, outentry, 0);
1829 }
1830
1831 #if DEBUG
1832 if (error && error != ENOATTR) {
1833 printf("cp_getxattr: bad cp xattr (%d):\n", error);
1834 for (size_t i = 0; i < xattr_len; ++i)
1835 printf("%02x ", ((uint8_t *)xattr)[i]);
1836 printf("\n");
1837 }
1838 #endif
1839
1840 hfs_free(xattr, sizeof(*xattr));
1841
1842 return error;
1843 }
1844
1845 /*
1846 * If permitted, restore entry's unwrapped key from the persistent key.
1847 * If not, clear key and set CP_KEY_FLUSHED.
1848 * cnode lock held exclusive
1849 */
1850 static int
1851 cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
1852 {
1853 int error = 0;
1854
1855 error = cp_unwrap(hfsmp, entry, cp);
1856 if (error) {
1857 cp_flush_cached_keys(entry);
1858 error = EPERM;
1859 }
1860 return error;
1861 }
1862
1863 void cp_device_locked_callback(mount_t mp, cp_lock_state_t state)
1864 {
1865 struct hfsmount *hfsmp;
1866
1867 /*
1868 * When iterating the various mount points that may
1869 * be present on a content-protected device, we need to skip
1870 * those that do not have it enabled.
1871 */
1872 if (!cp_fs_protected(mp)) {
1873 return;
1874 }
1875
1876 hfsmp = VFSTOHFS(mp);
1877
1878 hfsmp->hfs_cp_lock_state = state;
1879
1880 if (state == CP_LOCKED_STATE) {
1881 /*
1882 * We respond only to lock events. Since cprotect structs
1883 * decrypt/restore keys lazily, the unlock events don't
1884 * actually cause anything to happen.
1885 */
1886 vnode_iterate(mp, 0, cp_lock_vnode_callback, (void *)(uintptr_t)state);
1887 }
1888 }
1889
1890 /*
1891 * Deny access to protected files if keys have been locked.
1892 */
1893 static int
1894 cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
1895 {
1896 int error = 0;
1897
1898 /*
1899 * For now it's OK to examine the state variable here without
1900 * holding the HFS lock. This is only a short-circuit; if the state
1901 * transitions (or is in transition) after we examine this field, we'd
1902 * have to handle that anyway.
1903 */
1904 if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
1905 return 0;
1906 }
1907
1908 if (!cp->c_cpentry) {
1909 /* unprotected node */
1910 return 0;
1911 }
1912
1913 if (!S_ISREG(cp->c_mode)) {
1914 return 0;
1915 }
1916
1917 /* Deny all access for class A files */
1918 switch (CP_CLASS(cp->c_cpentry->cp_pclass)) {
1919 case PROTECTION_CLASS_A: {
1920 error = EPERM;
1921 break;
1922 }
1923 default:
1924 error = 0;
1925 break;
1926 }
1927
1928 return error;
1929 }
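/*
 * Illustrative sketch of the short-circuit above in use. The real
 * vnop-level gate is cp_handle_vnop() (seen later in
 * cp_handle_strategy()); this hypothetical helper only shows the
 * expected shape of a read-side check.
 */
#if 0
static int
cp_gate_read_sketch(struct cnode *cp, struct hfsmount *hfsmp)
{
	int error = cp_check_access(cp, hfsmp, CP_READ_ACCESS);
	if (error)
		return error;	/* class A file while the device is locked */

	/* ... restore keys if flushed, then perform the read ... */
	return 0;
}
#endif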
1930
1931 /*
1932 * Respond to a lock or unlock event.
1933 * On lock: sync out dirty pages, clear cached keys from memory, then invalidate remaining pages.
1934 * On unlock: nothing (this function is not called).
1935 */
1936 static int
1937 cp_lock_vnode_callback(struct vnode *vp, void *arg)
1938 {
1939 cnode_t *cp = NULL;
1940 struct cprotect *entry = NULL;
1941 int error = 0;
1942 int locked = 1;
1943 unsigned long action = 0;
1944 int took_truncate_lock = 0;
1945
1946 error = vnode_getwithref (vp);
1947 if (error) {
1948 return error;
1949 }
1950
1951 cp = VTOC(vp);
1952
1953 /*
1954 * When cleaning cnodes due to a lock event, we must
1955 * take the truncate lock AND the cnode lock. Acquiring
1956 * the truncate lock here forces (nearly) all pending IOs
1957 * to drain before we proceed. All HFS cluster
1958 * io calls except for swapfile IO need to acquire the truncate lock
1959 * prior to calling into the cluster layer.
1960 */
1961 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1962 took_truncate_lock = 1;
1963
1964 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
1965
1966 entry = cp->c_cpentry;
1967 if (!entry) {
1968 /* unprotected vnode: not a regular file */
1969 goto out;
1970 }
1971
1972 action = (unsigned long) arg;
1973 switch (action) {
1974 case CP_LOCKED_STATE: {
1975 vfs_context_t ctx;
1976 if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
1977 vnode_isdir(vp)) {
1978 /*
1979 * For classes other than A, there is no change at lock time.
1980 * Class B is kept in memory for writing, and class F (for VM) does
1981 * not have a wrapped key, so there is no work needed for
1982 * wrapping/unwrapping.
1983 *
1984 * Note that 'class F' is relevant here because
1985 * hfs_vnop_strategy does not take the cnode lock
1986 * to protect the cp blob across IO operations; we rely
1987 * implicitly on the truncate lock being held when doing IO.
1988 * The only case where the truncate lock is not held is during
1989 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
1990 * directly to cluster_pageout.
1991 */
1992 goto out;
1993 }
1994
1995 /* Before doing anything else, zero-fill sparse ranges as needed */
1996 ctx = vfs_context_current();
1997 (void) hfs_filedone (vp, ctx, 0);
1998
1999 /* first, sync back dirty pages */
2000 hfs_unlock (cp);
2001 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
2002 hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2003
2004 /* Flush keys:
2005 * There was a concern here (9206856) about flushing keys before the NAND layer is done using them.
2006 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
2007 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF,
2008 * which in turn calls buf_biodone() and eventually unblocks ubc_msync().
2009 * It was also verified that the cached data in IOFS is overwritten by other data, so there
2010 * is no key leakage in that layer.
2011 */
2012
2013 cp_flush_cached_keys(entry);
2014
2015 /* Some writes may have arrived in the meantime; dump those pages. */
2016 hfs_unlock(cp);
2017 locked = 0;
2018
2019 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
2020 break;
2021 }
2022 case CP_UNLOCKED_STATE: {
2023 /* no-op */
2024 break;
2025 }
2026 default:
2027 panic("Content Protection: unknown lock action %lu\n", action);
2028 }
2029
2030 out:
2031 if (locked) {
2032 hfs_unlock(cp);
2033 }
2034
2035 if (took_truncate_lock) {
2036 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
2037 }
2038
2039 vnode_put (vp);
2040 return error;
2041 }
2042
2043
2044 /*
2045 * cp_rewrap:
2046 *
2047 * Re-wrap the existing persistent key under a new protection class.
2048 */
2049
2050 int
2051 cp_rewrap(struct cnode *cp, __unused hfsmount_t *hfsmp,
2052 cp_key_class_t *newclass, cp_key_pair_t *cpkp, const void *old_holder,
2053 cp_new_alloc_fn alloc_fn, void **pholder)
2054 {
2055 struct cprotect *entry = cp->c_cpentry;
2056
2057 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2058 unsigned keylen = CP_MAX_WRAPPEDKEYSIZE;
2059 int error = 0;
2060 const cp_key_class_t key_class = CP_CLASS(*newclass);
2061
2062 /* Structures passed between HFS and AKS */
2063 struct aks_cred_s access_in;
2064 struct aks_wrapped_key_s wrapped_key_in;
2065 struct aks_wrapped_key_s wrapped_key_out;
2066
2067 /*
2068 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2069 * key that is only good as long as the file is open. There is no
2070 * wrapped key, so there isn't anything to wrap.
2071 */
2072 if (key_class == PROTECTION_CLASS_F) {
2073 return EINVAL;
2074 }
2075
2076 cp_init_access(&access_in, cp);
2077
2078 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
2079 wrapped_key_in.key = cpkp_pers_key(cpkp);
2080 wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);
2081 /* Use the persistent class when talking to AKS */
2082 wrapped_key_in.dp_class = entry->cp_pclass;
2083
2084 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2085 wrapped_key_out.key = new_persistent_key;
2086 wrapped_key_out.key_len = keylen;
2087
2088 /*
2089 * The inode is passed here so that the backup-bag wrapped blob
2090 * can be located from userspace. This lookup will occur shortly after
2091 * creation and only if the file still exists. Beyond this lookup the
2092 * inode is not used. Technically there is a race, but in practice
2093 * we don't lose.
2094 */
2095 error = hfs_rewrap_key(&access_in,
2096 key_class, /* new class */
2097 &wrapped_key_in,
2098 &wrapped_key_out);
2099
2100 keylen = wrapped_key_out.key_len;
2101
2102 if (error == 0) {
2103 /*
2104 * Verify that AKS returned to us a wrapped key of the
2105 * target class requested.
2106 */
2107 /* Get the effective class here */
2108 cp_key_class_t effective = CP_CLASS(wrapped_key_out.dp_class);
2109 if (effective != key_class) {
2110 /*
2111 * Fail the operation if defaults or some other enforcement
2112 * dictated that the class be wrapped differently.
2113 */
2114
2115 /* TODO: Invalidate the key when 12170074 unblocked */
2116 return EPERM;
2117 }
2118
2119 /* Allocate a new cpentry */
2120 cp_key_pair_t *new_cpkp;
2121 *pholder = alloc_fn(old_holder, keylen, CP_MAX_CACHEBUFLEN, &new_cpkp);
2122
2123 /* copy the new key into the entry */
2124 cpkp_set_pers_key_len(new_cpkp, keylen);
2125 memcpy(cpkp_pers_key(new_cpkp), new_persistent_key, keylen);
2126
2127 /* Record what AKS actually reported back, not the effective class derived from newclass */
2128 *newclass = wrapped_key_out.dp_class;
2129 }
2130 else {
2131 error = EPERM;
2132 }
2133
2134 return error;
2135 }
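/*
 * Illustrative sketch of a cp_rewrap() call site, assuming the caller
 * holds the cnode lock exclusive. cp_entry_alloc matches the
 * cp_new_alloc_fn contract used throughout this file. A real caller
 * would also persist the result with cp_setxattr(); error handling
 * and key-roll considerations are elided, and the helper name is
 * hypothetical.
 */
#if 0
static int
cp_rewrap_to_class_c_sketch(struct cnode *cp, hfsmount_t *hfsmp)
{
	struct cprotect *entry = cp->c_cpentry;
	struct cprotect *newentry = NULL;
	cp_key_class_t newclass = PROTECTION_CLASS_C;

	int error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys,
	                      entry, (cp_new_alloc_fn)cp_entry_alloc,
	                      (void **)&newentry);
	if (!error) {
		/* newclass now holds the class AKS actually wrapped with */
		newentry->cp_pclass = newclass;
		cp_replace_entry(hfsmp, cp, newentry);
	}
	return error;
}
#endif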
2136
2137 static int cpkp_unwrap(cnode_t *cp, cp_key_class_t key_class, cp_key_pair_t *cpkp)
2138 {
2139 int error = 0;
2140 uint8_t iv_key[CP_IV_KEYSIZE];
2141 cpx_t cpx = cpkp_cpx(cpkp);
2142
2143 /* Structures passed between HFS and AKS */
2144 struct aks_cred_s access_in;
2145 struct aks_wrapped_key_s wrapped_key_in;
2146 struct aks_raw_key_s key_out;
2147
2148 cp_init_access(&access_in, cp);
2149
2150 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
2151 wrapped_key_in.key = cpkp_pers_key(cpkp);
2152 wrapped_key_in.key_len = cpkp_max_pers_key_len(cpkp);
2153 /* Use the persistent class when talking to AKS */
2154 wrapped_key_in.dp_class = key_class;
2155
2156 bzero(&key_out, sizeof(key_out));
2157 key_out.iv_key = iv_key;
2158 key_out.key = cpx_key(cpx);
2159 /*
2160 * The unwrapper will validate and set both the IV key length and
2161 * the cache key length; however, we need to supply the correct
2162 * buffer lengths so that AKS knows how
2163 * many bytes it has to work with.
2164 */
2165 key_out.iv_key_len = CP_IV_KEYSIZE;
2166 key_out.key_len = cpx_max_key_len(cpx);
2167
2168 error = hfs_unwrap_key(&access_in, &wrapped_key_in, &key_out);
2169 if (!error) {
2170 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2171 panic ("cp_unwrap: invalid key length! (%ul)\n", key_out.key_len);
2172 }
2173
2174 if (key_out.iv_key_len != CP_IV_KEYSIZE)
2175 panic ("cp_unwrap: invalid iv key length! (%ul)\n", key_out.iv_key_len);
2176
2177 cpx_set_key_len(cpx, key_out.key_len);
2178
2179 cpx_set_aes_iv_key(cpx, iv_key);
2180 cpx_set_is_sep_wrapped_key(cpx, ISSET(key_out.flags, AKS_RAW_KEY_WRAPPEDKEY));
2181 } else {
2182 error = EPERM;
2183 }
2184
2185 return error;
2186 }
2187
2188 static int
2189 cp_unwrap(__unused struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
2190 {
2191 /*
2192 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2193 * key that is only good as long as the file is open. There is no
2194 * wrapped key, so there isn't anything to unwrap.
2195 */
2196 if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
2197 return EPERM;
2198 }
2199
2200 int error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_keys);
2201
2202 #if HFS_CONFIG_KEY_ROLL
2203 if (!error && entry->cp_key_roll_ctx) {
2204 error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_key_roll_ctx->ckr_keys);
2205 if (error)
2206 cpx_flush(cpkp_cpx(&entry->cp_keys));
2207 }
2208 #endif
2209
2210 return error;
2211 }
2212
2213 /*
2214 * cp_generate_keys
2215 *
2216 * Take a cnode that has already been initialized and establish persistent and
2217 * cache keys for it at this time. Note that at the time this is called, the
2218 * directory entry has already been created and we are holding the cnode lock
2219 * on 'cp'.
2220 *
2221 */
2222 int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, cp_key_class_t targetclass,
2223 uint32_t keyflags, struct cprotect **newentry)
2224 {
2225
2226 int error = 0;
2227 struct cprotect *newcp = NULL;
2228 *newentry = NULL;
2229
2230 /* Target class must be an effective class only */
2231 targetclass = CP_CLASS(targetclass);
2232
2233 /* Validate that it has a cprotect already */
2234 if (cp->c_cpentry == NULL) {
2235 /* We can't do anything if it shouldn't be protected. */
2236 return 0;
2237 }
2238
2239 /* Asserts for the underlying cprotect */
2240 if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
2241 /* should already have an xattr by this point. */
2242 error = EINVAL;
2243 goto out;
2244 }
2245
2246 if (S_ISREG(cp->c_mode)) {
2247 if (!cp_needs_pers_key(cp->c_cpentry)) {
2248 error = EINVAL;
2249 goto out;
2250 }
2251 }
2252
2253 cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);
2254
2255 error = cp_new (&targetclass, hfsmp, cp, cp->c_mode, keyflags, key_revision,
2256 (cp_new_alloc_fn)cp_entry_alloc, (void **)&newcp);
2257 if (error) {
2258 /*
2259 * Key generation failed. This is not necessarily fatal
2260 * since the device could have transitioned into the lock
2261 * state before we called this.
2262 */
2263 error = EPERM;
2264 goto out;
2265 }
2266
2267 newcp->cp_pclass = targetclass;
2268 newcp->cp_key_os_version = cp_os_version();
2269 newcp->cp_key_revision = key_revision;
2270
2271 /*
2272 * If we got here, then we have a new cprotect.
2273 * Attempt to write the new one out.
2274 */
2275 error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);
2276
2277 if (error) {
2278 /* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
2279 /* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
2280 if (newcp) {
2281 cp_entry_destroy(hfsmp, newcp);
2282 }
2283 goto out;
2284 }
2285
2286 /*
2287 * If we get here then we can assert that:
2288 * 1) we generated wrapped/unwrapped keys,
2289 * 2) we wrote the new keys to disk, and
2290 * 3) the cprotect is ready to go.
2291 */
2292
2293 *newentry = newcp;
2294
2295 out:
2296 return error;
2297
2298 }
2299
2300 void cp_replace_entry (hfsmount_t *hfsmp, struct cnode *cp, struct cprotect *newentry)
2301 {
2302 if (cp->c_cpentry) {
2303 #if HFS_CONFIG_KEY_ROLL
2304 // Transfer the tentative reservation
2305 if (cp->c_cpentry->cp_key_roll_ctx && newentry->cp_key_roll_ctx) {
2306 newentry->cp_key_roll_ctx->ckr_tentative_reservation
2307 = cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation;
2308
2309 cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation = NULL;
2310 }
2311 #endif
2312
2313 cp_entry_destroy (hfsmp, cp->c_cpentry);
2314 }
2315 cp->c_cpentry = newentry;
2316 newentry->cp_backing_cnode = cp;
2317
2318 return;
2319 }
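/*
 * Illustrative sketch of the expected pairing of cp_generate_keys()
 * and cp_replace_entry() (the real call sites live in the vnop
 * handling paths): with the cnode lock held exclusive, generate keys
 * lazily and swap the new cprotect into place on success. The helper
 * name is hypothetical.
 */
#if 0
static int
cp_lazy_keygen_sketch(struct hfsmount *hfsmp, struct cnode *cp)
{
	struct cprotect *newentry = NULL;

	int error = cp_generate_keys(hfsmp, cp,
	                             CP_CLASS(cp->c_cpentry->cp_pclass),
	                             0 /* keyflags */, &newentry);
	if (error == 0 && newentry)
		cp_replace_entry(hfsmp, cp, newentry);

	return error;
}
#endif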
2320
2321
2322 /*
2323 * cp_new
2324 *
2325 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
2326 * allocate a cprotect, and vend it back to the caller.
2327 *
2328 * Additionally, decide if keys are even needed -- directories get cprotect data structures
2329 * but they do not have keys.
2330 *
2331 */
2332
2333 int
2334 cp_new(cp_key_class_t *newclass_eff, __unused struct hfsmount *hfsmp, struct cnode *cp,
2335 mode_t cmode, int32_t keyflags, cp_key_revision_t key_revision,
2336 cp_new_alloc_fn alloc_fn, void **pholder)
2337 {
2338 int error = 0;
2339 uint8_t new_key[CP_MAX_CACHEBUFLEN];
2340 unsigned new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tells us the proper key length; this is how much of the buffer is used */
2341 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2342 unsigned new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
2343 uint8_t iv_key[CP_IV_KEYSIZE];
2344 unsigned iv_key_len = CP_IV_KEYSIZE;
2345 int iswrapped = 0;
2346 cp_key_class_t key_class = CP_CLASS(*newclass_eff);
2347
2348 /* Structures passed between HFS and AKS */
2349 struct aks_cred_s access_in;
2350 struct aks_wrapped_key_s wrapped_key_out;
2351 struct aks_raw_key_s key_out;
2352
2353 /* Sanity check that it's a file or directory here */
2354 if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
2355 return EPERM;
2356 }
2357
2358 /*
2359 * Step 1: Generate Keys if needed.
2360 *
2361 * For class F files, the kernel provides the key.
2362 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2363 * key that is only good as long as the file is open. There is no
2364 * wrapped key, so there isn't anything to wrap.
2365 *
2366 * For class A-D files, the keystore provides the key.
2367 *
2368 * For directories, we only assign a class; no keys.
2369 */
2370 if (S_ISDIR (cmode)) {
2371 /* Directories */
2372 new_persistent_len = 0;
2373 new_key_len = 0;
2374
2375 error = 0;
2376 }
2377 else {
2378 /* Must be a file */
2379 if (key_class == PROTECTION_CLASS_F) {
2380 /* class F files are not wrapped; they can still use the max key size */
2381 new_key_len = CP_MAX_KEYSIZE;
2382 read_random (&new_key[0], new_key_len);
2383 new_persistent_len = 0;
2384
2385 error = 0;
2386 }
2387 else {
2388 /*
2389 * The keystore is provided the file ID so that it can associate
2390 * the wrapped backup blob with this key from userspace. This
2391 * lookup occurs after successful file creation. Beyond this, the
2392 * file ID is not used. Note that there is a potential race here if
2393 * the file ID is re-used.
2394 */
2395 cp_init_access(&access_in, cp);
2396
2397 bzero(&key_out, sizeof(key_out));
2398 key_out.key = new_key;
2399 key_out.iv_key = iv_key;
2400 /*
2401 * AKS will override our key length fields, but we need to supply
2402 * the length of the buffer in those length fields so that
2403 * AKS knows how many bytes it has to work with.
2404 */
2405 key_out.key_len = new_key_len;
2406 key_out.iv_key_len = iv_key_len;
2407
2408 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2409 wrapped_key_out.key = new_persistent_key;
2410 wrapped_key_out.key_len = new_persistent_len;
2411
2412 access_in.key_revision = key_revision;
2413
2414 error = hfs_new_key(&access_in,
2415 key_class,
2416 &key_out,
2417 &wrapped_key_out);
2418
2419 if (error) {
2420 /* keybag returned failure */
2421 error = EPERM;
2422 goto cpnew_fail;
2423 }
2424
2425 /* Now sanity-check the output from new_key */
2426 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2427 panic ("cp_new: invalid key length! (%ul) \n", key_out.key_len);
2428 }
2429
2430 if (key_out.iv_key_len != CP_IV_KEYSIZE) {
2431 panic ("cp_new: invalid iv key length! (%ul) \n", key_out.iv_key_len);
2432 }
2433
2434 /*
2435 * AKS is allowed to override our preferences and wrap with a
2436 * different class key for policy reasons. If the caller deemed any
2437 * class other than the one specified unacceptable, then error out
2438 * when that occurs. Check that the effective class returned by
2439 * AKS is the same as our effective new class.
2440 */
2441 if (CP_CLASS(wrapped_key_out.dp_class) != key_class) {
2442 if (!ISSET(keyflags, CP_KEYWRAP_DIFFCLASS)) {
2443 error = EPERM;
2444 /* TODO: When 12170074 fixed, release/invalidate the key! */
2445 goto cpnew_fail;
2446 }
2447 }
2448
2449 *newclass_eff = wrapped_key_out.dp_class;
2450 new_key_len = key_out.key_len;
2451 iv_key_len = key_out.iv_key_len;
2452 new_persistent_len = wrapped_key_out.key_len;
2453
2454 /* Is the key a SEP wrapped key? */
2455 if (key_out.flags & AKS_RAW_KEY_WRAPPEDKEY) {
2456 iswrapped = 1;
2457 }
2458 }
2459 }
2460
2461 /*
2462 * Step 2: allocate cprotect and initialize it.
2463 */
2464
2465 cp_key_pair_t *cpkp;
2466 *pholder = alloc_fn(NULL, new_persistent_len, new_key_len, &cpkp);
2467 if (*pholder == NULL) {
2468 return ENOMEM;
2469 }
2470
2471 /* Copy the cache key & IV keys into place if needed. */
2472 if (new_key_len > 0) {
2473 cpx_t cpx = cpkp_cpx(cpkp);
2474
2475 cpx_set_key_len(cpx, new_key_len);
2476 memcpy(cpx_key(cpx), new_key, new_key_len);
2477
2478 /* Initialize the IV key */
2479 if (key_class != PROTECTION_CLASS_F)
2480 cpx_set_aes_iv_key(cpx, iv_key);
2481
2482 cpx_set_is_sep_wrapped_key(cpx, iswrapped);
2483 }
2484 if (new_persistent_len > 0) {
2485 cpkp_set_pers_key_len(cpkp, new_persistent_len);
2486 memcpy(cpkp_pers_key(cpkp), new_persistent_key, new_persistent_len);
2487 }
2488
2489 cpnew_fail:
2490
2491 #if HFS_TMPDBG
2492 #if !SECURE_KERNEL
2493 if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
2494 /* Only introspect the data fork */
2495 cp_log_eperm (cp->c_vp, *newclass_eff, true);
2496 }
2497 #endif
2498 #endif
2499
2500 return error;
2501 }
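/*
 * A note on the alloc_fn contract used by cp_new() and cp_rewrap():
 * given the persistent (wrapped) and cached key lengths, the callback
 * allocates the holder object and hands back a pointer to the
 * embedded cp_key_pair_t so the caller can copy the keys in. The
 * typedef below is only illustrative of that shape (the authoritative
 * declaration is in hfs_cprotect.h); cp_entry_alloc, used by
 * cp_generate_keys() above, is one conforming allocator.
 */
#if 0
typedef void *(*cp_new_alloc_fn_sketch)(const void *old_holder,
                                        uint16_t pers_key_len,
                                        uint16_t cached_key_len,
                                        cp_key_pair_t **pcpkp);
#endif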
2502
2503
2504 /* Initialize the aks_cred_t structure passed to AKS */
2505 static void cp_init_access(aks_cred_t access, struct cnode *cp)
2506 {
2507 vfs_context_t context = vfs_context_current();
2508 kauth_cred_t cred = vfs_context_ucred(context);
2509 proc_t proc = vfs_context_proc(context);
2510 struct hfsmount *hfsmp;
2511 struct vnode *vp;
2512 uuid_t hfs_uuid;
2513
2514 bzero(access, sizeof(*access));
2515
2516 vp = CTOV(cp, 0);
2517 if (vp == NULL) {
2518 /* is it a resource fork? */
2519 vp = CTOV(cp, 1);
2520 if (vp == NULL) {
2521 // leave the struct zeroed
2522 return;
2523 }
2524 }
2525
2526 hfsmp = VTOHFS(vp);
2527 hfs_getvoluuid(hfsmp, hfs_uuid);
2528
2529 /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
2530 access->inode = cp->c_fileid;
2531 access->pid = proc_pid(proc);
2532 access->uid = kauth_cred_getuid(cred);
2533 uuid_copy (access->volume_uuid, hfs_uuid);
2534
2535 if (cp->c_cpentry)
2536 access->key_revision = cp->c_cpentry->cp_key_revision;
2537
2538 return;
2539 }
2540
2541 #if HFS_CONFIG_KEY_ROLL
2542
2543 errno_t cp_set_auto_roll(hfsmount_t *hfsmp,
2544 const hfs_key_auto_roll_args_t *args)
2545 {
2546 // 64 bytes should be OK on the stack
2547 _Static_assert(sizeof(struct cp_root_xattr) < 64, "cp_root_xattr too big!");
2548
2549 struct cp_root_xattr xattr;
2550 errno_t ret;
2551
2552 ret = cp_getrootxattr(hfsmp, &xattr);
2553 if (ret)
2554 return ret;
2555
2556 ret = hfs_start_transaction(hfsmp);
2557 if (ret)
2558 return ret;
2559
2560 xattr.auto_roll_min_version = args->min_key_os_version;
2561 xattr.auto_roll_max_version = args->max_key_os_version;
2562
2563 bool roll_old_class_gen = ISSET(args->flags, HFS_KEY_AUTO_ROLL_OLD_CLASS_GENERATION);
2564
2565 if (roll_old_class_gen)
2566 SET(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
2567 else
2568 CLR(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
2569
2570 ret = cp_setrootxattr(hfsmp, &xattr);
2571
2572 errno_t ret2 = hfs_end_transaction(hfsmp);
2573
2574 if (!ret)
2575 ret = ret2;
2576
2577 if (ret)
2578 return ret;
2579
2580 hfs_lock_mount(hfsmp);
2581 hfsmp->hfs_auto_roll_min_key_os_version = args->min_key_os_version;
2582 hfsmp->hfs_auto_roll_max_key_os_version = args->max_key_os_version;
2583 hfs_unlock_mount(hfsmp);
2584
2585 return ret;
2586 }
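/*
 * Illustrative userspace sketch of driving cp_set_auto_roll() via
 * fsctl(2). The selector name HFSIOC_SET_KEY_AUTO_ROLL is assumed
 * here purely for illustration (see hfs_fsctl.h for the real
 * interface); the struct fields shown are the ones consumed above.
 */
#if 0
#include <sys/fsctl.h>

static int
enable_auto_roll_sketch(const char *mount_path, uint32_t min_os_version)
{
	hfs_key_auto_roll_args_t args = {
		.min_key_os_version = min_os_version, /* roll keys wrapped by older OSes */
		.max_key_os_version = 0,              /* no upper bound */
		.flags = 0,
	};

	return fsctl(mount_path, HFSIOC_SET_KEY_AUTO_ROLL, &args, 0);
}
#endif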
2587
2588 bool cp_should_auto_roll(hfsmount_t *hfsmp, cprotect_t cpr)
2589 {
2590 if (cpr->cp_key_roll_ctx) {
2591 // Already rolling
2592 return false;
2593 }
2594
2595 // Only automatically roll class A, B & C
2596 if (CP_CLASS(cpr->cp_pclass) < PROTECTION_CLASS_A
2597 || CP_CLASS(cpr->cp_pclass) > PROTECTION_CLASS_C) {
2598 return false;
2599 }
2600
2601 if (!cpkp_has_pers_key(&cpr->cp_keys))
2602 return false;
2603
2604 /*
2605 * Remember, the class generation stored in HFS+ is updated at the *end*,
2606 * so it's old if it matches the generation we have stored.
2607 */
2608 if (ISSET(hfsmp->cproot_flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION)
2609 && cp_get_crypto_generation(cpr->cp_pclass) == hfsmp->cp_crypto_generation) {
2610 return true;
2611 }
2612
2613 if (!hfsmp->hfs_auto_roll_min_key_os_version
2614 && !hfsmp->hfs_auto_roll_max_key_os_version) {
2615 // No minimum or maximum set
2616 return false;
2617 }
2618
2619 if (hfsmp->hfs_auto_roll_min_key_os_version
2620 && cpr->cp_key_os_version < hfsmp->hfs_auto_roll_min_key_os_version) {
2621 // Before minimum
2622 return false;
2623 }
2624
2625 if (hfsmp->hfs_auto_roll_max_key_os_version
2626 && cpr->cp_key_os_version >= hfsmp->hfs_auto_roll_max_key_os_version) {
2627 // At or above the maximum
2628 return false;
2629 }
2630
2631 return true;
2632 }
2633
2634 #endif // HFS_CONFIG_KEY_ROLL
2635
2636 errno_t cp_handle_strategy(buf_t bp)
2637 {
2638 vnode_t vp = buf_vnode(bp);
2639 cnode_t *cp = NULL;
2640
2641 if (bufattr_rawencrypted(buf_attr(bp))
2642 || !(cp = cp_get_protected_cnode(vp))
2643 || !cp->c_cpentry) {
2644 // Nothing to do
2645 return 0;
2646 }
2647
2648 /*
2649 * For filesystem resize, we may not have access to the underlying
2650 * file's cache key for whatever reason (device may be locked).
2651 * However, we do not need it since we are going to use the
2652 * temporary HFS-wide resize key which is generated once we start
2653 * relocating file content. If this file's I/O should be done
2654 * using the resize key, it will have been supplied already, so do
2655 * not attach the file's cp blob to the buffer.
2656 */
2657 if (ISSET(cp->c_cpentry->cp_flags, CP_RELOCATION_INFLIGHT))
2658 return 0;
2659
2660 #if HFS_CONFIG_KEY_ROLL
2661 /*
2662 * We don't require any locks here. Pages will be locked so no
2663 * key rolling can take place until this I/O has completed.
2664 */
2665 if (!cp->c_cpentry->cp_key_roll_ctx)
2666 #endif
2667 {
2668 // Fast path
2669 cpx_t cpx = cpkp_cpx(&cp->c_cpentry->cp_keys);
2670
2671 if (cpx_has_key(cpx)) {
2672 bufattr_setcpx(buf_attr(bp), cpx);
2673 return 0;
2674 }
2675 }
2676
2677 /*
2678 * We rely mostly (see note below) upon the truncate lock to
2679 * protect the CP cache key from getting tossed prior to our IO
2680 * finishing here. Nearly all cluster io calls to manipulate file
2681 * payload from HFS take the truncate lock before calling into the
2682 * cluster layer to ensure the file size does not change, or that
2683 * they have exclusive right to change the EOF of the file. That
2684 * same guarantee protects us here since the code that deals with
2685 * CP lock events must now take the truncate lock before doing
2686 * anything.
2687 *
2688 * If you want to change content protection structures, then the
2689 * truncate lock is not sufficient; you must take the truncate
2690 * lock and then wait for outstanding writes to complete. This is
2691 * necessary because asynchronous I/O only holds the truncate lock
2692 * whilst I/O is being queued.
2693 *
2694 * One exception should be the VM swapfile IO, because HFS will
2695 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call
2696 * for the swapfile code only without holding the truncate lock.
2697 * This is because individual swapfiles are maintained at
2698 * fixed-length sizes by the VM code. In non-swapfile IO we use
2699 * PAGEOUT_V2 semantics which allow us to create our own UPL and
2700 * thus take the truncate lock before calling into the cluster
2701 * layer. In that case, however, we are not concerned with the CP
2702 * blob being wiped out in the middle of the IO because there
2703 * isn't anything to toss; the VM swapfile key stays in-core as
2704 * long as the file is open.
2705 */
2706
2707 off_rsrc_t off_rsrc = off_rsrc_make(buf_lblkno(bp) * GetLogicalBlockSize(vp),
2708 VNODE_IS_RSRC(vp));
2709 cp_io_params_t io_params;
2710
2711
2712 /*
2713 * We want to take the cnode lock here, and because the vnode write
2714 * count is a pseudo-lock, we need to do something to preserve
2715 * lock ordering; the cnode lock comes before the write count.
2716 * Ideally, the write count would be incremented after the
2717 * strategy routine returns, but that becomes complicated if the
2718 * strategy routine were to call buf_iodone before returning.
2719 * For now, we drop the write count here and then pick it up again
2720 * later.
2721 */
2722 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2723 vnode_writedone(vp);
2724
2725 hfs_lock_always(cp, HFS_SHARED_LOCK);
2726 cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
2727 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
2728 &io_params);
2729 hfs_unlock(cp);
2730
2731 /*
2732 * Last chance: If this data protected I/O does not have unwrapped
2733 * keys present, then try to get them. We already know that it
2734 * should, by this point.
2735 */
2736 if (!cpx_has_key(io_params.cpx)) {
2737 int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
2738 errno_t error = cp_handle_vnop(vp, io_op, 0);
2739 if (error) {
2740 /*
2741 * We have to be careful here. By this point in the I/O
2742 * path, VM or the cluster engine has prepared a buf_t
2743 * with the proper file offsets and all the rest, so
2744 * simply erroring out will result in us leaking this
2745 * particular buf_t. We need to properly decorate the
2746 * buf_t just as buf_strategy would so as to make it
2747 * appear that the I/O errored out with the particular
2748 * error code.
2749 */
2750 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2751 vnode_startwrite(vp);
2752 buf_seterror (bp, error);
2753 buf_biodone(bp);
2754 return error;
2755 }
2756
2757 hfs_lock_always(cp, HFS_SHARED_LOCK);
2758 cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
2759 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
2760 &io_params);
2761 hfs_unlock(cp);
2762 }
2763
2764 hfs_assert(buf_count(bp) <= io_params.max_len);
2765 bufattr_setcpx(buf_attr(bp), io_params.cpx);
2766
2767 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2768 vnode_startwrite(vp);
2769
2770 return 0;
2771 }
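/*
 * Illustrative sketch of how a strategy vnop consumes the helper
 * above (the real hfs_vnop_strategy lives elsewhere; this shows only
 * the expected shape). On error the buf_t has already been completed
 * via buf_biodone(), so the caller must not touch it again.
 */
#if 0
int
hfs_vnop_strategy_sketch(struct vnop_strategy_args *ap)
{
	buf_t bp = ap->a_bp;
	vnode_t vp = buf_vnode(bp);

	errno_t error = cp_handle_strategy(bp);
	if (error)
		return error;	/* bp already decorated with the error and completed */

	return buf_strategy(VTOHFS(vp)->hfs_devvp, ap);
}
#endif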
2772
2773 #endif /* CONFIG_PROTECT */