/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#if CONFIG_PROTECT

#include <sys/mount.h>
#include <sys/random.h>
#include <sys/xattr.h>
#include <sys/vnode_if.h>
#include <sys/fcntl.h>
#include <libkern/OSByteOrder.h>
#include <libkern/crypto/sha1.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <uuid/uuid.h>

#include "hfs.h"
#include "hfs_cnode.h"
#include "hfs_fsctl.h"
#include "hfs_cprotect.h"
#include "hfs_iokit.h"

#if HFS_CONFIG_KEY_ROLL
#include "hfs_key_roll.h"
#endif

#define PTR_ADD(type, base, offset)    (type)((uintptr_t)(base) + (offset))

extern int (**hfs_vnodeop_p) (void *);

/*
 * CP private functions
 */
static int cp_root_major_vers(mount_t mp);
static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **);
static void cp_entry_dealloc(hfsmount_t *hfsmp, struct cprotect *entry);
static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *);
static int cp_lock_vnode_callback(vnode_t, void *);
static int cp_vnode_is_eligible (vnode_t);
static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop);
static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *);
static void cp_init_access(aks_cred_t access, struct cnode *cp);

// -- cp_key_pair accessors --

void cpkp_init(cp_key_pair_t *cpkp, uint16_t max_pers_key_len,
               uint16_t max_cached_key_len)
{
    cpkp->cpkp_max_pers_key_len = max_pers_key_len;
    cpkp->cpkp_pers_key_len = 0;

    cpx_t embedded_cpx = cpkp_cpx(cpkp);
    /* XNU requires us to allocate the AES context separately */
    cpx_alloc_ctx (embedded_cpx);

    cpx_init(cpkp_cpx(cpkp), max_cached_key_len);

    // Default to using offsets
    cpx_set_use_offset_for_iv(cpkp_cpx(cpkp), true);
}

uint16_t cpkp_max_pers_key_len(const cp_key_pair_t *cpkp)
{
    return cpkp->cpkp_max_pers_key_len;
}

uint16_t cpkp_pers_key_len(const cp_key_pair_t *cpkp)
{
    return cpkp->cpkp_pers_key_len;
}

static bool cpkp_has_pers_key(const cp_key_pair_t *cpkp)
{
    return cpkp->cpkp_pers_key_len > 0;
}

static void *cpkp_pers_key(const cp_key_pair_t *cpkp)
{
    return PTR_ADD(void *, &cpkp->cpkp_cpx, cpx_sizex(cpkp_cpx(cpkp)));
}

static void cpkp_set_pers_key_len(cp_key_pair_t *cpkp, uint16_t key_len)
{
    if (key_len > cpkp->cpkp_max_pers_key_len)
        panic("hfs_cprotect: key too big!");
    cpkp->cpkp_pers_key_len = key_len;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
cpx_t cpkp_cpx(const cp_key_pair_t *cpkp)
{
    // Cast to remove const qualifier
    return (cpx_t)&cpkp->cpkp_cpx;
}
#pragma clang diagnostic pop

size_t cpkp_size(uint16_t pers_key_len, uint16_t cached_key_len)
{
    return sizeof(cp_key_pair_t) + pers_key_len + cpx_size(cached_key_len);
}

size_t cpkp_sizex(const cp_key_pair_t *cpkp)
{
    return cpkp_size(cpkp->cpkp_max_pers_key_len, cpx_max_key_len(cpkp_cpx(cpkp)));
}
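
/*
 * Illustrative note (not in the original source): a cp_key_pair_t is a
 * variable-length blob laid out as
 *
 *     [ cp_key_pair_t header | embedded cpx (cached key) | persistent key ]
 *
 * so, for example, a 40-byte wrapped key with a 32-byte cached key needs
 *
 *     size_t total = cpkp_size(40, 32);
 *     // == sizeof(cp_key_pair_t) + 40 + cpx_size(32)
 *
 * This is also why cpkp_pers_key() locates the persistent key by adding
 * cpx_sizex() to the address of the embedded cpx.
 */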

void cpkp_flush(cp_key_pair_t *cpkp)
{
    cpx_flush(cpkp_cpx(cpkp));
    cpkp->cpkp_pers_key_len = 0;
    bzero(cpkp_pers_key(cpkp), cpkp->cpkp_max_pers_key_len);
}

bool cpkp_can_copy(const cp_key_pair_t *src, const cp_key_pair_t *dst)
{
    return (cpkp_pers_key_len(src) <= dst->cpkp_max_pers_key_len
            && cpx_can_copy(cpkp_cpx(src), cpkp_cpx(dst)));
}

void cpkp_copy(const cp_key_pair_t *src, cp_key_pair_t *dst)
{
    const uint16_t key_len = cpkp_pers_key_len(src);
    cpkp_set_pers_key_len(dst, key_len);
    memcpy(cpkp_pers_key(dst), cpkp_pers_key(src), key_len);
    cpx_copy(cpkp_cpx(src), cpkp_cpx(dst));
}

// --

bool cp_is_supported_version(uint16_t vers)
{
    return vers == CP_VERS_4 || vers == CP_VERS_5;
}

/*
 * Return the appropriate key and, if requested, the physical offset and
 * maximum length for a particular I/O operation.
 */
void cp_io_params(__unused hfsmount_t *hfsmp, cprotect_t cpr,
                  __unused off_rsrc_t off_rsrc,
                  __unused int direction, cp_io_params_t *io_params)
{
#if HFS_CONFIG_KEY_ROLL
    hfs_cp_key_roll_ctx_t *ckr = cpr->cp_key_roll_ctx;

    if (ckr && off_rsrc < ckr->ckr_off_rsrc) {
        /*
         * When we're in the process of rolling an extent, ckr_off_rsrc will
         * indicate the end of the extent.
         */
        const off_rsrc_t roll_loc = ckr->ckr_off_rsrc
            - hfs_blk_to_bytes(ckr->ckr_roll_extent.blockCount,
                               hfsmp->blockSize);

        if (off_rsrc < roll_loc) {
            io_params->max_len = roll_loc - off_rsrc;
            io_params->phys_offset = -1;
        } else {
            /*
             * We should never get reads to the extent we're rolling
             * because the pages should be locked in the UBC. If we
             * did get reads it's not obvious what the right thing to
             * do is either: we could read from the old location, but
             * we might have written later data to the new location,
             * or we could read from the new location, but data might
             * not have been written there yet.
             *
             * Note that whilst raw encrypted reads don't lock any
             * pages, or take a cluster_read_direct lock, the call to
             * hfs_key_roll_up_to in hfs_vnop_read will have ensured
             * that the file has been rolled beyond the offset being
             * read so this path should never be taken in that case.
             */
            hfs_assert(direction == VNODE_WRITE);

            // For release builds, just in case...
            if (direction == VNODE_READ) {
                // Use the old key and offset
                goto old_key;
            }

            io_params->max_len = ckr->ckr_off_rsrc - off_rsrc;
            io_params->phys_offset = hfs_blk_to_bytes(ckr->ckr_roll_extent.startBlock,
                                                      hfsmp->blockSize) + off_rsrc - roll_loc;
        }

        // Use new key
        io_params->cpx = cpkp_cpx(&ckr->ckr_keys);
        return;
    }
old_key:
    // Use old key...
#endif

    io_params->max_len = INT64_MAX;
    io_params->phys_offset = -1;
    io_params->cpx = cpkp_cpx(&cpr->cp_keys);
}
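
/*
 * Illustrative sketch (not in the original source): a typical caller in the
 * I/O path asks for the parameters governing one chunk of a transfer and
 * clips the transfer accordingly. The call site shown is hypothetical.
 *
 *     cp_io_params_t io_params;
 *
 *     cp_io_params(hfsmp, cp->c_cpentry, off_rsrc_make(offset, false),
 *                  VNODE_READ, &io_params);
 *
 *     // Encrypt/decrypt with io_params.cpx and issue at most
 *     // io_params.max_len bytes; io_params.phys_offset == -1 means
 *     // "no override of the physical location".
 */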

static void cp_flush_cached_keys(cprotect_t cpr)
{
    cpx_flush(cpkp_cpx(&cpr->cp_keys));
#if HFS_CONFIG_KEY_ROLL
    if (cpr->cp_key_roll_ctx)
        cpx_flush(cpkp_cpx(&cpr->cp_key_roll_ctx->ckr_keys));
#endif
}

static bool cp_needs_pers_key(cprotect_t cpr)
{
    if (CP_CLASS(cpr->cp_pclass) == PROTECTION_CLASS_F)
        return !cpx_has_key(cpkp_cpx(&cpr->cp_keys));
    else
        return !cpkp_has_pers_key(&cpr->cp_keys);
}

static cp_key_revision_t cp_initial_key_revision(__unused hfsmount_t *hfsmp)
{
    return 1;
}

cp_key_revision_t cp_next_key_revision(cp_key_revision_t rev)
{
    rev = (rev + 0x0100) ^ (mach_absolute_time() & 0xff);
    if (!rev)
        rev = 1;
    return rev;
}
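
/*
 * Illustrative note (not in the original source): the revision counter lives
 * in the high-order byte(s) while the low byte is perturbed with timer
 * entropy. For example, starting from rev == 0x0100:
 *
 *     rev = (0x0100 + 0x0100) ^ (mach_absolute_time() & 0xff);
 *     // == 0x02xx for some xx in 0x00..0xff
 *
 * so successive revisions still order by their high bytes, while two rolls
 * of the same file are very unlikely to produce identical values. A result
 * of zero is reserved and remapped to 1.
 */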

/*
 * Allocate and initialize a cprotect blob for a new cnode.
 * Called from hfs_getnewvnode: cnode is locked exclusive.
 *
 * Read xattr data off the cnode. Then, if conditions permit,
 * unwrap the file key and cache it in the cprotect blob.
 */
int
cp_entry_init(struct cnode *cp, struct mount *mp)
{
    struct cprotect *entry = NULL;
    int error = 0;
    struct hfsmount *hfsmp = VFSTOHFS(mp);

    /*
     * The cnode should be locked at this point, regardless of whether or not
     * we are creating a new item in the namespace or vending a vnode on behalf
     * of lookup. The only time we tell getnewvnode to skip the lock is when
     * constructing a resource fork vnode. But a resource fork vnode must come
     * after the regular data fork cnode has already been constructed.
     */
    if (!cp_fs_protected (mp)) {
        cp->c_cpentry = NULL;
        return 0;
    }

    if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) {
        cp->c_cpentry = NULL;
        return 0;
    }

    if (hfsmp->hfs_running_cp_major_vers == 0) {
        panic ("hfs cp: no running mount point version! ");
    }

    hfs_assert(cp->c_cpentry == NULL);

    error = cp_getxattr(cp, hfsmp, &entry);
    if (error == ENOATTR) {
        /*
         * Normally, we should always have a CP EA for a file or directory that
         * we are initializing here. However, there are some extenuating circumstances,
         * such as the root directory immediately following a newfs_hfs.
         *
         * As a result, we leave code here to deal with an ENOATTR which will always
         * default to a 'D/NONE' key, though we don't expect to use it much.
         */
        cp_key_class_t target_class = PROTECTION_CLASS_D;

        if (S_ISDIR(cp->c_mode)) {
            target_class = PROTECTION_CLASS_DIR_NONE;
        }

        cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

        /* allow keybag to override our class preferences */
        error = cp_new (&target_class, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS,
                        key_revision, (cp_new_alloc_fn)cp_entry_alloc, (void **)&entry);
        if (error == 0) {
            entry->cp_pclass = target_class;
            entry->cp_key_os_version = cp_os_version();
            entry->cp_key_revision = key_revision;
            error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE);
        }
    }

    /*
     * Bail out if:
     * a) error was not ENOATTR (we got something bad from the getxattr call)
     * b) we encountered an error setting the xattr above.
     * c) we failed to generate a new cprotect data structure.
     */
    if (error) {
        goto out;
    }

    cp->c_cpentry = entry;

out:
    if (error == 0) {
        entry->cp_backing_cnode = cp;
    }
    else {
        if (entry) {
            cp_entry_destroy(hfsmp, entry);
        }
        cp->c_cpentry = NULL;
    }

    return error;
}

/*
 * cp_setup_newentry
 *
 * Generate a keyless cprotect structure for use with the new AppleKeyStore
 * kext. Since the kext is now responsible for vending us both wrapped and
 * unwrapped keys, we need to create a keyless xattr upon file/directory
 * creation. Once we have the inode value and the file/directory is
 * established, we can ask it to generate keys. Note that this introduces a
 * potential race: if the device is locked and the wrapping keys are purged
 * between the time we call this function and the time we ask it to generate
 * keys for us, we could have to fail the open(2) call and back out the entry.
 */

int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp,
                       cp_key_class_t suppliedclass, mode_t cmode,
                       struct cprotect **tmpentry)
{
    int isdir = 0;
    struct cprotect *entry = NULL;
    uint32_t target_class = hfsmp->default_cp_class;
    suppliedclass = CP_CLASS(suppliedclass);

    if (hfsmp->hfs_running_cp_major_vers == 0) {
        panic ("CP: major vers not set in mount!");
    }

    if (S_ISDIR (cmode)) {
        isdir = 1;
    }

    /* Decide the target class. Input argument takes priority. */
    if (cp_is_valid_class (isdir, suppliedclass)) {
        /* caller supplies -1 if it was not specified so we will default to the mount point value */
        target_class = suppliedclass;
        /*
         * One exception, F is never valid for a directory
         * because its children may inherit and userland will be
         * unable to read/write to the files.
         */
        if (isdir) {
            if (target_class == PROTECTION_CLASS_F) {
                *tmpentry = NULL;
                return EINVAL;
            }
        }
    }
    else {
        /*
         * If no valid class was supplied, behave differently depending on whether or not
         * the item being created is a file or directory.
         *
         * for FILE:
         *     If parent directory has a non-zero class, use that.
         *     If parent directory has a zero class (not set), then attempt to
         *     apply the mount point default.
         *
         * for DIRECTORY:
         *     Directories always inherit from the parent; if the parent
         *     has a NONE class set, then we can continue to use that.
         */
        if ((dcp) && (dcp->c_cpentry)) {
            uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass);
            /* If the parent class is not valid, default to the mount point value */
            if (cp_is_valid_class(1, parentclass)) {
                if (isdir) {
                    target_class = parentclass;
                }
                else if (parentclass != PROTECTION_CLASS_DIR_NONE) {
                    /* files can inherit so long as it's not NONE */
                    target_class = parentclass;
                }
            }
            /* Otherwise, we already defaulted to the mount point's default */
        }
    }

    /* Generate the cprotect to vend out */
    entry = cp_entry_alloc(NULL, 0, 0, NULL);
    if (entry == NULL) {
        *tmpentry = NULL;
        return ENOMEM;
    }

    /*
     * We don't have keys yet, so fill in what we can. At this point
     * this blob has no keys and it has no backing xattr. We just know the
     * target class.
     */
    entry->cp_flags = CP_NO_XATTR;
    /* Note this is only the effective class */
    entry->cp_pclass = target_class;
    *tmpentry = entry;

    return 0;
}
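
/*
 * Illustrative summary (not in the original source) of the class-selection
 * rules above, assuming a mount default of class C:
 *
 *     create           supplied class   parent class        result
 *     ------           --------------   ------------        ------
 *     file             A                (any)               A
 *     directory        F                (any)               EINVAL
 *     file             -1 (none)        B                   B (inherit)
 *     file             -1 (none)        DIR_NONE            C (mount default)
 *     directory        -1 (none)        DIR_NONE            DIR_NONE (inherit)
 *     file             -1 (none)        (no parent entry)   C (mount default)
 */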

/*
 * Set up an initial key/class pair for a disassociated cprotect entry.
 * This function is used to generate transient keys that will never be
 * written to disk. We use class F for this since it provides the exact
 * semantics that are needed here. Because we never attach this blob to
 * a cnode directly, we take a pointer to the cprotect struct.
 *
 * This function is primarily used in the HFS FS truncation codepath
 * where we may rely on AES symmetry to relocate encrypted data from
 * one spot in the disk to another.
 */
int cpx_gentempkeys(cpx_t *pcpx, __unused struct hfsmount *hfsmp)
{
    cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE, true);

    cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
    read_random(cpx_key(cpx), CP_MAX_KEYSIZE);
    cpx_set_use_offset_for_iv(cpx, true);

    *pcpx = cpx;

    return 0;
}
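
/*
 * Illustrative sketch (not in the original source): a relocation path could
 * use a transient class-F key along these lines (hypothetical call sites):
 *
 *     cpx_t tmp_cpx;
 *     cpx_gentempkeys(&tmp_cpx, hfsmp);
 *     // ... encrypt the moved blocks with tmp_cpx; because AES is
 *     // symmetric, the same cpx decrypts them at the new location ...
 *     cpx_free(tmp_cpx);
 *
 * The key exists only in memory; nothing is wrapped or persisted, which is
 * exactly the class-F semantic relied on here.
 */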

/*
 * Tear down and clear a cprotect blob for a closing file.
 * Called at hfs_reclaim_cnode: cnode is locked exclusive.
 */
void
cp_entry_destroy(hfsmount_t *hfsmp, struct cprotect *entry_ptr)
{
    if (entry_ptr == NULL) {
        /* nothing to clean up */
        return;
    }
    cp_entry_dealloc(hfsmp, entry_ptr);
}


int
cp_fs_protected (mount_t mnt)
{
    return (vfs_flags(mnt) & MNT_CPROTECT);
}


/*
 * Return a pointer to underlying cnode if there is one for this vnode.
 * Done without taking cnode lock, inspecting only vnode state.
 */
struct cnode *
cp_get_protected_cnode(struct vnode *vp)
{
    if (!cp_vnode_is_eligible(vp)) {
        return NULL;
    }

    if (!cp_fs_protected(VTOVFS(vp))) {
        /* mount point doesn't support it */
        return NULL;
    }

    return vnode_fsnode(vp);
}


/*
 * Sets *class to persistent class associated with vnode,
 * or returns error.
 */
int
cp_vnode_getclass(struct vnode *vp, cp_key_class_t *class)
{
    struct cprotect *entry;
    int error = 0;
    struct cnode *cp;
    int took_truncate_lock = 0;
    struct hfsmount *hfsmp = NULL;

    /* Is this an interesting vp? */
    if (!cp_vnode_is_eligible (vp)) {
        return EBADF;
    }

    /* Is the mount point formatted for content protection? */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return ENOTSUP;
    }

    cp = VTOC(vp);
    hfsmp = VTOHFS(vp);

    /*
     * Take the truncate lock up-front in shared mode because we may need
     * to manipulate the CP blob. Pend lock events until we're done here.
     */
    hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    /*
     * We take only the shared cnode lock up-front. If it turns out that
     * we need to manipulate the CP blob to write a key out, drop the
     * shared cnode lock and acquire an exclusive lock.
     */
    error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
    if (error) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return error;
    }

    /* pull the class from the live entry */
    entry = cp->c_cpentry;

    if (entry == NULL) {
        panic("Content Protection: uninitialized cnode %p", cp);
    }

    /* Note that we may not have keys yet, but we know the target class. */

    if (error == 0) {
        *class = CP_CLASS(entry->cp_pclass);
    }

    if (took_truncate_lock) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
    }

    hfs_unlock(cp);
    return error;
}

/*
 * Sets persistent class for this file or directory.
 * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF.
 * If the new class can't be accessed now, EPERM.
 * Otherwise, record class and re-wrap key if the mount point is content-protected.
 */
int
cp_vnode_setclass(struct vnode *vp, cp_key_class_t newclass)
{
    struct cnode *cp;
    struct cprotect *entry = 0;
    int error = 0;
    int took_truncate_lock = 0;
    struct hfsmount *hfsmp = NULL;
    int isdir = 0;

    if (vnode_isdir (vp)) {
        isdir = 1;
    }

    /* Ensure we only use the effective class here */
    newclass = CP_CLASS(newclass);

    if (!cp_is_valid_class(isdir, newclass)) {
        printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass);
        return EINVAL;
    }

    /* Is this an interesting vp? */
    if (!cp_vnode_is_eligible(vp)) {
        return EBADF;
    }

    /* Is the mount point formatted for content protection? */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return ENOTSUP;
    }

    hfsmp = VTOHFS(vp);
    if (hfsmp->hfs_flags & HFS_READ_ONLY) {
        return EROFS;
    }

    /*
     * Take the cnode truncate lock exclusive because we want to manipulate the
     * CP blob. The lock-event handling code is doing the same. This also forces
     * all pending IOs to drain before we can re-write the persistent and cache keys.
     */
    cp = VTOC(vp);
    hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    /*
     * The truncate lock is not sufficient to guarantee the CP blob
     * isn't being used. We must wait for existing writes to finish.
     */
    vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass");

    if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
        /* don't leak the truncate lock on the error path */
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return EINVAL;
    }

    entry = cp->c_cpentry;
    if (entry == NULL) {
        error = EINVAL;
        goto out;
    }

    /*
     * re-wrap per-file key with new class.
     * Generate an entirely new key if switching to F.
     */
    if (vnode_isreg(vp)) {
        /*
         * The vnode is a file. Before proceeding with the re-wrap, we need
         * to unwrap the existing keys. This ensures the cached key survives
         * the class change: class B, for instance, still allows I/O while
         * the device is locked, but an unwrap prior to the next unlock
         * would not be allowed.
         */
        if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
            error = cp_restore_keys (entry, hfsmp, cp);
            if (error) {
                goto out;
            }
        }

        if (newclass == PROTECTION_CLASS_F) {
            /* Verify that file is blockless if switching to class F */
            if (cp->c_datafork->ff_size > 0) {
                error = EINVAL;
                goto out;
            }

            cp_key_pair_t *cpkp = NULL;
            cprotect_t new_entry = cp_entry_alloc(NULL, 0, CP_MAX_KEYSIZE, &cpkp);

            if (!new_entry) {
                error = ENOMEM;
                goto out;
            }

            /* newclass is only the effective class */
            new_entry->cp_pclass = newclass;
            new_entry->cp_key_os_version = cp_os_version();
            new_entry->cp_key_revision = cp_next_key_revision(entry->cp_key_revision);

            cpx_t cpx = cpkp_cpx(cpkp);

            /* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */
            cpx_set_key_len(cpx, CP_MAX_KEYSIZE);
            read_random (cpx_key(cpx), CP_MAX_KEYSIZE);

            cp_replace_entry(hfsmp, cp, new_entry);

            error = 0;
            goto out;
        }

        /* Deny the setclass if file is to be moved from F to something else */
        if (entry->cp_pclass == PROTECTION_CLASS_F) {
            error = EPERM;
            goto out;
        }

        if (!cpkp_has_pers_key(&entry->cp_keys)) {
            struct cprotect *new_entry = NULL;
            /*
             * We want to fail if we can't wrap to the target class. By not setting
             * CP_KEYWRAP_DIFFCLASS, we tell keygeneration that if it can't wrap
             * to 'newclass' then error out.
             */
            uint32_t flags = 0;
            error = cp_generate_keys (hfsmp, cp, newclass, flags, &new_entry);
            if (error == 0) {
                cp_replace_entry (hfsmp, cp, new_entry);
            }
            /* Bypass the setxattr code below since generate_keys does it for us */
            goto out;
        }

        cprotect_t new_entry;
        error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys, entry,
                          (cp_new_alloc_fn)cp_entry_alloc, (void **)&new_entry);
        if (error) {
            /* we didn't have perms to set this class. leave file as-is and error out */
            goto out;
        }

#if HFS_CONFIG_KEY_ROLL
        hfs_cp_key_roll_ctx_t *new_key_roll_ctx = NULL;
        if (entry->cp_key_roll_ctx) {
            error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_key_roll_ctx->ckr_keys,
                              entry->cp_key_roll_ctx,
                              (cp_new_alloc_fn)hfs_key_roll_ctx_alloc,
                              (void **)&new_key_roll_ctx);

            if (error) {
                cp_entry_dealloc(hfsmp, new_entry);
                goto out;
            }

            new_entry->cp_key_roll_ctx = new_key_roll_ctx;
        }
#endif

        new_entry->cp_pclass = newclass;

        cp_replace_entry(hfsmp, cp, new_entry);
        entry = new_entry;
    }
    else if (vnode_isdir(vp)) {
        /* For directories, just update the pclass. newclass is only effective class */
        entry->cp_pclass = newclass;
        error = 0;
    }
    else {
        /* anything else, just error out */
        error = EINVAL;
        goto out;
    }

    /*
     * We get here if we re-wrapped an existing cprotect (the class-F switch
     * and fresh key-generation paths above jump straight to 'out'), or if we
     * just updated the class on a directory. If the keys were never
     * generated, then they'll skip the setxattr calls.
     */

    error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE);
    if (error == ENOATTR) {
        error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE);
    }

out:

    if (took_truncate_lock) {
        hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
    }
    hfs_unlock(cp);
    return error;
}
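
/*
 * Illustrative note (not in the original source): on Darwin this path is
 * typically reached from userland via fcntl(2), e.g.
 *
 *     int fd = open("/private/var/foo", O_RDWR);
 *     fcntl(fd, F_SETPROTECTIONCLASS, 2);    // request class B
 *
 * (the exact plumbing from the fcntl down to cp_vnode_setclass lives
 * outside this file).
 */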


int cp_vnode_transcode(vnode_t vp, cp_key_t *k)
{
    struct cnode *cp;
    struct cprotect *entry = 0;
    int error = 0;
    int took_truncate_lock = 0;
    struct hfsmount *hfsmp = NULL;

    /* Structures passed between HFS and AKS */
    struct aks_cred_s access_in;
    struct aks_wrapped_key_s wrapped_key_in, wrapped_key_out;

    /* Is this an interesting vp? */
    if (!cp_vnode_is_eligible(vp)) {
        return EBADF;
    }

    /* Is the mount point formatted for content protection? */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return ENOTSUP;
    }

    cp = VTOC(vp);
    hfsmp = VTOHFS(vp);

    /*
     * Take the cnode truncate lock exclusive because we want to manipulate the
     * CP blob. The lock-event handling code is doing the same. This also forces
     * all pending IOs to drain before we can re-write the persistent and cache keys.
     */
    hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
    took_truncate_lock = 1;

    if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) {
        /* don't leak the truncate lock on the error path */
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return EINVAL;
    }

    entry = cp->c_cpentry;
    if (entry == NULL) {
        error = EINVAL;
        goto out;
    }

    /* Send the per-file key in wrapped form for re-wrap with the current class information.
     * Send NULLs in the output parameters of the wrapper() and AKS will do the rest.
     * Don't need to process any outputs, so just clear the locks and pass along the error. */
    if (vnode_isreg(vp)) {

        /* Picked up the following from cp_wrap().
         * If needed, more comments available there. */

        if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
            error = EINVAL;
            goto out;
        }

        cp_init_access(&access_in, cp);

        bzero(&wrapped_key_in, sizeof(wrapped_key_in));
        bzero(&wrapped_key_out, sizeof(wrapped_key_out));

        cp_key_pair_t *cpkp = &entry->cp_keys;

#if HFS_CONFIG_KEY_ROLL
        if (entry->cp_key_roll_ctx)
            cpkp = &entry->cp_key_roll_ctx->ckr_keys;
#endif

        wrapped_key_in.key = cpkp_pers_key(cpkp);
        wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);

        if (!wrapped_key_in.key_len) {
            error = EINVAL;
            goto out;
        }

        /* Use the actual persistent class when talking to AKS */
        wrapped_key_in.dp_class = entry->cp_pclass;
        wrapped_key_out.key = k->key;
        wrapped_key_out.key_len = k->len;

        error = hfs_backup_key(&access_in,
                               &wrapped_key_in,
                               &wrapped_key_out);

        if (error)
            error = EPERM;
        else
            k->len = wrapped_key_out.key_len;
    }

out:
    if (took_truncate_lock) {
        hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
    }
    hfs_unlock(cp);
    return error;
}


/*
 * Check permission for the given operation (read, write) on this node.
 * Additionally, if the node needs work, do it:
 * - create a new key for the file if one hasn't been set before
 * - write out the xattr if it hasn't already been saved
 * - unwrap the key if needed
 *
 * Takes cnode lock, and upgrades to exclusive if modifying cprotect.
 *
 * Note that this function does *NOT* take the cnode truncate lock. This is because
 * the thread calling us may already have the truncate lock. It is not necessary
 * because either we successfully finish this function before the keys are tossed
 * and the IO will fail, or the keys are tossed and then this function will fail.
 * Either way, the cnode lock still ultimately guards the keys. We only rely on the
 * truncate lock to protect us against tossing the keys as a cluster call is in-flight.
 */
int
cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
{
    struct cprotect *entry;
    int error = 0;
    struct hfsmount *hfsmp = NULL;
    struct cnode *cp = NULL;

    /*
     * First, do validation against the vnode before proceeding any further:
     * Is this vnode originating from a valid content-protected filesystem?
     */
    if (cp_vnode_is_eligible(vp) == 0) {
        /*
         * It is either not HFS or not a file/dir. Just return success. This is a valid
         * case if servicing i/o against another filesystem type from VFS.
         */
        return 0;
    }

    if (cp_fs_protected (VTOVFS(vp)) == 0) {
        /*
         * The underlying filesystem does not support content protection. This is also
         * a valid case. Simply return success.
         */
        return 0;
    }

    /*
     * At this point, we know we have a HFS vnode that backs a file or directory on a
     * filesystem that supports content protection.
     */
    cp = VTOC(vp);

    if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
        return error;
    }

    entry = cp->c_cpentry;

    if (entry == NULL) {
        /*
         * If this cnode is not content protected, simply return success.
         * Note that this function is called by all I/O-based call sites
         * when CONFIG_PROTECT is enabled during XNU building.
         */

        /*
         * All files should have cprotect structs. It's possible to encounter
         * a directory from a V2.0 CP system but all files should have protection
         * EAs.
         */
        if (vnode_isreg(vp)) {
            error = EPERM;
        }

        goto out;
    }

    vp = CTOV(cp, 0);
    if (vp == NULL) {
        /* is it a rsrc */
        vp = CTOV(cp,1);
        if (vp == NULL) {
            error = EINVAL;
            goto out;
        }
    }
    hfsmp = VTOHFS(vp);

    if ((error = cp_check_access(cp, hfsmp, vnop))) {
        /* check for raw encrypted access before bailing out */
        if ((ioflag & IO_ENCRYPTED)
#if HFS_CONFIG_KEY_ROLL
            // If we're rolling, we need the keys
            && !hfs_is_key_rolling(cp)
#endif
            && (vnop == CP_READ_ACCESS)) {
            /*
             * read access only + asking for the raw encrypted bytes
             * is legitimate, so reset the error value to 0
             */
            error = 0;
        }
        else {
            goto out;
        }
    }

    if (!ISSET(entry->cp_flags, CP_NO_XATTR)) {
        if (!S_ISREG(cp->c_mode))
            goto out;

        // If we have a persistent key and the cached key, we're done
        if (!cp_needs_pers_key(entry)
            && cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
            goto out;
        }
    }

    /* upgrade to exclusive lock */
    if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) {
        if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
            return error;
        }
    } else {
        cp->c_lockowner = current_thread();
    }

    /* generate new keys if none have ever been saved */
    if (cp_needs_pers_key(entry)) {
        struct cprotect *newentry = NULL;
        /*
         * It's ok if this ends up being wrapped in a different class than 'pclass'.
         * class modification is OK here.
         */
        uint32_t flags = CP_KEYWRAP_DIFFCLASS;

        error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
        if (error == 0) {
            cp_replace_entry (hfsmp, cp, newentry);
            entry = newentry;
        }
        else {
            goto out;
        }
    }

    /* unwrap keys if needed */
    if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
        if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) {
            /* no need to try to restore keys; they are not going to be used */
            error = 0;
        }
        else {
            error = cp_restore_keys(entry, hfsmp, cp);
            if (error) {
                goto out;
            }
        }
    }

    /* write out the xattr if it's new */
    if (entry->cp_flags & CP_NO_XATTR)
        error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE);

out:

    hfs_unlock(cp);
    return error;
}
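
/*
 * Illustrative sketch (not in the original source): read/write vnops gate
 * their I/O on this check, along the lines of (hypothetical call site):
 *
 *     error = cp_handle_vnop(vp, CP_READ_ACCESS, ap->a_ioflag);
 *     if (error)
 *         return error;    // e.g. EPERM: class A file while device locked
 *     // ... proceed into the cluster layer with usable keys ...
 */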

#if HFS_TMPDBG
#if !SECURE_KERNEL
static void cp_log_eperm (struct vnode* vp, int pclass, boolean_t create) {
    char procname[256] = {};
    const char *fname = "unknown";
    const char *dbgop = "open";

    int ppid = proc_selfpid();
    /* selfname does a strlcpy so we're OK */
    proc_selfname(procname, sizeof(procname));
    if (vp && vp->v_name) {
        /* steal from the namecache */
        fname = vp->v_name;
    }

    if (create) {
        dbgop = "create";
    }

    printf("proc %s (pid %d) class %d, op: %s failure @ file %s\n", procname, ppid, pclass, dbgop, fname);
}
#endif
#endif


int
cp_handle_open(struct vnode *vp, int mode)
{
    struct cnode *cp = NULL;
    struct cprotect *entry = NULL;
    struct hfsmount *hfsmp;
    int error = 0;

    /* If vnode not eligible, just return success */
    if (!cp_vnode_is_eligible(vp)) {
        return 0;
    }

    /* If mount point not properly set up, then also return success */
    if (!cp_fs_protected(VTOVFS(vp))) {
        return 0;
    }

    cp = VTOC(vp);

    // Allow if raw encrypted mode requested
    if (ISSET(mode, FENCRYPTED)) {
#if HFS_CONFIG_KEY_ROLL
        // If we're rolling, we need the keys
        hfs_lock_always(cp, HFS_SHARED_LOCK);
        bool rolling = hfs_is_key_rolling(cp);
        hfs_unlock(cp);
        if (!rolling)
            return 0;
#else
        return 0;
#endif
    }
    if (ISSET(mode, FUNENCRYPTED)) {
        return 0;
    }

    /* We know the vnode is in a valid state. Acquire cnode and validate */
    hfsmp = VTOHFS(vp);

    if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
        return error;
    }

    entry = cp->c_cpentry;
    if (entry == NULL) {
        /*
         * If the mount is protected and we couldn't get a cprotect for this vnode,
         * then it's not valid for opening.
         */
        if (vnode_isreg(vp)) {
            error = EPERM;
        }
        goto out;
    }

    if (!S_ISREG(cp->c_mode))
        goto out;

    /*
     * Does the cnode have keys yet? If not, then generate them.
     */
    if (cp_needs_pers_key(entry)) {
        struct cprotect *newentry = NULL;
        /* Allow the keybag to override our class preferences */
        uint32_t flags = CP_KEYWRAP_DIFFCLASS;
        error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry);
        if (error == 0) {
            cp_replace_entry (hfsmp, cp, newentry);
            entry = newentry;
        }
        else {
            goto out;
        }
    }

    /*
     * We want to minimize the number of unwraps that we'll have to do since
     * the cost can vary, depending on the platform we're running on.
     */
    switch (CP_CLASS(entry->cp_pclass)) {
        case PROTECTION_CLASS_B:
            if (mode & O_CREAT) {
                /*
                 * Class B always allows creation. Since O_CREAT was passed through
                 * we infer that this was a newly created vnode/cnode. Even though a potential
                 * race exists when multiple threads attempt to create/open a particular
                 * file, only one can "win" and actually create it. VFS will unset the
                 * O_CREAT bit on the loser.
                 *
                 * Note that skipping the unwrap check here is not a security issue --
                 * we have to unwrap the key permanently upon the first I/O.
                 */
                break;
            }

            if (cpx_has_key(cpkp_cpx(&entry->cp_keys)) && !ISSET(mode, FENCRYPTED)) {
                /*
                 * For a class B file, attempt the unwrap if we have the key in
                 * core already.
                 * The device could have just transitioned into the lock state, and
                 * this vnode may not yet have been purged from the vnode cache (which would
                 * remove the keys).
                 */
                struct aks_cred_s access_in;
                struct aks_wrapped_key_s wrapped_key_in;

                cp_init_access(&access_in, cp);
                bzero(&wrapped_key_in, sizeof(wrapped_key_in));
                wrapped_key_in.key = cpkp_pers_key(&entry->cp_keys);
                wrapped_key_in.key_len = cpkp_pers_key_len(&entry->cp_keys);
                /* Use the persistent class when talking to AKS */
                wrapped_key_in.dp_class = entry->cp_pclass;
                error = hfs_unwrap_key(&access_in, &wrapped_key_in, NULL);
                if (error) {
                    error = EPERM;
                }
                break;
            }
            /* otherwise, fall through to attempt the unwrap/restore */
        case PROTECTION_CLASS_A:
        case PROTECTION_CLASS_C:
            /*
             * At this point, we know that we need to attempt an unwrap if needed; we want
             * to make sure that open(2) fails properly if the device is either just-locked
             * or never made it past first unlock. Since the keybag serializes access to the
             * unwrapping keys for us and only calls our VFS callback once they've been purged,
             * we will get here in two cases:
             *
             * A) we're in a window before the wrapping keys are purged; this is OK since when they get
             * purged, the vnode will get flushed if needed.
             *
             * B) The keys are already gone. In this case, the restore_keys call below will fail.
             *
             * Since this function is bypassed entirely if we're opening a raw encrypted file,
             * we can always attempt the restore.
             */
            if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) {
                error = cp_restore_keys(entry, hfsmp, cp);
            }

            if (error) {
                error = EPERM;
            }

            break;

        case PROTECTION_CLASS_D:
        default:
            break;
    }

out:

#if HFS_TMPDBG
#if !SECURE_KERNEL
    if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
        cp_log_eperm (vp, CP_CLASS(entry->cp_pclass), false);
    }
#endif
#endif

    hfs_unlock(cp);
    return error;
}


/*
 * cp_getrootxattr:
 * Gets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to native endian-ness as needed.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
    void *buf;

    /*
     * We allow for an extra 64 bytes to cater for upgrades. This wouldn't
     * be necessary if the xattr routines just returned what we asked for.
     */
    size_t bufsize = roundup(sizeof(struct cp_root_xattr) + 64, 64);

    int error = 0;

    hfs_assert(outxattr);

    buf = hfs_malloc(bufsize);

    uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

    uio_addiov(uio, CAST_USER_ADDR_T(buf), bufsize);

    size_t attrsize = bufsize;

    struct vnop_getxattr_args args = {
        .a_uio = uio,
        .a_name = CONTENT_PROTECTION_XATTR_NAME,
        .a_size = &attrsize
    };

    error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);

    uio_free(uio);

    if (error != 0) {
        goto out;
    }

    if (attrsize < CP_ROOT_XATTR_MIN_LEN) {
        error = HFS_EINCONSISTENT;
        goto out;
    }

    const struct cp_root_xattr *xattr = buf;

    bzero(outxattr, sizeof(*outxattr));

    /* Now convert the multi-byte fields to native endianness */
    outxattr->major_version = OSSwapLittleToHostInt16(xattr->major_version);
    outxattr->minor_version = OSSwapLittleToHostInt16(xattr->minor_version);
    outxattr->flags = OSSwapLittleToHostInt64(xattr->flags);

    if (outxattr->major_version >= CP_VERS_5) {
        if (attrsize < sizeof(struct cp_root_xattr)) {
            error = HFS_EINCONSISTENT;
            goto out;
        }
#if HFS_CONFIG_KEY_ROLL
        outxattr->auto_roll_min_version = OSSwapLittleToHostInt32(xattr->auto_roll_min_version);
        outxattr->auto_roll_max_version = OSSwapLittleToHostInt32(xattr->auto_roll_max_version);
#endif
    }

out:
    hfs_free(buf, bufsize);
    return error;
}

/*
 * cp_setrootxattr:
 * Sets the EA we set on the root folder (fileid 1) to get information about the
 * version of Content Protection that was used to write to this filesystem.
 * Note that all multi-byte fields are written to disk little endian so they must be
 * converted to little endian as needed.
 *
 * This will be written to the disk when it detects the EA is not there, or when we need
 * to make a modification to the on-disk version that can be done in-place.
 */
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
    int error = 0;
    struct vnop_setxattr_args args;

    args.a_desc = NULL;
    args.a_vp = NULL;
    args.a_name = CONTENT_PROTECTION_XATTR_NAME;
    args.a_uio = NULL; // pass data ptr instead
    args.a_options = 0;
    args.a_context = NULL; // no context needed, only done from mount.

    const uint64_t flags = newxattr->flags;

    /* Now convert the multi-byte fields to little endian before writing to disk. */
    newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

    int xattr_size = sizeof(struct cp_root_xattr);

#if HFS_CONFIG_KEY_ROLL
    bool upgraded = false;

    if (newxattr->auto_roll_min_version || newxattr->auto_roll_max_version) {
        if (newxattr->major_version < CP_VERS_5) {
            printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

            newxattr->major_version = CP_CURRENT_VERS;
            newxattr->minor_version = CP_MINOR_VERS;

            upgraded = true;
        }

        newxattr->auto_roll_min_version = OSSwapHostToLittleInt32(newxattr->auto_roll_min_version);
        newxattr->auto_roll_max_version = OSSwapHostToLittleInt32(newxattr->auto_roll_max_version);
    } else if (newxattr->major_version == CP_VERS_4)
        xattr_size = offsetof(struct cp_root_xattr, auto_roll_min_version);
#endif

    newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
    newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);

    error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
                                  xattr_size, &args, hfsmp, 1);

    if (!error) {
        hfsmp->cproot_flags = flags;
#if HFS_CONFIG_KEY_ROLL
        if (upgraded)
            hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
#endif
    }

    return error;
}


/*
 * Stores new xattr data on the cnode.
 * cnode lock held exclusive (if available).
 *
 * This function is also invoked during file creation.
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp,
                uint32_t fileid, int options)
{
    int error = 0;
    cp_key_pair_t *cpkp = &entry->cp_keys;
#if HFS_CONFIG_KEY_ROLL
    bool rolling = entry->cp_key_roll_ctx != NULL;

    if (rolling && entry->cp_key_roll_ctx->ckr_off_rsrc == INT64_MAX) {
        // We've finished rolling, but we still have the context
        rolling = false;
        cpkp = &entry->cp_key_roll_ctx->ckr_keys;
    }
#endif

    if (hfsmp->hfs_flags & HFS_READ_ONLY) {
        return EROFS;
    }

    if (hfsmp->hfs_running_cp_major_vers < CP_CURRENT_VERS) {
        // Upgrade
        printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

        struct cp_root_xattr root_xattr;

        error = cp_getrootxattr(hfsmp, &root_xattr);
        if (error)
            return error;

        root_xattr.major_version = CP_CURRENT_VERS;
        root_xattr.minor_version = CP_MINOR_VERS;

        error = cp_setrootxattr(hfsmp, &root_xattr);
        if (error)
            return error;

        hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
    }

    struct cp_xattr_v5 *xattr;
    xattr = hfs_malloc(sizeof(*xattr));

    xattr->xattr_major_version = OSSwapHostToLittleConstInt16(CP_VERS_5);
    xattr->xattr_minor_version = OSSwapHostToLittleConstInt16(CP_MINOR_VERS);
    xattr->flags = 0;
#if HFS_CONFIG_KEY_ROLL
    if (rolling)
        xattr->flags |= CP_XAF_KEY_ROLLING;
#endif
    xattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
    xattr->key_os_version = OSSwapHostToLittleInt32(entry->cp_key_os_version);
    xattr->key_revision = OSSwapHostToLittleInt16(entry->cp_key_revision);

    uint16_t key_len = cpkp_pers_key_len(cpkp);
    xattr->key_len = OSSwapHostToLittleInt16(key_len);
    memcpy(xattr->persistent_key, cpkp_pers_key(cpkp), key_len);

    size_t xattr_len = offsetof(struct cp_xattr_v5, persistent_key) + key_len;

#if HFS_CONFIG_KEY_ROLL
    if (rolling) {
        struct cp_roll_info *roll_info = PTR_ADD(struct cp_roll_info *, xattr, xattr_len);

        roll_info->off_rsrc = OSSwapHostToLittleInt64(entry->cp_key_roll_ctx->ckr_off_rsrc);

        key_len = cpkp_pers_key_len(&entry->cp_key_roll_ctx->ckr_keys);
        roll_info->key_len = OSSwapHostToLittleInt16(key_len);

        memcpy(roll_info->key, cpkp_pers_key(&entry->cp_key_roll_ctx->ckr_keys), key_len);

        xattr_len += offsetof(struct cp_roll_info, key) + key_len;
    }
#endif

    struct vnop_setxattr_args args = {
        .a_vp = cp ? cp->c_vp : NULL,
        .a_name = CONTENT_PROTECTION_XATTR_NAME,
        .a_options = options,
        .a_context = vfs_context_current(),
    };

    error = hfs_setxattr_internal(cp, xattr, xattr_len, &args, hfsmp, fileid);

    hfs_free(xattr, sizeof(*xattr));

    if (error == 0) {
        entry->cp_flags &= ~CP_NO_XATTR;
    }

    return error;
}

/*
 * Used by an fcntl to query the underlying FS for its content protection version #
 */
int
cp_get_root_major_vers(vnode_t vp, uint32_t *level)
{
    int err = 0;
    struct hfsmount *hfsmp = NULL;
    struct mount *mp = NULL;

    mp = VTOVFS(vp);

    /* check if it supports content protection */
    if (cp_fs_protected(mp) == 0) {
        return ENOTSUP;
    }

    hfsmp = VFSTOHFS(mp);
    /* figure out the level */

    err = cp_root_major_vers(mp);

    if (err == 0) {
        *level = hfsmp->hfs_running_cp_major_vers;
    }
    /* in error case, cp_root_major_vers will just return EINVAL. Use that */

    return err;
}

/* Used by fcntl to query default protection level of FS */
int cp_get_default_level (struct vnode *vp, uint32_t *level) {
    int err = 0;
    struct hfsmount *hfsmp = NULL;
    struct mount *mp = NULL;

    mp = VTOVFS(vp);

    /* check if it supports content protection */
    if (cp_fs_protected(mp) == 0) {
        return ENOTSUP;
    }

    hfsmp = VFSTOHFS(mp);
    /* figure out the default */

    *level = hfsmp->default_cp_class;
    return err;
}
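
/*
 * Illustrative note (not in the original source): both helpers are leaf
 * queries with the same shape, e.g. a hypothetical in-kernel caller:
 *
 *     uint32_t level;
 *     if (cp_get_default_level(vp, &level) == 0)
 *         printf("default protection class: %u\n", level);
 *
 * ENOTSUP distinguishes "this mount doesn't do content protection" from a
 * genuine lookup failure (EINVAL from cp_root_major_vers).
 */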

/********************
 * Private Functions
 *******************/

static int
cp_root_major_vers(mount_t mp)
{
    int err = 0;
    struct cp_root_xattr xattr;
    struct hfsmount *hfsmp = NULL;

    hfsmp = vfs_fsprivate(mp);
    err = cp_getrootxattr (hfsmp, &xattr);

    if (err == 0) {
        hfsmp->hfs_running_cp_major_vers = xattr.major_version;
    }
    else {
        return EINVAL;
    }

    return 0;
}

static int
cp_vnode_is_eligible(struct vnode *vp)
{
    return !vnode_issystem(vp) && (vnode_isreg(vp) || vnode_isdir(vp));
}

#if DEBUG
static const uint32_t cp_magic1 = 0x7b727063; // cpr{
static const uint32_t cp_magic2 = 0x7270637d; // }cpr
#endif

struct cprotect *
cp_entry_alloc(cprotect_t old, uint16_t pers_key_len,
               uint16_t cached_key_len, cp_key_pair_t **pcpkp)
{
    struct cprotect *cp_entry;

    if (pers_key_len > CP_MAX_WRAPPEDKEYSIZE)
        return (NULL);

    size_t size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
                   + cpkp_size(pers_key_len, cached_key_len));

#if DEBUG
    size += 4; // Extra for magic2
#endif

    cp_entry = hfs_mallocz(size);

    if (old) {
        memcpy(cp_entry, old, offsetof(struct cprotect, cp_keys));

#if HFS_CONFIG_KEY_ROLL
        // We don't copy the key roll context
        cp_entry->cp_key_roll_ctx = NULL;
#endif
    }

#if DEBUG
    cp_entry->cp_magic1 = cp_magic1;
    *PTR_ADD(uint32_t *, cp_entry, size - 4) = cp_magic2;
#endif

    cpkp_init(&cp_entry->cp_keys, pers_key_len, cached_key_len);

    /*
     * If we've been passed the old entry, then we are in the process of
     * rewrapping in which case we need to copy the cached key. This is
     * important for class B files when the device is locked because we
     * won't be able to unwrap whilst in this state, yet we still need the
     * unwrapped key.
     */
    if (old)
        cpx_copy(cpkp_cpx(&old->cp_keys), cpkp_cpx(&cp_entry->cp_keys));

    if (pcpkp)
        *pcpkp = &cp_entry->cp_keys;

    return cp_entry;
}
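
/*
 * Illustrative note (not in the original source): on DEBUG kernels the
 * allocation above is fenced by two magic words,
 *
 *     [ cp_magic1 | struct cprotect ... cp_keys (variable) | cp_magic2 ]
 *
 * cp_magic1 lives in the structure itself; cp_magic2 is the extra 4 bytes
 * appended to the allocation. cp_entry_dealloc() asserts both before
 * freeing, so overruns of the variable-length key area are caught.
 */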

static void
cp_entry_dealloc(__unused hfsmount_t *hfsmp, struct cprotect *entry)
{
#if HFS_CONFIG_KEY_ROLL
    hfs_release_key_roll_ctx(hfsmp, entry);
#endif

    cpkp_flush(&entry->cp_keys);

    size_t entry_size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
                         + cpkp_sizex(&entry->cp_keys));

    /*
     * We are freeing the HFS cprotect, which contains the memory for 'cpx'.
     * Don't forget to release the CPX AES context.
     */
    cpx_t embedded_cpx = cpkp_cpx(&entry->cp_keys);
    cpx_free_ctx (embedded_cpx);

#if DEBUG
    hfs_assert(entry->cp_magic1 == cp_magic1);
    /* the magic2 comparison belongs outside the PTR_ADD offset expression */
    hfs_assert(*PTR_ADD(uint32_t *, entry, sizeof(struct cprotect) - sizeof(cp_key_pair_t)
                        + cpkp_sizex(&entry->cp_keys)) == cp_magic2);

    entry_size += 4; // Extra for magic2
#endif

    hfs_free(entry, entry_size);
}

static int cp_read_xattr_v4(__unused hfsmount_t *hfsmp, struct cp_xattr_v4 *xattr,
                            size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
    /* Endian swap the multi-byte fields into host endianness from L.E. */
    xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
    xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
    xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
    xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
    xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
    xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);

    /*
     * Prevent a buffer overflow, and validate the key length obtained from the
     * EA. If it's too big, then bail out, because the EA can't be trusted at this
     * point.
     */
    if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE)
        return HFS_EINCONSISTENT;

    size_t min_len = offsetof(struct cp_xattr_v4, persistent_key) + xattr->key_size;
    if (xattr_len < min_len)
        return HFS_EINCONSISTENT;

    /*
     * Class F files have no backing key; their keylength should be 0,
     * though they should have the proper flags set.
     *
     * A request to instantiate a CP for a class F file should result
     * in a bzero'd cp that just says class F, with key_flushed set.
     */
    if (CP_CLASS(xattr->persistent_class) == PROTECTION_CLASS_F
        || ISSET(xattr->flags, CP_XAF_NEEDS_KEYS)) {
        xattr->key_size = 0;
    }

    /* set up entry with information from xattr */
    cp_key_pair_t *cpkp = NULL;
    cprotect_t entry;

    if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
        /* caller passed in a pre-allocated structure to get the basic info */
        entry = *pcpr;
        bzero(entry, offsetof(struct cprotect, cp_keys));
    }
    else {
        entry = cp_entry_alloc(NULL, xattr->key_size, CP_MAX_CACHEBUFLEN, &cpkp);
    }

    entry->cp_pclass = xattr->persistent_class;
    entry->cp_key_os_version = xattr->key_os_version;

    if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
        if (xattr->key_size) {
            cpkp_set_pers_key_len(cpkp, xattr->key_size);
            memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_size);
        }

        *pcpr = entry;
    }
    else if (xattr->key_size) {
        SET(entry->cp_flags, CP_HAS_A_KEY);
    }

    return 0;
}

int cp_read_xattr_v5(hfsmount_t *hfsmp, struct cp_xattr_v5 *xattr,
                     size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
    if (xattr->xattr_major_version == OSSwapHostToLittleConstInt16(CP_VERS_4)) {
        return cp_read_xattr_v4(hfsmp, (struct cp_xattr_v4 *)xattr, xattr_len, pcpr, options);
    }

    xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);

    if (xattr->xattr_major_version != CP_VERS_5) {
        printf("hfs: cp_getxattr: unsupported xattr version %d\n",
               xattr->xattr_major_version);
        return ENOTSUP;
    }

    size_t min_len = offsetof(struct cp_xattr_v5, persistent_key);

    if (xattr_len < min_len)
        return HFS_EINCONSISTENT;

    xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
    xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
    xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
    xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);
    xattr->key_revision = OSSwapLittleToHostInt16(xattr->key_revision);
    xattr->key_len = OSSwapLittleToHostInt16(xattr->key_len);

    uint16_t pers_key_len = xattr->key_len;

    min_len += pers_key_len;
    if (xattr_len < min_len)
        return HFS_EINCONSISTENT;

#if HFS_CONFIG_KEY_ROLL
    struct cp_roll_info *roll_info = NULL;

    if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
        roll_info = PTR_ADD(struct cp_roll_info *, xattr, min_len);

        min_len += offsetof(struct cp_roll_info, key);

        if (xattr_len < min_len)
            return HFS_EINCONSISTENT;

        roll_info->off_rsrc = OSSwapLittleToHostInt64(roll_info->off_rsrc);

        if (roll_info->off_rsrc % hfsmp->blockSize)
            return HFS_EINCONSISTENT;

        roll_info->key_len = OSSwapLittleToHostInt16(roll_info->key_len);

        min_len += roll_info->key_len;
        if (xattr_len < min_len)
            return HFS_EINCONSISTENT;
    }
#endif

    cp_key_pair_t *cpkp = NULL;
    cprotect_t entry;

    /*
     * If option CP_GET_XATTR_BASIC_INFO is set, we only return basic
     * information about the file's protection (and not the key) and
     * we store the result in the structure the caller passed to us.
     */
    if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
        entry = *pcpr;
        bzero(entry, offsetof(struct cprotect, cp_keys));
#if HFS_CONFIG_KEY_ROLL
        if (ISSET(xattr->flags, CP_XAF_KEY_ROLLING)) {
            SET(entry->cp_flags, CP_KEY_IS_ROLLING);
        }
#endif
    } else {
        entry = cp_entry_alloc(NULL, xattr->key_len, CP_MAX_CACHEBUFLEN, &cpkp);
    }

    entry->cp_pclass = xattr->persistent_class;
    entry->cp_key_os_version = xattr->key_os_version;
    entry->cp_key_revision = xattr->key_revision;

    if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
        if (xattr->key_len) {
            cpkp_set_pers_key_len(cpkp, xattr->key_len);
            memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_len);
        }

#if HFS_CONFIG_KEY_ROLL
        if (roll_info) {
            entry->cp_key_roll_ctx = hfs_key_roll_ctx_alloc(NULL, roll_info->key_len,
                                                            CP_MAX_CACHEBUFLEN, &cpkp);

            entry->cp_key_roll_ctx->ckr_off_rsrc = roll_info->off_rsrc;

            if (roll_info->key_len) {
                cpkp_set_pers_key_len(cpkp, roll_info->key_len);
                memcpy(cpkp_pers_key(cpkp), roll_info->key, roll_info->key_len);
            }
        }
#endif

        *pcpr = entry;
    }
    else if (xattr->key_len) {
        SET(entry->cp_flags, CP_HAS_A_KEY);
    }

    return 0;
}
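
/*
 * Illustrative note (not in the original source): the v5 EA parsed above is
 * a little-endian, variable-length blob:
 *
 *     struct cp_xattr_v5 header
 *         ... fixed fields (versions, flags, class, OS version, revision)
 *     persistent_key[key_len]
 *     optional struct cp_roll_info (only if CP_XAF_KEY_ROLLING is set):
 *         off_rsrc, key_len, key[key_len]
 *
 * which is why every length is re-checked against xattr_len as min_len
 * grows: the EA comes off disk and cannot be trusted until validated.
 */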

/*
 * Initializes a new cprotect entry with xattr data from the cnode.
 * cnode lock held shared
 */
static int
cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, cprotect_t *outentry)
{
    size_t xattr_len;
    struct cp_xattr_v5 *xattr;

    xattr = hfs_malloc(xattr_len = sizeof(*xattr));

    int error = hfs_xattr_read(cp->c_vp, CONTENT_PROTECTION_XATTR_NAME,
                               xattr, &xattr_len);

    if (!error) {
        if (xattr_len < CP_XATTR_MIN_LEN)
            error = HFS_EINCONSISTENT;
        else
            error = cp_read_xattr_v5(hfsmp, xattr, xattr_len, outentry, 0);
    }

#if DEBUG
    if (error && error != ENOATTR) {
        printf("cp_getxattr: bad cp xattr (%d):\n", error);
        for (size_t i = 0; i < xattr_len; ++i)
            printf("%02x ", ((uint8_t *)xattr)[i]);
        printf("\n");
    }
#endif

    hfs_free(xattr, sizeof(*xattr));

    return error;
}

/*
 * If permitted, restore entry's unwrapped key from the persistent key.
 * If not, clear key and set CP_KEY_FLUSHED.
 * cnode lock held exclusive
 */
static int
cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp)
{
    int error = 0;

    error = cp_unwrap(hfsmp, entry, cp);
    if (error) {
        cp_flush_cached_keys(entry);
        error = EPERM;
    }
    return error;
}

void cp_device_locked_callback(mount_t mp, cp_lock_state_t state)
{
    struct hfsmount *hfsmp;

    /*
     * When iterating the various mount points that may
     * be present on a content-protected device, we need to skip
     * those that do not have it enabled.
     */
    if (!cp_fs_protected(mp)) {
        return;
    }

    hfsmp = VFSTOHFS(mp);

    hfsmp->hfs_cp_lock_state = state;

    if (state == CP_LOCKED_STATE) {
        /*
         * We respond only to lock events. Since cprotect structs
         * decrypt/restore keys lazily, the unlock events don't
         * actually cause anything to happen.
         */
        vnode_iterate(mp, 0, cp_lock_vnode_callback, (void *)(uintptr_t)state);
    }
}

/*
 * Deny access to protected files if keys have been locked.
 */
static int
cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused)
{
    int error = 0;

    /*
     * For now it's OK to examine the state variable here without
     * holding the HFS lock. This is only a short-circuit; if the state
     * transitions (or is in transition) after we examine this field, we'd
     * have to handle that anyway.
     */
    if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) {
        return 0;
    }

    if (!cp->c_cpentry) {
        /* unprotected node */
        return 0;
    }

    if (!S_ISREG(cp->c_mode)) {
        return 0;
    }

    /* Deny all access for class A files */
    switch (CP_CLASS(cp->c_cpentry->cp_pclass)) {
        case PROTECTION_CLASS_A: {
            error = EPERM;
            break;
        }
        default:
            error = 0;
            break;
    }

    return error;
}
1940
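/*
 * Editorial sketch: this is intended as a cheap guard at the top of a
 * read/write vnop, e.g.:
 *
 *     if ((error = cp_check_access(cp, hfsmp, CP_READ_ACCESS)))
 *         return error;   // class A file while the device is locked
 */
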
1941 /*
1942 * Respond to a lock or unlock event.
1943 * On lock: push out dirty pages, clear the keys from memory, then
1944 * invalidate cached file contents. On unlock: nothing (not called).
1945 */
1946 static int
1947 cp_lock_vnode_callback(struct vnode *vp, void *arg)
1948 {
1949 cnode_t *cp = NULL;
1950 struct cprotect *entry = NULL;
1951 int error = 0;
1952 int locked = 1;
1953 unsigned long action = 0;
1954 int took_truncate_lock = 0;
1955
1956 error = vnode_getwithref (vp);
1957 if (error) {
1958 return error;
1959 }
1960
1961 cp = VTOC(vp);
1962
1963 /*
1964 * When cleaning cnodes due to a lock event, we must
1965 * take the truncate lock AND the cnode lock. Acquiring
1966 * the truncate lock exclusively here forces (nearly) all
1967 * pending IOs to drain first: all HFS cluster io calls
1968 * except for swapfile IO need to acquire the truncate lock
1969 * prior to calling into the cluster layer.
1970 */
1971 hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1972 took_truncate_lock = 1;
1973
1974 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
1975
1976 entry = cp->c_cpentry;
1977 if (!entry) {
1978 /* unprotected vnode: not a regular file */
1979 goto out;
1980 }
1981
1982 action = (unsigned long) arg;
1983 switch (action) {
1984 case CP_LOCKED_STATE: {
1985 vfs_context_t ctx;
1986 if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
1987 vnode_isdir(vp)) {
1988 /*
1989 * Nothing changes at lock time for classes other than A:
1990 * class B is kept in memory to allow writes, and class F
1991 * (for VM) has no wrapped key, so there is no
1992 * wrapping/unwrapping work to do.
1993 *
1994 * Class F matters here because hfs_vnop_strategy does
1995 * not take the cnode lock to protect the cp blob across
1996 * IO operations; we rely implicitly on the truncate lock
1997 * being held when doing IO. The only case where the
1998 * truncate lock is not held is during swapfile IO, because
1999 * HFS just funnels the VNOP_PAGEOUT directly to
2000 * cluster_pageout.
2001 */
2002 goto out;
2003 }
2004
2005 /* Before doing anything else, zero-fill sparse ranges as needed */
2006 ctx = vfs_context_current();
2007 (void) hfs_filedone (vp, ctx, 0);
2008
2009 /* first, sync back dirty pages */
2010 hfs_unlock (cp);
2011 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
2012 hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2013
2014 /* Flush keys:
2015 * There was a concern (rdar://9206856) about flushing keys before the NAND layer
2016 * is done using them. But since we use ubc_msync with UBC_SYNC, it blocks until
2017 * all IO is completed. Once IOFS caches or is done with these keys, it calls the
2018 * completion routine in IOFS, which in turn calls buf_biodone() and eventually
2019 * unblocks ubc_msync(). It was also verified that the cached data in IOFS is
2020 * overwritten by other data, so there is no key leakage in that layer.
2021 */
2022
2023 cp_flush_cached_keys(entry);
2024
2025 /* Some writes may have arrived in the meantime; dump those pages. */
2026 hfs_unlock(cp);
2027 locked = 0;
2028
2029 ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
2030 break;
2031 }
2032 case CP_UNLOCKED_STATE: {
2033 /* no-op */
2034 break;
2035 }
2036 default:
2037 panic("Content Protection: unknown lock action %lu\n", action);
2038 }
2039
2040 out:
2041 if (locked) {
2042 hfs_unlock(cp);
2043 }
2044
2045 if (took_truncate_lock) {
2046 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
2047 }
2048
2049 vnode_put (vp);
2050 return error;
2051 }
2052
2053
2054 /*
2055 * cp_rewrap:
2056 *
2057 * Rewrap the existing persistent (wrapped) key to a new protection class.
2058 */
2059
2060 int
2061 cp_rewrap(struct cnode *cp, __unused hfsmount_t *hfsmp,
2062 cp_key_class_t *newclass, cp_key_pair_t *cpkp, const void *old_holder,
2063 cp_new_alloc_fn alloc_fn, void **pholder)
2064 {
2065 struct cprotect *entry = cp->c_cpentry;
2066
2067 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2068 unsigned keylen = CP_MAX_WRAPPEDKEYSIZE;
2069 int error = 0;
2070 const cp_key_class_t key_class = CP_CLASS(*newclass);
2071
2072 /* Structures passed between HFS and AKS */
2073 struct aks_cred_s access_in;
2074 struct aks_wrapped_key_s wrapped_key_in;
2075 struct aks_wrapped_key_s wrapped_key_out;
2076
2077 /*
2078 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2079 * key that is only good as long as the file is open. There is no
2080 * wrapped key, so there isn't anything to wrap.
2081 */
2082 if (key_class == PROTECTION_CLASS_F) {
2083 return EINVAL;
2084 }
2085
2086 cp_init_access(&access_in, cp);
2087
2088 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
2089 wrapped_key_in.key = cpkp_pers_key(cpkp);
2090 wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);
2091 /* Use the persistent class when talking to AKS */
2092 wrapped_key_in.dp_class = entry->cp_pclass;
2093
2094 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2095 wrapped_key_out.key = new_persistent_key;
2096 wrapped_key_out.key_len = keylen;
2097
2098 /*
2099 * inode is passed here to find the backup bag wrapped blob
2100 * from userspace. This lookup will occur shortly after creation
2101 * and only if the file still exists. Beyond this lookup the
2102 * inode is not used. Technically there is a race, but in
2103 * practice we don't lose.
2104 */
2105 error = hfs_rewrap_key(&access_in,
2106 key_class, /* new class */
2107 &wrapped_key_in,
2108 &wrapped_key_out);
2109
2110 keylen = wrapped_key_out.key_len;
2111
2112 if (error == 0) {
2113 /*
2114 * Verify that AKS returned to us a wrapped key of the
2115 * target class requested.
2116 */
2117 /* Get the effective class here */
2118 cp_key_class_t effective = CP_CLASS(wrapped_key_out.dp_class);
2119 if (effective != key_class) {
2120 /*
2121 * Fail the operation if defaults or some other enforcement
2122 * dictated that the class be wrapped differently.
2123 */
2124
2125 /* TODO: Invalidate the key when rdar://12170074 is unblocked */
2126 return EPERM;
2127 }
2128
2129 /* Allocate a new cpentry */
2130 cp_key_pair_t *new_cpkp;
2131 *pholder = alloc_fn(old_holder, keylen, CP_MAX_CACHEBUFLEN, &new_cpkp);
2132
2133 /* copy the new key into the entry */
2134 cpkp_set_pers_key_len(new_cpkp, keylen);
2135 memcpy(cpkp_pers_key(new_cpkp), new_persistent_key, keylen);
2136
2137 /* Store the full class AKS reported back, not just the effective class we requested */
2138 *newclass = wrapped_key_out.dp_class;
2139 }
2140 else {
2141 error = EPERM;
2142 }
2143
2144 return error;
2145 }
2146
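/*
 * Editorial sketch (hypothetical call site): a class-change path would
 * hand cp_rewrap an allocator so the rewrapped key lands in a freshly
 * allocated holder, e.g. a new cprotect via cp_entry_alloc:
 *
 *     cp_key_class_t newclass = PROTECTION_CLASS_C;
 *     cprotect_t newentry = NULL;
 *     error = cp_rewrap(cp, hfsmp, &newclass, &cp->c_cpentry->cp_keys,
 *                       cp->c_cpentry, (cp_new_alloc_fn)cp_entry_alloc,
 *                       (void **)&newentry);
 *     // on success, newentry holds the rewrapped persistent key and
 *     // newclass holds the (full) class AKS actually wrapped to
 */
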
2147 static int cpkp_unwrap(cnode_t *cp, cp_key_class_t key_class, cp_key_pair_t *cpkp)
2148 {
2149 int error = 0;
2150 uint8_t iv_key[CP_IV_KEYSIZE];
2151 cpx_t cpx = cpkp_cpx(cpkp);
2152
2153 /* Structures passed between HFS and AKS */
2154 struct aks_cred_s access_in;
2155 struct aks_wrapped_key_s wrapped_key_in;
2156 struct aks_raw_key_s key_out;
2157
2158 cp_init_access(&access_in, cp);
2159
2160 bzero(&wrapped_key_in, sizeof(wrapped_key_in));
2161 wrapped_key_in.key = cpkp_pers_key(cpkp);
2162 wrapped_key_in.key_len = cpkp_max_pers_key_len(cpkp);
2163 /* Use the persistent class when talking to AKS */
2164 wrapped_key_in.dp_class = key_class;
2165
2166 bzero(&key_out, sizeof(key_out));
2167 key_out.iv_key = iv_key;
2168 key_out.key = cpx_key(cpx);
2169 /*
2170 * The unwrapper will validate and set the actual lengths of
2171 * the IV key and the cache key; however, we need to supply
2172 * the correct buffer lengths so that AKS knows how many
2173 * bytes it has to work with.
2174 */
2175 key_out.iv_key_len = CP_IV_KEYSIZE;
2176 key_out.key_len = cpx_max_key_len(cpx);
2177
2178 error = hfs_unwrap_key(&access_in, &wrapped_key_in, &key_out);
2179 if (!error) {
2180 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2181 panic ("cp_unwrap: invalid key length! (%ul)\n", key_out.key_len);
2182 }
2183
2184 if (key_out.iv_key_len != CP_IV_KEYSIZE)
2185 panic ("cp_unwrap: invalid iv key length! (%ul)\n", key_out.iv_key_len);
2186
2187 cpx_set_key_len(cpx, key_out.key_len);
2188
2189 cpx_set_aes_iv_key(cpx, iv_key);
2190 cpx_set_is_sep_wrapped_key(cpx, ISSET(key_out.flags, AKS_RAW_KEY_WRAPPEDKEY));
2191 } else {
2192 error = EPERM;
2193 }
2194
2195 return error;
2196 }
2197
2198 static int
2199 cp_unwrap(__unused struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp)
2200 {
2201 /*
2202 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2203 * key that is only good as long as the file is open. There is no
2204 * wrapped key, so there isn't anything to unwrap.
2205 */
2206 if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) {
2207 return EPERM;
2208 }
2209
2210 int error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_keys);
2211
2212 #if HFS_CONFIG_KEY_ROLL
2213 if (!error && entry->cp_key_roll_ctx) {
2214 error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_key_roll_ctx->ckr_keys);
2215 if (error)
2216 cpx_flush(cpkp_cpx(&entry->cp_keys));
2217 }
2218 #endif
2219
2220 return error;
2221 }
2222
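/*
 * Editorial sketch: cached keys are restored lazily, so a vnop that is
 * about to do I/O would typically do:
 *
 *     if (!cpx_has_key(cpkp_cpx(&entry->cp_keys)))
 *         error = cp_restore_keys(entry, hfsmp, cp);
 */
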
2223 /*
2224 * cp_generate_keys
2225 *
2226 * Take a cnode that has already been initialized and establish persistent and
2227 * cache keys for it at this time. Note that at the time this is called, the
2228 * directory entry has already been created and we are holding the cnode lock
2229 * on 'cp'.
2230 *
2231 */
2232 int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, cp_key_class_t targetclass,
2233 uint32_t keyflags, struct cprotect **newentry)
2234 {
2235
2236 int error = 0;
2237 struct cprotect *newcp = NULL;
2238 *newentry = NULL;
2239
2240 /* Target class must be an effective class only */
2241 targetclass = CP_CLASS(targetclass);
2242
2243 /* Validate that it has a cprotect already */
2244 if (cp->c_cpentry == NULL) {
2245 /* We can't do anything if it shouldn't be protected. */
2246 return 0;
2247 }
2248
2249 /* Asserts for the underlying cprotect */
2250 if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
2251 /* should already have an xattr by this point. */
2252 error = EINVAL;
2253 goto out;
2254 }
2255
2256 if (S_ISREG(cp->c_mode)) {
2257 if (!cp_needs_pers_key(cp->c_cpentry)) {
2258 error = EINVAL;
2259 goto out;
2260 }
2261 }
2262
2263 cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);
2264
2265 error = cp_new (&targetclass, hfsmp, cp, cp->c_mode, keyflags, key_revision,
2266 (cp_new_alloc_fn)cp_entry_alloc, (void **)&newcp);
2267 if (error) {
2268 /*
2269 * Key generation failed. This is not necessarily fatal
2270 * since the device could have transitioned into the lock
2271 * state before we called this.
2272 */
2273 error = EPERM;
2274 goto out;
2275 }
2276
2277 newcp->cp_pclass = targetclass;
2278 newcp->cp_key_os_version = cp_os_version();
2279 newcp->cp_key_revision = key_revision;
2280
2281 /*
2282 * If we got here, then we have a new cprotect.
2283 * Attempt to write the new one out.
2284 */
2285 error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);
2286
2287 if (error) {
2288 /* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
2289 /* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
2290 if (newcp) {
2291 cp_entry_destroy(hfsmp, newcp);
2292 }
2293 goto out;
2294 }
2295
2296 /*
2297 * If we get here then we can assert that:
2298 * 1) we generated wrapped/unwrapped keys,
2299 * 2) we wrote the new keys to disk, and
2300 * 3) the cprotect is ready to go.
2301 */
2302
2303 *newentry = newcp;
2304
2305 out:
2306 return error;
2307
2308 }
2309
2310 void cp_replace_entry (hfsmount_t *hfsmp, struct cnode *cp, struct cprotect *newentry)
2311 {
2312 if (cp->c_cpentry) {
2313 #if HFS_CONFIG_KEY_ROLL
2314 // Transfer the tentative reservation
2315 if (cp->c_cpentry->cp_key_roll_ctx && newentry->cp_key_roll_ctx) {
2316 newentry->cp_key_roll_ctx->ckr_tentative_reservation
2317 = cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation;
2318
2319 cp->c_cpentry->cp_key_roll_ctx->ckr_tentative_reservation = NULL;
2320 }
2321 #endif
2322
2323 cp_entry_destroy (hfsmp, cp->c_cpentry);
2324 }
2325 cp->c_cpentry = newentry;
2326 newentry->cp_backing_cnode = cp;
2327
2328 return;
2329 }
2330
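/*
 * Editorial sketch (assumed caller shape): deferred key generation
 * followed by swapping in the new entry, as a write path might do for
 * a file created while keys could not be generated:
 *
 *     struct cprotect *newentry = NULL;
 *     error = cp_generate_keys(hfsmp, cp,
 *                              CP_CLASS(cp->c_cpentry->cp_pclass),
 *                              0, &newentry);
 *     if (error == 0 && newentry)
 *         cp_replace_entry(hfsmp, cp, newentry);
 */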
2331
2332 /*
2333 * cp_new
2334 *
2335 * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore),
2336 * allocate a cprotect, and vend it back to the caller.
2337 *
2338 * Additionally, decide if keys are even needed -- directories get cprotect data structures
2339 * but they do not have keys.
2340 *
2341 */
2342
2343 int
2344 cp_new(cp_key_class_t *newclass_eff, __unused struct hfsmount *hfsmp, struct cnode *cp,
2345 mode_t cmode, int32_t keyflags, cp_key_revision_t key_revision,
2346 cp_new_alloc_fn alloc_fn, void **pholder)
2347 {
2348 int error = 0;
2349 uint8_t new_key[CP_MAX_CACHEBUFLEN];
2350 unsigned new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tells us the proper key length, i.e. how much of this buffer is used */
2351 uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
2352 unsigned new_persistent_len = CP_MAX_WRAPPEDKEYSIZE;
2353 uint8_t iv_key[CP_IV_KEYSIZE];
2354 unsigned iv_key_len = CP_IV_KEYSIZE;
2355 int iswrapped = 0;
2356 cp_key_class_t key_class = CP_CLASS(*newclass_eff);
2357
2358 /* Structures passed between HFS and AKS */
2359 struct aks_cred_s access_in;
2360 struct aks_wrapped_key_s wrapped_key_out;
2361 struct aks_raw_key_s key_out;
2362
2363 /* Sanity check that it's a file or directory here */
2364 if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) {
2365 return EPERM;
2366 }
2367
2368 /*
2369 * Step 1: Generate Keys if needed.
2370 *
2371 * For class F files, the kernel provides the key.
2372 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
2373 * key that is only good as long as the file is open. There is no
2374 * wrapped key, so there isn't anything to wrap.
2375 *
2376 * For class A-D files, the key store provides the key.
2377 *
2378 * Directories only get a class; they have no keys.
2379 */
2380 if (S_ISDIR (cmode)) {
2381 /* Directories */
2382 new_persistent_len = 0;
2383 new_key_len = 0;
2384
2385 error = 0;
2386 }
2387 else {
2388 /* Must be a file */
2389 if (key_class == PROTECTION_CLASS_F) {
2390 /* class F files are not wrapped; they can still use the max key size */
2391 new_key_len = CP_MAX_KEYSIZE;
2392 read_random (&new_key[0], new_key_len);
2393 new_persistent_len = 0;
2394
2395 error = 0;
2396 }
2397 else {
2398 /*
2399 * The keystore is provided the file ID so that it can associate
2400 * the wrapped backup blob with this key from userspace. This
2401 * lookup occurs after successful file creation. Beyond this, the
2402 * file ID is not used. Note that there is a potential race here if
2403 * the file ID is re-used.
2404 */
2405 cp_init_access(&access_in, cp);
2406
2407 bzero(&key_out, sizeof(key_out));
2408 key_out.key = new_key;
2409 key_out.iv_key = iv_key;
2410 /*
2411 * AKS will override our key length fields, but we need to supply
2412 * the length of the buffer in those length fields so that
2413 * AKS knows how many bytes it has to work with.
2414 */
2415 key_out.key_len = new_key_len;
2416 key_out.iv_key_len = iv_key_len;
2417
2418 bzero(&wrapped_key_out, sizeof(wrapped_key_out));
2419 wrapped_key_out.key = new_persistent_key;
2420 wrapped_key_out.key_len = new_persistent_len;
2421
2422 access_in.key_revision = key_revision;
2423
2424 error = hfs_new_key(&access_in,
2425 key_class,
2426 &key_out,
2427 &wrapped_key_out);
2428
2429 if (error) {
2430 /* keybag returned failure */
2431 error = EPERM;
2432 goto cpnew_fail;
2433 }
2434
2435 /* Now sanity-check the output from hfs_new_key */
2436 if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
2437 panic ("cp_new: invalid key length! (%ul) \n", key_out.key_len);
2438 }
2439
2440 if (key_out.iv_key_len != CP_IV_KEYSIZE) {
2441 panic ("cp_new: invalid iv key length! (%ul) \n", key_out.iv_key_len);
2442 }
2443
2444 /*
2445 * AKS is allowed to override our preferences and wrap with a
2446 * different class key for policy reasons. If the caller specified
2447 * that any class other than the one requested is unacceptable,
2448 * error out when that occurs: check that the effective class
2449 * returned by AKS is the same as our effective new class.
2450 */
2451 if (CP_CLASS(wrapped_key_out.dp_class) != key_class) {
2452 if (!ISSET(keyflags, CP_KEYWRAP_DIFFCLASS)) {
2453 error = EPERM;
2454 /* TODO: When rdar://12170074 is fixed, release/invalidate the key! */
2455 goto cpnew_fail;
2456 }
2457 }
2458
2459 *newclass_eff = wrapped_key_out.dp_class;
2460 new_key_len = key_out.key_len;
2461 iv_key_len = key_out.iv_key_len;
2462 new_persistent_len = wrapped_key_out.key_len;
2463
2464 /* Is the key a SEP wrapped key? */
2465 if (key_out.flags & AKS_RAW_KEY_WRAPPEDKEY) {
2466 iswrapped = 1;
2467 }
2468 }
2469 }
2470
2471 /*
2472 * Step 2: allocate cprotect and initialize it.
2473 */
2474
2475 cp_key_pair_t *cpkp;
2476 *pholder = alloc_fn(NULL, new_persistent_len, new_key_len, &cpkp);
2477 if (*pholder == NULL) {
2478 return ENOMEM;
2479 }
2480
2481 /* Copy the cache key & IV keys into place if needed. */
2482 if (new_key_len > 0) {
2483 cpx_t cpx = cpkp_cpx(cpkp);
2484
2485 cpx_set_key_len(cpx, new_key_len);
2486 memcpy(cpx_key(cpx), new_key, new_key_len);
2487
2488 /* Initialize the IV key */
2489 if (key_class != PROTECTION_CLASS_F)
2490 cpx_set_aes_iv_key(cpx, iv_key);
2491
2492 cpx_set_is_sep_wrapped_key(cpx, iswrapped);
2493 }
2494 if (new_persistent_len > 0) {
2495 cpkp_set_pers_key_len(cpkp, new_persistent_len);
2496 memcpy(cpkp_pers_key(cpkp), new_persistent_key, new_persistent_len);
2497 }
2498
2499 cpnew_fail:
2500
2501 #if HFS_TMPDBG
2502 #if !SECURE_KERNEL
2503 if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) {
2504 /* Only introspect the data fork */
2505 cp_log_eperm (cp->c_vp, *newclass_eff, true);
2506 }
2507 #endif
2508 #endif
2509
2510 return error;
2511 }
2512
2513
2514 /* Initialize the aks_cred_t structure passed to AKS */
2515 static void cp_init_access(aks_cred_t access, struct cnode *cp)
2516 {
2517 vfs_context_t context = vfs_context_current();
2518 kauth_cred_t cred = vfs_context_ucred(context);
2519 proc_t proc = vfs_context_proc(context);
2520 struct hfsmount *hfsmp;
2521 struct vnode *vp;
2522 uuid_t hfs_uuid;
2523
2524 bzero(access, sizeof(*access));
2525
2526 vp = CTOV(cp, 0);
2527 if (vp == NULL) {
2528 /* if not, try the resource fork */
2529 vp = CTOV(cp,1);
2530 if (vp == NULL) {
2531 // leave the struct zeroed.
2532 return;
2533 }
2534 }
2535
2536 hfsmp = VTOHFS(vp);
2537 hfs_getvoluuid(hfsmp, hfs_uuid);
2538
2539 /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */
2540 access->inode = cp->c_fileid;
2541 access->pid = proc_pid(proc);
2542 access->uid = kauth_cred_getuid(cred);
2543 uuid_copy (access->volume_uuid, hfs_uuid);
2544
2545 if (cp->c_cpentry)
2546 access->key_revision = cp->c_cpentry->cp_key_revision;
2547
2548 return;
2549 }
2550
2551 #if HFS_CONFIG_KEY_ROLL
2552
2553 errno_t cp_set_auto_roll(hfsmount_t *hfsmp,
2554 const hfs_key_auto_roll_args_t *args)
2555 {
2556 // 64 bytes should be OK on the stack
2557 _Static_assert(sizeof(struct cp_root_xattr) < 64, "cp_root_xattr too big!");
2558
2559 struct cp_root_xattr xattr;
2560 errno_t ret;
2561
2562 ret = cp_getrootxattr(hfsmp, &xattr);
2563 if (ret)
2564 return ret;
2565
2566 ret = hfs_start_transaction(hfsmp);
2567 if (ret)
2568 return ret;
2569
2570 xattr.auto_roll_min_version = args->min_key_os_version;
2571 xattr.auto_roll_max_version = args->max_key_os_version;
2572
2573 bool roll_old_class_gen = ISSET(args->flags, HFS_KEY_AUTO_ROLL_OLD_CLASS_GENERATION);
2574
2575 if (roll_old_class_gen)
2576 SET(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
2577 else
2578 CLR(xattr.flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION);
2579
2580 ret = cp_setrootxattr(hfsmp, &xattr);
2581
2582 errno_t ret2 = hfs_end_transaction(hfsmp);
2583
2584 if (!ret)
2585 ret = ret2;
2586
2587 if (ret)
2588 return ret;
2589
2590 hfs_lock_mount(hfsmp);
2591 hfsmp->hfs_auto_roll_min_key_os_version = args->min_key_os_version;
2592 hfsmp->hfs_auto_roll_max_key_os_version = args->max_key_os_version;
2593 hfs_unlock_mount(hfsmp);
2594
2595 return ret;
2596 }
2597
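/*
 * Editorial sketch (field names taken from the usage above; the fsctl
 * plumbing that builds the args is assumed): roll every key minted
 * before the currently running OS version:
 *
 *     hfs_key_auto_roll_args_t args = {
 *         .min_key_os_version = 0,
 *         .max_key_os_version = cp_os_version(),
 *         .flags = 0,
 *     };
 *     errno_t err = cp_set_auto_roll(hfsmp, &args);
 */
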
2598 bool cp_should_auto_roll(hfsmount_t *hfsmp, cprotect_t cpr)
2599 {
2600 if (cpr->cp_key_roll_ctx) {
2601 // Already rolling
2602 return false;
2603 }
2604
2605 // Only automatically roll class A, B & C
2606 if (CP_CLASS(cpr->cp_pclass) < PROTECTION_CLASS_A
2607 || CP_CLASS(cpr->cp_pclass) > PROTECTION_CLASS_C) {
2608 return false;
2609 }
2610
2611 if (!cpkp_has_pers_key(&cpr->cp_keys))
2612 return false;
2613
2614 /*
2615 * Remember, the class generation stored in HFS+ is updated at the *end*,
2616 * so it's old if it matches the generation we have stored.
2617 */
2618 if (ISSET(hfsmp->cproot_flags, CP_ROOT_AUTO_ROLL_OLD_CLASS_GENERATION)
2619 && cp_get_crypto_generation(cpr->cp_pclass) == hfsmp->cp_crypto_generation) {
2620 return true;
2621 }
2622
2623 if (!hfsmp->hfs_auto_roll_min_key_os_version
2624 && !hfsmp->hfs_auto_roll_max_key_os_version) {
2625 // No minimum or maximum set
2626 return false;
2627 }
2628
2629 if (hfsmp->hfs_auto_roll_min_key_os_version
2630 && cpr->cp_key_os_version < hfsmp->hfs_auto_roll_min_key_os_version) {
2631 // Before minimum
2632 return false;
2633 }
2634
2635 if (hfsmp->hfs_auto_roll_max_key_os_version
2636 && cpr->cp_key_os_version >= hfsmp->hfs_auto_roll_max_key_os_version) {
2637 // Greater than maximum
2638 return false;
2639 }
2640
2641 return true;
2642 }
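
/*
 * Editorial sketch (hypothetical call site; the real one lives in the
 * key roll machinery): consulted when a file is touched to decide
 * whether its keys should start rolling:
 *
 *     if (cp_should_auto_roll(hfsmp, cp->c_cpentry)) {
 *         // kick off a roll for this cnode (helper name assumed)
 *         error = hfs_key_roll_start(cp);
 *     }
 */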
2643
2644 #endif // HFS_CONFIG_KEY_ROLL
2645
2646 errno_t cp_handle_strategy(buf_t bp)
2647 {
2648 vnode_t vp = buf_vnode(bp);
2649 cnode_t *cp = NULL;
2650
2651 if (bufattr_rawencrypted(buf_attr(bp))
2652 || !(cp = cp_get_protected_cnode(vp))
2653 || !cp->c_cpentry) {
2654 // Nothing to do
2655 return 0;
2656 }
2657
2658 /*
2659 * For filesystem resize, we may not have access to the underlying
2660 * file's cache key for whatever reason (device may be locked).
2661 * However, we do not need it since we are going to use the
2662 * temporary HFS-wide resize key which is generated once we start
2663 * relocating file content. If this file's I/O should be done
2664 * using the resize key, it will have been supplied already, so do
2665 * not attach the file's cp blob to the buffer.
2666 */
2667 if (ISSET(cp->c_cpentry->cp_flags, CP_RELOCATION_INFLIGHT))
2668 return 0;
2669
2670 #if HFS_CONFIG_KEY_ROLL
2671 /*
2672 * We don't require any locks here. Pages will be locked so no
2673 * key rolling can take place until this I/O has completed.
2674 */
2675 if (!cp->c_cpentry->cp_key_roll_ctx)
2676 #endif
2677 {
2678 // Fast path
2679 cpx_t cpx = cpkp_cpx(&cp->c_cpentry->cp_keys);
2680
2681 if (cpx_has_key(cpx)) {
2682 bufattr_setcpx(buf_attr(bp), cpx);
2683 return 0;
2684 }
2685 }
2686
2687 /*
2688 * We rely mostly (see note below) upon the truncate lock to
2689 * protect the CP cache key from getting tossed prior to our IO
2690 * finishing here. Nearly all cluster io calls to manipulate file
2691 * payload from HFS take the truncate lock before calling into the
2692 * cluster layer to ensure the file size does not change, or that
2693 * they have exclusive right to change the EOF of the file. That
2694 * same guarantee protects us here since the code that deals with
2695 * CP lock events must now take the truncate lock before doing
2696 * anything.
2697 *
2698 * If you want to change content protection structures, then the
2699 * truncate lock is not sufficient; you must take the truncate
2700 * lock and then wait for outstanding writes to complete. This is
2701 * necessary because asynchronous I/O only holds the truncate lock
2702 * whilst I/O is being queued.
2703 *
2704 * One exception should be the VM swapfile IO, because HFS will
2705 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call
2706 * for the swapfile code only without holding the truncate lock.
2707 * This is because individual swapfiles are maintained at
2708 * fixed-length sizes by the VM code. In non-swapfile IO we use
2709 * PAGEOUT_V2 semantics which allow us to create our own UPL and
2710 * thus take the truncate lock before calling into the cluster
2711 * layer. In that case, however, we are not concerned with the CP
2712 * blob being wiped out in the middle of the IO because there
2713 * isn't anything to toss; the VM swapfile key stays in-core as
2714 * long as the file is open.
2715 */
2716
2717 off_rsrc_t off_rsrc = off_rsrc_make(buf_lblkno(bp) * GetLogicalBlockSize(vp),
2718 VNODE_IS_RSRC(vp));
2719 cp_io_params_t io_params;
2720
2721
2722 /*
2723 * We want to take the cnode lock here, and because the vnode write
2724 * count is a pseudo-lock, we need to do something to preserve
2725 * lock ordering; the cnode lock comes before the write count.
2726 * Ideally, the write count would be incremented after the
2727 * strategy routine returns, but that becomes complicated if the
2728 * strategy routine were to call buf_iodone before returning.
2729 * For now, we drop the write count here and then pick it up again
2730 * later.
2731 */
2732 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2733 vnode_writedone(vp);
2734
2735 hfs_lock_always(cp, HFS_SHARED_LOCK);
2736 cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
2737 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
2738 &io_params);
2739 hfs_unlock(cp);
2740
2741 /*
2742 * Last chance: If this data protected I/O does not have unwrapped
2743 * keys present, then try to get them. We already know that it
2744 * should, by this point.
2745 */
2746 if (!cpx_has_key(io_params.cpx)) {
2747 int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
2748 errno_t error = cp_handle_vnop(vp, io_op, 0);
2749 if (error) {
2750 /*
2751 * We have to be careful here. By this point in the I/O
2752 * path, VM or the cluster engine has prepared a buf_t
2753 * with the proper file offsets and all the rest, so
2754 * simply erroring out will result in us leaking this
2755 * particular buf_t. We need to properly decorate the
2756 * buf_t just as buf_strategy would so as to make it
2757 * appear that the I/O errored out with the particular
2758 * error code.
2759 */
2760 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2761 vnode_startwrite(vp);
2762 buf_seterror (bp, error);
2763 buf_biodone(bp);
2764 return error;
2765 }
2766
2767 hfs_lock_always(cp, HFS_SHARED_LOCK);
2768 cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
2769 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
2770 &io_params);
2771 hfs_unlock(cp);
2772 }
2773
2774 hfs_assert(buf_count(bp) <= io_params.max_len);
2775 bufattr_setcpx(buf_attr(bp), io_params.cpx);
2776
2777 if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
2778 vnode_startwrite(vp);
2779
2780 return 0;
2781 }
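
/*
 * Editorial sketch (assumed caller shape): hfs_vnop_strategy would call
 * this before handing the buffer to the device, treating a non-zero
 * return as "the buf has already been completed via buf_biodone()":
 *
 *     int hfs_vnop_strategy(struct vnop_strategy_args *ap)
 *     {
 *         buf_t bp = ap->a_bp;
 *         errno_t error = cp_handle_strategy(bp);
 *         if (error)
 *             return error;
 *         return buf_strategy(VTOHFS(buf_vnode(bp))->hfs_devvp, ap);
 *     }
 */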
2782
2783 #endif /* CONFIG_PROTECT */