]>
Commit | Line | Data |
---|---|---|
6d2010ae | 1 | /* |
3e170ce0 | 2 | * Copyright (c) 2000-2015 Apple Inc. All rights reserved. |
6d2010ae A |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
39236c6e | 5 | * |
6d2010ae A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
39236c6e | 14 | * |
6d2010ae A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
39236c6e | 17 | * |
6d2010ae A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
39236c6e | 25 | * |
6d2010ae A |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ | |
3e170ce0 A |
28 | #if CONFIG_PROTECT |
29 | ||
6d2010ae A |
30 | #include <sys/mount.h> |
31 | #include <sys/random.h> | |
32 | #include <sys/xattr.h> | |
33 | #include <sys/uio_internal.h> | |
34 | #include <sys/ubc_internal.h> | |
35 | #include <sys/vnode_if.h> | |
36 | #include <sys/vnode_internal.h> | |
316670eb | 37 | #include <sys/fcntl.h> |
6d2010ae | 38 | #include <libkern/OSByteOrder.h> |
3e170ce0 | 39 | #include <libkern/crypto/sha1.h> |
39236c6e A |
40 | #include <sys/proc.h> |
41 | #include <sys/kauth.h> | |
6d2010ae A |
42 | |
43 | #include "hfs.h" | |
44 | #include "hfs_cnode.h" | |
fe8ab488 | 45 | #include "hfs_fsctl.h" |
3e170ce0 | 46 | #include "hfs_cprotect.h" |
6d2010ae | 47 | |
3e170ce0 A |
48 | |
49 | #define PTR_ADD(type, base, offset) (type)((uintptr_t)(base) + (offset)) | |
50 | ||
51 | /* | |
52 | * The wrap function pointers and the variable to indicate if they | |
fe8ab488 A |
53 | * are initialized are system-wide, and hence are defined globally. |
54 | */ | |
55 | static struct cp_wrap_func g_cp_wrap_func = {}; | |
56 | static int are_wraps_initialized = false; | |
6d2010ae A |
57 | |
58 | extern int (**hfs_vnodeop_p) (void *); | |
59 | ||
60 | /* | |
61 | * CP private functions | |
62 | */ | |
316670eb A |
63 | static int cp_root_major_vers(mount_t mp); |
64 | static int cp_getxattr(cnode_t *, struct hfsmount *hfsmp, struct cprotect **); | |
3e170ce0 | 65 | static void cp_entry_dealloc(hfsmount_t *hfsmp, struct cprotect *entry); |
39236c6e | 66 | static int cp_restore_keys(struct cprotect *, struct hfsmount *hfsmp, struct cnode *); |
6d2010ae A |
67 | static int cp_lock_vfs_callback(mount_t, void *); |
68 | static int cp_lock_vnode_callback(vnode_t, void *); | |
69 | static int cp_vnode_is_eligible (vnode_t); | |
fe8ab488 | 70 | static int cp_check_access (cnode_t *cp, struct hfsmount *hfsmp, int vnop); |
39236c6e | 71 | static int cp_unwrap(struct hfsmount *, struct cprotect *, struct cnode *); |
39236c6e | 72 | static void cp_init_access(cp_cred_t access, struct cnode *cp); |
6d2010ae | 73 | |
6d2010ae A |
74 | |
75 | #if DEVELOPMENT || DEBUG | |
76 | #define CP_ASSERT(x) \ | |
77 | if ((x) == 0) { \ | |
316670eb | 78 | panic("Content Protection: failed assertion in %s", __FUNCTION__); \ |
6d2010ae A |
79 | } |
80 | #else | |
81 | #define CP_ASSERT(x) | |
82 | #endif | |
83 | ||
3e170ce0 A |
84 | // -- cpx_t accessors -- |
85 | ||
86 | size_t cpx_size(size_t key_size) | |
87 | { | |
88 | size_t size = sizeof(struct cpx) + key_size; | |
89 | ||
90 | #if DEBUG | |
91 | size += 4; // Extra for magic | |
92 | #endif | |
93 | ||
94 | return size; | |
95 | } | |
96 | ||
97 | static size_t cpx_sizex(const struct cpx *cpx) | |
98 | { | |
99 | return cpx_size(cpx->cpx_max_key_len); | |
100 | } | |
101 | ||
102 | cpx_t cpx_alloc(size_t key_len) | |
103 | { | |
104 | cpx_t cpx; | |
105 | ||
106 | MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK); | |
107 | ||
108 | cpx_init(cpx, key_len); | |
109 | ||
110 | return cpx; | |
111 | } | |
112 | ||
113 | #if DEBUG | |
114 | static const uint32_t cpx_magic1 = 0x7b787063; // cpx{ | |
115 | static const uint32_t cpx_magic2 = 0x7870637d; // }cpx | |
116 | #endif | |
117 | ||
118 | void cpx_free(cpx_t cpx) | |
119 | { | |
120 | #if DEBUG | |
121 | assert(cpx->cpx_magic1 == cpx_magic1); | |
122 | assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2); | |
123 | #endif | |
124 | bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); | |
125 | FREE(cpx, M_TEMP); | |
126 | } | |
127 | ||
128 | void cpx_init(cpx_t cpx, size_t key_len) | |
129 | { | |
130 | #if DEBUG | |
131 | cpx->cpx_magic1 = cpx_magic1; | |
132 | *PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2; | |
133 | #endif | |
134 | cpx->cpx_flags = 0; | |
135 | cpx->cpx_key_len = 0; | |
136 | cpx->cpx_max_key_len = key_len; | |
137 | } | |
138 | ||
139 | bool cpx_is_sep_wrapped_key(const struct cpx *cpx) | |
140 | { | |
141 | return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); | |
142 | } | |
143 | ||
144 | void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v) | |
145 | { | |
146 | if (v) | |
147 | SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); | |
148 | else | |
149 | CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); | |
150 | } | |
151 | ||
152 | bool cpx_use_offset_for_iv(const struct cpx *cpx) | |
153 | { | |
154 | return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); | |
155 | } | |
156 | ||
157 | void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v) | |
158 | { | |
159 | if (v) | |
160 | SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); | |
161 | else | |
162 | CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); | |
163 | } | |
164 | ||
165 | uint16_t cpx_max_key_len(const struct cpx *cpx) | |
166 | { | |
167 | return cpx->cpx_max_key_len; | |
168 | } | |
169 | ||
170 | uint16_t cpx_key_len(const struct cpx *cpx) | |
171 | { | |
172 | return cpx->cpx_key_len; | |
173 | } | |
174 | ||
175 | void cpx_set_key_len(struct cpx *cpx, uint16_t key_len) | |
176 | { | |
177 | cpx->cpx_key_len = key_len; | |
178 | ||
179 | if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_HFS)) { | |
180 | /* | |
181 | * We assume that if the key length is being modified, the key | |
182 | * has changed. As a result, un-set any bits related to the | |
183 | * AES context, if needed. They should be re-generated | |
184 | * on-demand. | |
185 | */ | |
186 | CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_HFS); | |
187 | } | |
188 | } | |
189 | ||
190 | bool cpx_has_key(const struct cpx *cpx) | |
191 | { | |
192 | return cpx->cpx_key_len > 0; | |
193 | } | |
194 | ||
195 | #pragma clang diagnostic push | |
196 | #pragma clang diagnostic ignored "-Wcast-qual" | |
197 | void *cpx_key(const struct cpx *cpx) | |
198 | { | |
199 | return (void *)cpx->cpx_cached_key; | |
200 | } | |
201 | #pragma clang diagnostic pop | |
202 | ||
203 | static void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key) | |
204 | { | |
205 | aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx); | |
206 | SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV); | |
207 | CLR(cpx->cpx_flags, CPX_IV_AES_CTX_HFS); | |
208 | } | |
209 | ||
210 | aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx) | |
211 | { | |
212 | if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) | |
213 | return &cpx->cpx_iv_aes_ctx; | |
214 | ||
215 | SHA1_CTX sha1ctxt; | |
216 | uint8_t digest[SHA_DIGEST_LENGTH]; /* Kiv */ | |
217 | ||
218 | /* First init the cp_cache_iv_key[] */ | |
219 | SHA1Init(&sha1ctxt); | |
220 | ||
221 | /* | |
222 | * We can only use this when the keys are generated in the AP; As a result | |
223 | * we only use the first 32 bytes of key length in the cache key | |
224 | */ | |
225 | SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len); | |
226 | SHA1Final(digest, &sha1ctxt); | |
227 | ||
228 | cpx_set_aes_iv_key(cpx, digest); | |
229 | SET(cpx->cpx_flags, CPX_IV_AES_CTX_HFS); | |
230 | ||
231 | return &cpx->cpx_iv_aes_ctx; | |
232 | } | |
233 | ||
234 | static void cpx_flush(cpx_t cpx) | |
235 | { | |
236 | bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); | |
237 | bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx)); | |
238 | cpx->cpx_flags = 0; | |
239 | cpx->cpx_key_len = 0; | |
240 | } | |
241 | ||
242 | static bool cpx_can_copy(const struct cpx *src, const struct cpx *dst) | |
243 | { | |
244 | return src->cpx_key_len <= dst->cpx_max_key_len; | |
245 | } | |
246 | ||
247 | void cpx_copy(const struct cpx *src, cpx_t dst) | |
248 | { | |
249 | uint16_t key_len = cpx_key_len(src); | |
250 | cpx_set_key_len(dst, key_len); | |
251 | memcpy(cpx_key(dst), cpx_key(src), key_len); | |
252 | dst->cpx_flags = src->cpx_flags; | |
253 | if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) | |
254 | dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx; | |
255 | } | |
256 | ||
257 | // -- cp_key_pair accessors -- | |
258 | ||
259 | void cpkp_init(cp_key_pair_t *cpkp, uint16_t max_pers_key_len, | |
260 | uint16_t max_cached_key_len) | |
261 | { | |
262 | cpkp->cpkp_max_pers_key_len = max_pers_key_len; | |
263 | cpkp->cpkp_pers_key_len = 0; | |
264 | cpx_init(&cpkp->cpkp_cpx, max_cached_key_len); | |
265 | ||
266 | // Default to using offsets | |
267 | cpx_set_use_offset_for_iv(&cpkp->cpkp_cpx, true); | |
268 | } | |
269 | ||
270 | uint16_t cpkp_max_pers_key_len(const cp_key_pair_t *cpkp) | |
271 | { | |
272 | return cpkp->cpkp_max_pers_key_len; | |
273 | } | |
274 | ||
275 | uint16_t cpkp_pers_key_len(const cp_key_pair_t *cpkp) | |
276 | { | |
277 | return cpkp->cpkp_pers_key_len; | |
278 | } | |
279 | ||
280 | static bool cpkp_has_pers_key(const cp_key_pair_t *cpkp) | |
281 | { | |
282 | return cpkp->cpkp_pers_key_len > 0; | |
283 | } | |
284 | ||
285 | static void *cpkp_pers_key(const cp_key_pair_t *cpkp) | |
286 | { | |
287 | return PTR_ADD(void *, &cpkp->cpkp_cpx, cpx_sizex(&cpkp->cpkp_cpx)); | |
288 | } | |
289 | ||
290 | static void cpkp_set_pers_key_len(cp_key_pair_t *cpkp, uint16_t key_len) | |
291 | { | |
292 | if (key_len > cpkp->cpkp_max_pers_key_len) | |
293 | panic("hfs_cprotect: key too big!"); | |
294 | cpkp->cpkp_pers_key_len = key_len; | |
295 | } | |
296 | ||
297 | #pragma clang diagnostic push | |
298 | #pragma clang diagnostic ignored "-Wcast-qual" | |
299 | cpx_t cpkp_cpx(const cp_key_pair_t *cpkp) | |
300 | { | |
301 | // Cast to remove const qualifier | |
302 | return (cpx_t)&cpkp->cpkp_cpx; | |
303 | } | |
304 | #pragma clang diagnostic pop | |
305 | ||
306 | size_t cpkp_size(uint16_t pers_key_len, uint16_t cached_key_len) | |
307 | { | |
308 | return (sizeof(cp_key_pair_t) - sizeof(struct cpx) | |
309 | + pers_key_len + cpx_size(cached_key_len)); | |
310 | } | |
311 | ||
312 | size_t cpkp_sizex(const cp_key_pair_t *cpkp) | |
313 | { | |
314 | return cpkp_size(cpkp->cpkp_max_pers_key_len, cpkp->cpkp_cpx.cpx_max_key_len); | |
315 | } | |
316 | ||
317 | void cpkp_flush(cp_key_pair_t *cpkp) | |
318 | { | |
319 | cpx_flush(&cpkp->cpkp_cpx); | |
320 | cpkp->cpkp_pers_key_len = 0; | |
321 | bzero(cpkp_pers_key(cpkp), cpkp->cpkp_max_pers_key_len); | |
322 | } | |
323 | ||
324 | bool cpkp_can_copy(const cp_key_pair_t *src, const cp_key_pair_t *dst) | |
325 | { | |
326 | return (cpkp_pers_key_len(src) <= dst->cpkp_max_pers_key_len | |
327 | && cpx_can_copy(&src->cpkp_cpx, &dst->cpkp_cpx)); | |
328 | } | |
329 | ||
330 | void cpkp_copy(const cp_key_pair_t *src, cp_key_pair_t *dst) | |
331 | { | |
332 | const uint16_t key_len = cpkp_pers_key_len(src); | |
333 | cpkp_set_pers_key_len(dst, key_len); | |
334 | memcpy(cpkp_pers_key(dst), cpkp_pers_key(src), key_len); | |
335 | cpx_copy(&src->cpkp_cpx, &dst->cpkp_cpx); | |
336 | } | |
337 | ||
338 | // -- | |
339 | ||
340 | bool cp_is_supported_version(uint16_t vers) | |
341 | { | |
342 | return vers == CP_VERS_4 || vers == CP_VERS_5; | |
343 | } | |
344 | ||
345 | /* | |
346 | * Return the appropriate key and, if requested, the physical offset and | |
347 | * maximum length for a particular I/O operation. | |
348 | */ | |
349 | void cp_io_params(__unused hfsmount_t *hfsmp, cprotect_t cpr, | |
350 | __unused off_rsrc_t off_rsrc, | |
351 | __unused int direction, cp_io_params_t *io_params) | |
352 | { | |
353 | ||
354 | io_params->max_len = INT64_MAX; | |
355 | io_params->phys_offset = -1; | |
356 | io_params->cpx = cpkp_cpx(&cpr->cp_keys); | |
357 | } | |
358 | ||
359 | static void cp_flush_cached_keys(cprotect_t cpr) | |
360 | { | |
361 | cpx_flush(cpkp_cpx(&cpr->cp_keys)); | |
362 | } | |
363 | ||
364 | static bool cp_needs_pers_key(cprotect_t cpr) | |
365 | { | |
366 | if (CP_CLASS(cpr->cp_pclass) == PROTECTION_CLASS_F) | |
367 | return !cpx_has_key(cpkp_cpx(&cpr->cp_keys)); | |
368 | else | |
369 | return !cpkp_has_pers_key(&cpr->cp_keys); | |
370 | } | |
371 | ||
39236c6e | 372 | int |
6d2010ae A |
373 | cp_key_store_action(int action) |
374 | { | |
39236c6e A |
375 | |
376 | if (action < 0 || action > CP_MAX_STATE) { | |
377 | return -1; | |
378 | } | |
fe8ab488 A |
379 | |
380 | /* | |
381 | * The lock state is kept locally to each data protected filesystem to | |
382 | * avoid using globals. Pass along the lock request to each filesystem | |
383 | * we iterate through. | |
384 | */ | |
39236c6e | 385 | |
fe8ab488 A |
386 | /* |
387 | * Upcast the value in 'action' to be a pointer-width unsigned integer. | |
388 | * This avoids issues relating to pointer-width. | |
389 | */ | |
3e170ce0 | 390 | return vfs_iterate(0, cp_lock_vfs_callback, (void*)(uintptr_t)action); |
6d2010ae A |
391 | } |
392 | ||
393 | ||
39236c6e | 394 | int |
6d2010ae A |
395 | cp_register_wraps(cp_wrap_func_t key_store_func) |
396 | { | |
39236c6e | 397 | g_cp_wrap_func.new_key = key_store_func->new_key; |
6d2010ae | 398 | g_cp_wrap_func.unwrapper = key_store_func->unwrapper; |
39236c6e A |
399 | g_cp_wrap_func.rewrapper = key_store_func->rewrapper; |
400 | /* do not use invalidater until rdar://12170050 goes in ! */ | |
401 | g_cp_wrap_func.invalidater = key_store_func->invalidater; | |
fe8ab488 | 402 | g_cp_wrap_func.backup_key = key_store_func->backup_key; |
39236c6e | 403 | |
fe8ab488 A |
404 | /* Mark the functions as initialized in the function pointer container */ |
405 | are_wraps_initialized = true; | |
6d2010ae | 406 | |
39236c6e | 407 | return 0; |
316670eb | 408 | } |
316670eb | 409 | |
3e170ce0 A |
410 | static cp_key_revision_t cp_initial_key_revision(__unused hfsmount_t *hfsmp) |
411 | { | |
412 | return 1; | |
413 | } | |
414 | ||
415 | cp_key_revision_t cp_next_key_revision(cp_key_revision_t rev) | |
416 | { | |
417 | rev = (rev + 0x0100) ^ (mach_absolute_time() & 0xff); | |
418 | if (!rev) | |
419 | rev = 1; | |
420 | return rev; | |
421 | } | |
422 | ||
6d2010ae A |
423 | /* |
424 | * Allocate and initialize a cprotect blob for a new cnode. | |
316670eb | 425 | * Called from hfs_getnewvnode: cnode is locked exclusive. |
39236c6e | 426 | * |
6d2010ae A |
427 | * Read xattr data off the cnode. Then, if conditions permit, |
428 | * unwrap the file key and cache it in the cprotect blob. | |
429 | */ | |
39236c6e | 430 | int |
316670eb | 431 | cp_entry_init(struct cnode *cp, struct mount *mp) |
6d2010ae | 432 | { |
316670eb | 433 | struct cprotect *entry = NULL; |
6d2010ae | 434 | int error = 0; |
316670eb A |
435 | struct hfsmount *hfsmp = VFSTOHFS(mp); |
436 | ||
39236c6e A |
437 | /* |
438 | * The cnode should be locked at this point, regardless of whether or not | |
439 | * we are creating a new item in the namespace or vending a vnode on behalf | |
440 | * of lookup. The only time we tell getnewvnode to skip the lock is when | |
441 | * constructing a resource fork vnode. But a resource fork vnode must come | |
442 | * after the regular data fork cnode has already been constructed. | |
443 | */ | |
6d2010ae | 444 | if (!cp_fs_protected (mp)) { |
316670eb | 445 | cp->c_cpentry = NULL; |
6d2010ae A |
446 | return 0; |
447 | } | |
39236c6e | 448 | |
316670eb A |
449 | if (!S_ISREG(cp->c_mode) && !S_ISDIR(cp->c_mode)) { |
450 | cp->c_cpentry = NULL; | |
6d2010ae A |
451 | return 0; |
452 | } | |
39236c6e | 453 | |
fe8ab488 | 454 | if (are_wraps_initialized == false) { |
6d2010ae A |
455 | printf("hfs: cp_update_entry: wrap functions not yet set\n"); |
456 | return ENXIO; | |
457 | } | |
39236c6e | 458 | |
316670eb | 459 | if (hfsmp->hfs_running_cp_major_vers == 0) { |
39236c6e | 460 | panic ("hfs cp: no running mount point version! "); |
316670eb | 461 | } |
39236c6e | 462 | |
316670eb A |
463 | CP_ASSERT (cp->c_cpentry == NULL); |
464 | ||
465 | error = cp_getxattr(cp, hfsmp, &entry); | |
3e170ce0 | 466 | if (error == ENOATTR) { |
39236c6e A |
467 | /* |
468 | * Normally, we should always have a CP EA for a file or directory that | |
469 | * we are initializing here. However, there are some extenuating circumstances, | |
470 | * such as the root directory immediately following a newfs_hfs. | |
471 | * | |
472 | * As a result, we leave code here to deal with an ENOATTR which will always | |
473 | * default to a 'D/NONE' key, though we don't expect to use it much. | |
474 | */ | |
3e170ce0 A |
475 | cp_key_class_t target_class = PROTECTION_CLASS_D; |
476 | ||
39236c6e A |
477 | if (S_ISDIR(cp->c_mode)) { |
478 | target_class = PROTECTION_CLASS_DIR_NONE; | |
3e170ce0 A |
479 | } |
480 | ||
481 | cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp); | |
482 | ||
fe8ab488 | 483 | /* allow keybag to override our class preferences */ |
3e170ce0 A |
484 | error = cp_new (&target_class, hfsmp, cp, cp->c_mode, CP_KEYWRAP_DIFFCLASS, |
485 | key_revision, (cp_new_alloc_fn)cp_entry_alloc, (void **)&entry); | |
39236c6e | 486 | if (error == 0) { |
3e170ce0 A |
487 | entry->cp_pclass = target_class; |
488 | entry->cp_key_os_version = cp_os_version(); | |
489 | entry->cp_key_revision = key_revision; | |
39236c6e | 490 | error = cp_setxattr (cp, entry, hfsmp, cp->c_fileid, XATTR_CREATE); |
316670eb | 491 | } |
316670eb | 492 | } |
39236c6e | 493 | |
316670eb | 494 | /* |
39236c6e A |
495 | * Bail out if: |
496 | * a) error was not ENOATTR (we got something bad from the getxattr call) | |
497 | * b) we encountered an error setting the xattr above. | |
498 | * c) we failed to generate a new cprotect data structure. | |
316670eb | 499 | */ |
39236c6e A |
500 | if (error) { |
501 | goto out; | |
502 | } | |
316670eb A |
503 | |
504 | cp->c_cpentry = entry; | |
505 | ||
39236c6e A |
506 | out: |
507 | if (error == 0) { | |
508 | entry->cp_backing_cnode = cp; | |
316670eb | 509 | } |
39236c6e A |
510 | else { |
511 | if (entry) { | |
3e170ce0 | 512 | cp_entry_destroy(hfsmp, entry); |
39236c6e A |
513 | } |
514 | cp->c_cpentry = NULL; | |
515 | } | |
516 | ||
316670eb A |
517 | return error; |
518 | } | |
519 | ||
520 | /* | |
39236c6e | 521 | * cp_setup_newentry |
316670eb | 522 | * |
39236c6e A |
523 | * Generate a keyless cprotect structure for use with the new AppleKeyStore kext. |
524 | * Since the kext is now responsible for vending us both wrapped/unwrapped keys | |
525 | * we need to create a keyless xattr upon file / directory creation. When we have the inode value | |
526 | * and the file/directory is established, then we can ask it to generate keys. Note that | |
527 | * this introduces a potential race; If the device is locked and the wrapping | |
528 | * keys are purged between the time we call this function and the time we ask it to generate | |
529 | * keys for us, we could have to fail the open(2) call and back out the entry. | |
530 | */ | |
531 | ||
3e170ce0 A |
532 | int cp_setup_newentry (struct hfsmount *hfsmp, struct cnode *dcp, |
533 | cp_key_class_t suppliedclass, mode_t cmode, | |
534 | struct cprotect **tmpentry) | |
316670eb | 535 | { |
39236c6e | 536 | int isdir = 0; |
316670eb | 537 | struct cprotect *entry = NULL; |
39236c6e | 538 | uint32_t target_class = hfsmp->default_cp_class; |
fe8ab488 | 539 | suppliedclass = CP_CLASS(suppliedclass); |
316670eb | 540 | |
39236c6e A |
541 | if (hfsmp->hfs_running_cp_major_vers == 0) { |
542 | panic ("CP: major vers not set in mount!"); | |
543 | } | |
544 | ||
545 | if (S_ISDIR (cmode)) { | |
546 | isdir = 1; | |
547 | } | |
316670eb A |
548 | |
549 | /* Decide the target class. Input argument takes priority. */ | |
39236c6e A |
550 | if (cp_is_valid_class (isdir, suppliedclass)) { |
551 | /* caller supplies -1 if it was not specified so we will default to the mount point value */ | |
552 | target_class = suppliedclass; | |
553 | /* | |
554 | * One exception, F is never valid for a directory | |
316670eb A |
555 | * because its children may inherit and userland will be |
556 | * unable to read/write to the files. | |
6d2010ae | 557 | */ |
39236c6e | 558 | if (isdir) { |
316670eb | 559 | if (target_class == PROTECTION_CLASS_F) { |
39236c6e | 560 | *tmpentry = NULL; |
316670eb A |
561 | return EINVAL; |
562 | } | |
563 | } | |
564 | } | |
565 | else { | |
39236c6e A |
566 | /* |
567 | * If no valid class was supplied, behave differently depending on whether or not | |
568 | * the item being created is a file or directory. | |
569 | * | |
570 | * for FILE: | |
571 | * If parent directory has a non-zero class, use that. | |
572 | * If parent directory has a zero class (not set), then attempt to | |
573 | * apply the mount point default. | |
574 | * | |
575 | * for DIRECTORY: | |
576 | * Directories always inherit from the parent; if the parent | |
577 | * has a NONE class set, then we can continue to use that. | |
578 | */ | |
316670eb | 579 | if ((dcp) && (dcp->c_cpentry)) { |
fe8ab488 | 580 | uint32_t parentclass = CP_CLASS(dcp->c_cpentry->cp_pclass); |
39236c6e A |
581 | /* If the parent class is not valid, default to the mount point value */ |
582 | if (cp_is_valid_class(1, parentclass)) { | |
583 | if (isdir) { | |
584 | target_class = parentclass; | |
585 | } | |
586 | else if (parentclass != PROTECTION_CLASS_DIR_NONE) { | |
587 | /* files can inherit so long as it's not NONE */ | |
588 | target_class = parentclass; | |
589 | } | |
316670eb | 590 | } |
39236c6e | 591 | /* Otherwise, we already defaulted to the mount point's default */ |
6d2010ae | 592 | } |
316670eb | 593 | } |
6d2010ae | 594 | |
39236c6e | 595 | /* Generate the cprotect to vend out */ |
3e170ce0 | 596 | entry = cp_entry_alloc(NULL, 0, 0, NULL); |
39236c6e A |
597 | if (entry == NULL) { |
598 | *tmpentry = NULL; | |
316670eb | 599 | return ENOMEM; |
39236c6e | 600 | } |
316670eb A |
601 | |
602 | /* | |
39236c6e A |
603 | * We don't have keys yet, so fill in what we can. At this point |
604 | * this blob has no keys and it has no backing xattr. We just know the | |
605 | * target class. | |
316670eb | 606 | */ |
3e170ce0 | 607 | entry->cp_flags = CP_NO_XATTR; |
fe8ab488 | 608 | /* Note this is only the effective class */ |
39236c6e A |
609 | entry->cp_pclass = target_class; |
610 | *tmpentry = entry; | |
316670eb | 611 | |
39236c6e A |
612 | return 0; |
613 | } | |
614 | ||
6d2010ae | 615 | /* |
316670eb | 616 | * Set up an initial key/class pair for a disassociated cprotect entry. |
39236c6e | 617 | * This function is used to generate transient keys that will never be |
316670eb A |
618 | * written to disk. We use class F for this since it provides the exact |
619 | * semantics that are needed here. Because we never attach this blob to | |
620 | * a cnode directly, we take a pointer to the cprotect struct. | |
621 | * | |
622 | * This function is primarily used in the HFS FS truncation codepath | |
623 | * where we may rely on AES symmetry to relocate encrypted data from | |
624 | * one spot in the disk to another. | |
6d2010ae | 625 | */ |
3e170ce0 | 626 | int cpx_gentempkeys(cpx_t *pcpx, __unused struct hfsmount *hfsmp) |
39236c6e | 627 | { |
3e170ce0 | 628 | cpx_t cpx = cpx_alloc(CP_MAX_KEYSIZE); |
39236c6e | 629 | |
3e170ce0 A |
630 | cpx_set_key_len(cpx, CP_MAX_KEYSIZE); |
631 | read_random(cpx_key(cpx), CP_MAX_KEYSIZE); | |
632 | cpx_set_use_offset_for_iv(cpx, true); | |
6d2010ae | 633 | |
3e170ce0 | 634 | *pcpx = cpx; |
316670eb | 635 | |
39236c6e | 636 | return 0; |
6d2010ae A |
637 | } |
638 | ||
639 | /* | |
640 | * Tear down and clear a cprotect blob for a closing file. | |
39236c6e | 641 | * Called at hfs_reclaim_cnode: cnode is locked exclusive. |
6d2010ae A |
642 | */ |
643 | void | |
3e170ce0 | 644 | cp_entry_destroy(hfsmount_t *hfsmp, struct cprotect *entry_ptr) |
39236c6e A |
645 | { |
646 | if (entry_ptr == NULL) { | |
6d2010ae A |
647 | /* nothing to clean up */ |
648 | return; | |
649 | } | |
3e170ce0 | 650 | cp_entry_dealloc(hfsmp, entry_ptr); |
6d2010ae A |
651 | } |
652 | ||
316670eb | 653 | |
39236c6e A |
654 | int |
655 | cp_fs_protected (mount_t mnt) | |
656 | { | |
6d2010ae A |
657 | return (vfs_flags(mnt) & MNT_CPROTECT); |
658 | } | |
659 | ||
660 | ||
661 | /* | |
662 | * Return a pointer to underlying cnode if there is one for this vnode. | |
663 | * Done without taking cnode lock, inspecting only vnode state. | |
664 | */ | |
316670eb A |
665 | struct cnode * |
666 | cp_get_protected_cnode(struct vnode *vp) | |
6d2010ae A |
667 | { |
668 | if (!cp_vnode_is_eligible(vp)) { | |
669 | return NULL; | |
670 | } | |
39236c6e | 671 | |
6d2010ae A |
672 | if (!cp_fs_protected(VTOVFS(vp))) { |
673 | /* mount point doesn't support it */ | |
674 | return NULL; | |
675 | } | |
39236c6e | 676 | |
316670eb | 677 | return (struct cnode*) vp->v_data; |
6d2010ae A |
678 | } |
679 | ||
680 | ||
681 | /* | |
682 | * Sets *class to persistent class associated with vnode, | |
683 | * or returns error. | |
684 | */ | |
39236c6e | 685 | int |
316670eb | 686 | cp_vnode_getclass(struct vnode *vp, int *class) |
6d2010ae | 687 | { |
316670eb | 688 | struct cprotect *entry; |
6d2010ae | 689 | int error = 0; |
316670eb A |
690 | struct cnode *cp; |
691 | int took_truncate_lock = 0; | |
692 | struct hfsmount *hfsmp = NULL; | |
693 | ||
694 | /* Is this an interesting vp? */ | |
6d2010ae A |
695 | if (!cp_vnode_is_eligible (vp)) { |
696 | return EBADF; | |
697 | } | |
6d2010ae | 698 | |
316670eb A |
699 | /* Is the mount point formatted for content protection? */ |
700 | if (!cp_fs_protected(VTOVFS(vp))) { | |
39236c6e | 701 | return ENOTSUP; |
316670eb | 702 | } |
39236c6e | 703 | |
316670eb A |
704 | cp = VTOC(vp); |
705 | hfsmp = VTOHFS(vp); | |
39236c6e | 706 | |
316670eb | 707 | /* |
39236c6e A |
708 | * Take the truncate lock up-front in shared mode because we may need |
709 | * to manipulate the CP blob. Pend lock events until we're done here. | |
316670eb | 710 | */ |
39236c6e | 711 | hfs_lock_truncate (cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); |
316670eb | 712 | took_truncate_lock = 1; |
6d2010ae | 713 | |
316670eb A |
714 | /* |
715 | * We take only the shared cnode lock up-front. If it turns out that | |
39236c6e A |
716 | * we need to manipulate the CP blob to write a key out, drop the |
717 | * shared cnode lock and acquire an exclusive lock. | |
316670eb | 718 | */ |
39236c6e | 719 | error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT); |
316670eb | 720 | if (error) { |
39236c6e | 721 | hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); |
316670eb A |
722 | return error; |
723 | } | |
39236c6e | 724 | |
316670eb A |
725 | /* pull the class from the live entry */ |
726 | entry = cp->c_cpentry; | |
39236c6e A |
727 | |
728 | if (entry == NULL) { | |
316670eb A |
729 | panic("Content Protection: uninitialized cnode %p", cp); |
730 | } | |
731 | ||
39236c6e | 732 | /* Note that we may not have keys yet, but we know the target class. */ |
6d2010ae | 733 | |
316670eb | 734 | if (error == 0) { |
fe8ab488 | 735 | *class = CP_CLASS(entry->cp_pclass); |
6d2010ae | 736 | } |
39236c6e | 737 | |
316670eb | 738 | if (took_truncate_lock) { |
39236c6e | 739 | hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT); |
316670eb | 740 | } |
39236c6e | 741 | |
316670eb | 742 | hfs_unlock(cp); |
6d2010ae A |
743 | return error; |
744 | } | |
745 | ||
6d2010ae | 746 | /* |
316670eb | 747 | * Sets persistent class for this file or directory. |
6d2010ae A |
748 | * If vnode cannot be protected (system file, non-regular file, non-hfs), EBADF. |
749 | * If the new class can't be accessed now, EPERM. | |
750 | * Otherwise, record class and re-wrap key if the mount point is content-protected. | |
751 | */ | |
39236c6e | 752 | int |
316670eb | 753 | cp_vnode_setclass(struct vnode *vp, uint32_t newclass) |
6d2010ae | 754 | { |
316670eb | 755 | struct cnode *cp; |
6d2010ae A |
756 | struct cprotect *entry = 0; |
757 | int error = 0; | |
316670eb | 758 | int took_truncate_lock = 0; |
316670eb | 759 | struct hfsmount *hfsmp = NULL; |
39236c6e A |
760 | int isdir = 0; |
761 | ||
762 | if (vnode_isdir (vp)) { | |
763 | isdir = 1; | |
6d2010ae A |
764 | } |
765 | ||
fe8ab488 A |
766 | /* Ensure we only use the effective class here */ |
767 | newclass = CP_CLASS(newclass); | |
768 | ||
39236c6e A |
769 | if (!cp_is_valid_class(isdir, newclass)) { |
770 | printf("hfs: CP: cp_setclass called with invalid class %d\n", newclass); | |
771 | return EINVAL; | |
316670eb A |
772 | } |
773 | ||
774 | /* Is this an interesting vp? */ | |
6d2010ae A |
775 | if (!cp_vnode_is_eligible(vp)) { |
776 | return EBADF; | |
777 | } | |
778 | ||
316670eb A |
779 | /* Is the mount point formatted for content protection? */ |
780 | if (!cp_fs_protected(VTOVFS(vp))) { | |
39236c6e | 781 | return ENOTSUP; |
316670eb A |
782 | } |
783 | ||
316670eb | 784 | hfsmp = VTOHFS(vp); |
39236c6e A |
785 | if (hfsmp->hfs_flags & HFS_READ_ONLY) { |
786 | return EROFS; | |
787 | } | |
6d2010ae | 788 | |
39236c6e A |
789 | /* |
790 | * Take the cnode truncate lock exclusive because we want to manipulate the | |
316670eb A |
791 | * CP blob. The lock-event handling code is doing the same. This also forces |
792 | * all pending IOs to drain before we can re-write the persistent and cache keys. | |
793 | */ | |
39236c6e A |
794 | cp = VTOC(vp); |
795 | hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); | |
316670eb | 796 | took_truncate_lock = 1; |
39236c6e | 797 | |
fe8ab488 A |
798 | /* |
799 | * The truncate lock is not sufficient to guarantee the CP blob | |
800 | * isn't being used. We must wait for existing writes to finish. | |
801 | */ | |
802 | vnode_waitforwrites(vp, 0, 0, 0, "cp_vnode_setclass"); | |
803 | ||
39236c6e | 804 | if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) { |
6d2010ae A |
805 | return EINVAL; |
806 | } | |
39236c6e | 807 | |
316670eb A |
808 | entry = cp->c_cpentry; |
809 | if (entry == NULL) { | |
810 | error = EINVAL; | |
811 | goto out; | |
812 | } | |
6d2010ae | 813 | |
39236c6e A |
814 | /* |
815 | * re-wrap per-file key with new class. | |
816 | * Generate an entirely new key if switching to F. | |
817 | */ | |
818 | if (vnode_isreg(vp)) { | |
819 | /* | |
820 | * The vnode is a file. Before proceeding with the re-wrap, we need | |
821 | * to unwrap the keys before proceeding. This is to ensure that | |
822 | * the destination class's properties still work appropriately for the | |
823 | * target class (since B allows I/O but an unwrap prior to the next unlock | |
824 | * will not be allowed). | |
316670eb | 825 | */ |
3e170ce0 | 826 | if (!cpx_has_key(&entry->cp_keys.cpkp_cpx)) { |
39236c6e A |
827 | error = cp_restore_keys (entry, hfsmp, cp); |
828 | if (error) { | |
829 | goto out; | |
830 | } | |
831 | } | |
3e170ce0 | 832 | |
39236c6e A |
833 | if (newclass == PROTECTION_CLASS_F) { |
834 | /* Verify that file is blockless if switching to class F */ | |
835 | if (cp->c_datafork->ff_size > 0) { | |
836 | error = EINVAL; | |
3e170ce0 | 837 | goto out; |
39236c6e | 838 | } |
6d2010ae | 839 | |
3e170ce0 A |
840 | cp_key_pair_t *cpkp; |
841 | cprotect_t new_entry = cp_entry_alloc(NULL, 0, CP_MAX_KEYSIZE, &cpkp); | |
fe8ab488 | 842 | |
3e170ce0 A |
843 | if (!new_entry) { |
844 | error = ENOMEM; | |
39236c6e A |
845 | goto out; |
846 | } | |
3e170ce0 A |
847 | |
848 | /* newclass is only the effective class */ | |
849 | new_entry->cp_pclass = newclass; | |
850 | new_entry->cp_key_os_version = cp_os_version(); | |
851 | new_entry->cp_key_revision = cp_next_key_revision(entry->cp_key_revision); | |
852 | ||
853 | cpx_t cpx = cpkp_cpx(cpkp); | |
854 | ||
855 | /* Class F files are not wrapped, so they continue to use MAX_KEYSIZE */ | |
856 | cpx_set_key_len(cpx, CP_MAX_KEYSIZE); | |
857 | read_random (cpx_key(cpx), CP_MAX_KEYSIZE); | |
858 | ||
859 | cp_replace_entry(hfsmp, cp, new_entry); | |
860 | ||
861 | error = 0; | |
862 | goto out; | |
863 | } | |
864 | ||
865 | /* Deny the setclass if file is to be moved from F to something else */ | |
866 | if (entry->cp_pclass == PROTECTION_CLASS_F) { | |
867 | error = EPERM; | |
868 | goto out; | |
869 | } | |
870 | ||
871 | if (!cpkp_has_pers_key(&entry->cp_keys)) { | |
872 | struct cprotect *new_entry = NULL; | |
873 | /* | |
874 | * We want to fail if we can't wrap to the target class. By not setting | |
875 | * CP_KEYWRAP_DIFFCLASS, we tell keygeneration that if it can't wrap | |
876 | * to 'newclass' then error out. | |
877 | */ | |
878 | uint32_t flags = 0; | |
879 | error = cp_generate_keys (hfsmp, cp, newclass, flags, &new_entry); | |
880 | if (error == 0) { | |
881 | cp_replace_entry (hfsmp, cp, new_entry); | |
39236c6e | 882 | } |
3e170ce0 A |
883 | /* Bypass the setxattr code below since generate_keys does it for us */ |
884 | goto out; | |
39236c6e | 885 | } |
3e170ce0 A |
886 | |
887 | cprotect_t new_entry; | |
888 | error = cp_rewrap(cp, hfsmp, &newclass, &entry->cp_keys, entry, | |
889 | (cp_new_alloc_fn)cp_entry_alloc, (void **)&new_entry); | |
6d2010ae A |
890 | if (error) { |
891 | /* we didn't have perms to set this class. leave file as-is and error out */ | |
892 | goto out; | |
893 | } | |
3e170ce0 A |
894 | |
895 | ||
896 | new_entry->cp_pclass = newclass; | |
897 | ||
898 | cp_replace_entry(hfsmp, cp, new_entry); | |
899 | entry = new_entry; | |
316670eb | 900 | } |
39236c6e | 901 | else if (vnode_isdir(vp)) { |
fe8ab488 | 902 | /* For directories, just update the pclass. newclass is only effective class */ |
39236c6e A |
903 | entry->cp_pclass = newclass; |
904 | error = 0; | |
905 | } | |
906 | else { | |
907 | /* anything else, just error out */ | |
908 | error = EINVAL; | |
909 | goto out; | |
910 | } | |
316670eb | 911 | |
39236c6e A |
912 | /* |
913 | * We get here if the new class was F, or if we were re-wrapping a cprotect that already | |
914 | * existed. If the keys were never generated, then they'll skip the setxattr calls. | |
915 | */ | |
916 | ||
917 | error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_REPLACE); | |
918 | if (error == ENOATTR) { | |
919 | error = cp_setxattr(cp, cp->c_cpentry, VTOHFS(vp), 0, XATTR_CREATE); | |
920 | } | |
6d2010ae | 921 | |
316670eb | 922 | out: |
39236c6e | 923 | |
316670eb | 924 | if (took_truncate_lock) { |
39236c6e | 925 | hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT); |
316670eb A |
926 | } |
927 | hfs_unlock(cp); | |
928 | return error; | |
929 | } | |
930 | ||
931 | ||
fe8ab488 | 932 | int cp_vnode_transcode(vnode_t vp, void *key, unsigned *len) |
316670eb A |
933 | { |
934 | struct cnode *cp; | |
935 | struct cprotect *entry = 0; | |
936 | int error = 0; | |
937 | int took_truncate_lock = 0; | |
938 | struct hfsmount *hfsmp = NULL; | |
939 | ||
39236c6e A |
940 | /* Structures passed between HFS and AKS */ |
941 | cp_cred_s access_in; | |
fe8ab488 | 942 | cp_wrapped_key_s wrapped_key_in, wrapped_key_out; |
39236c6e | 943 | |
316670eb A |
944 | /* Is this an interesting vp? */ |
945 | if (!cp_vnode_is_eligible(vp)) { | |
946 | return EBADF; | |
947 | } | |
948 | ||
949 | /* Is the mount point formatted for content protection? */ | |
950 | if (!cp_fs_protected(VTOVFS(vp))) { | |
39236c6e | 951 | return ENOTSUP; |
6d2010ae A |
952 | } |
953 | ||
316670eb A |
954 | cp = VTOC(vp); |
955 | hfsmp = VTOHFS(vp); | |
956 | ||
39236c6e A |
957 | /* |
958 | * Take the cnode truncate lock exclusive because we want to manipulate the | |
316670eb A |
959 | * CP blob. The lock-event handling code is doing the same. This also forces |
960 | * all pending IOs to drain before we can re-write the persistent and cache keys. | |
961 | */ | |
39236c6e | 962 | hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT); |
316670eb | 963 | took_truncate_lock = 1; |
39236c6e A |
964 | |
965 | if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) { | |
316670eb A |
966 | return EINVAL; |
967 | } | |
39236c6e | 968 | |
316670eb A |
969 | entry = cp->c_cpentry; |
970 | if (entry == NULL) { | |
971 | error = EINVAL; | |
972 | goto out; | |
973 | } | |
974 | ||
39236c6e | 975 | /* Send the per-file key in wrapped form for re-wrap with the current class information |
316670eb A |
976 | * Send NULLs in the output parameters of the wrapper() and AKS will do the rest. |
977 | * Don't need to process any outputs, so just clear the locks and pass along the error. */ | |
978 | if (vnode_isreg(vp)) { | |
979 | ||
980 | /* Picked up the following from cp_wrap(). | |
981 | * If needed, more comments available there. */ | |
982 | ||
fe8ab488 | 983 | if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) { |
316670eb A |
984 | error = EINVAL; |
985 | goto out; | |
986 | } | |
987 | ||
39236c6e A |
988 | cp_init_access(&access_in, cp); |
989 | ||
990 | bzero(&wrapped_key_in, sizeof(wrapped_key_in)); | |
fe8ab488 | 991 | bzero(&wrapped_key_out, sizeof(wrapped_key_out)); |
3e170ce0 A |
992 | |
993 | cp_key_pair_t *cpkp = &entry->cp_keys; | |
994 | ||
995 | ||
996 | wrapped_key_in.key = cpkp_pers_key(cpkp); | |
997 | wrapped_key_in.key_len = cpkp_pers_key_len(cpkp); | |
998 | ||
999 | if (!wrapped_key_in.key_len) { | |
1000 | error = EINVAL; | |
1001 | goto out; | |
1002 | } | |
1003 | ||
fe8ab488 | 1004 | /* Use the actual persistent class when talking to AKS */ |
39236c6e | 1005 | wrapped_key_in.dp_class = entry->cp_pclass; |
fe8ab488 A |
1006 | wrapped_key_out.key = key; |
1007 | wrapped_key_out.key_len = *len; | |
39236c6e | 1008 | |
fe8ab488 | 1009 | error = g_cp_wrap_func.backup_key(&access_in, |
39236c6e | 1010 | &wrapped_key_in, |
fe8ab488 | 1011 | &wrapped_key_out); |
316670eb A |
1012 | |
1013 | if(error) | |
1014 | error = EPERM; | |
fe8ab488 A |
1015 | else |
1016 | *len = wrapped_key_out.key_len; | |
6d2010ae A |
1017 | } |
1018 | ||
1019 | out: | |
316670eb | 1020 | if (took_truncate_lock) { |
39236c6e | 1021 | hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT); |
316670eb A |
1022 | } |
1023 | hfs_unlock(cp); | |
6d2010ae A |
1024 | return error; |
1025 | } | |
1026 | ||
316670eb | 1027 | |
6d2010ae | 1028 | /* |
316670eb | 1029 | * Check permission for the given operation (read, write) on this node. |
6d2010ae A |
1030 | * Additionally, if the node needs work, do it: |
1031 | * - create a new key for the file if one hasn't been set before | |
1032 | * - write out the xattr if it hasn't already been saved | |
1033 | * - unwrap the key if needed | |
1034 | * | |
1035 | * Takes cnode lock, and upgrades to exclusive if modifying cprotect. | |
316670eb | 1036 | * |
39236c6e | 1037 | * Note that this function does *NOT* take the cnode truncate lock. This is because |
316670eb A |
1038 | * the thread calling us may already have the truncate lock. It is not necessary |
1039 | * because either we successfully finish this function before the keys are tossed | |
39236c6e | 1040 | * and the IO will fail, or the keys are tossed and then this function will fail. |
316670eb | 1041 | * Either way, the cnode lock still ultimately guards the keys. We only rely on the |
39236c6e | 1042 | * truncate lock to protect us against tossing the keys as a cluster call is in-flight. |
6d2010ae | 1043 | */ |
316670eb A |
1044 | int |
1045 | cp_handle_vnop(struct vnode *vp, int vnop, int ioflag) | |
6d2010ae A |
1046 | { |
1047 | struct cprotect *entry; | |
1048 | int error = 0; | |
316670eb A |
1049 | struct hfsmount *hfsmp = NULL; |
1050 | struct cnode *cp = NULL; | |
6d2010ae | 1051 | |
39236c6e | 1052 | /* |
316670eb A |
1053 | * First, do validation against the vnode before proceeding any further: |
1054 | * Is this vnode originating from a valid content-protected filesystem ? | |
1055 | */ | |
1056 | if (cp_vnode_is_eligible(vp) == 0) { | |
39236c6e | 1057 | /* |
316670eb A |
1058 | * It is either not HFS or not a file/dir. Just return success. This is a valid |
1059 | * case if servicing i/o against another filesystem type from VFS | |
1060 | */ | |
1061 | return 0; | |
1062 | } | |
1063 | ||
1064 | if (cp_fs_protected (VTOVFS(vp)) == 0) { | |
1065 | /* | |
39236c6e | 1066 | * The underlying filesystem does not support content protection. This is also |
316670eb A |
1067 | * a valid case. Simply return success. |
1068 | */ | |
1069 | return 0; | |
1070 | } | |
39236c6e A |
1071 | |
1072 | /* | |
316670eb A |
1073 | * At this point, we know we have a HFS vnode that backs a file or directory on a |
1074 | * filesystem that supports content protection | |
1075 | */ | |
1076 | cp = VTOC(vp); | |
1077 | ||
39236c6e | 1078 | if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) { |
6d2010ae A |
1079 | return error; |
1080 | } | |
1081 | ||
316670eb | 1082 | entry = cp->c_cpentry; |
39236c6e A |
1083 | |
1084 | if (entry == NULL) { | |
316670eb A |
1085 | /* |
1086 | * If this cnode is not content protected, simply return success. | |
39236c6e | 1087 | * Note that this function is called by all I/O-based call sites |
316670eb A |
1088 | * when CONFIG_PROTECT is enabled during XNU building. |
1089 | */ | |
6d2010ae | 1090 | |
39236c6e A |
1091 | /* |
1092 | * All files should have cprotect structs. It's possible to encounter | |
1093 | * a directory from a V2.0 CP system but all files should have protection | |
1094 | * EAs | |
1095 | */ | |
1096 | if (vnode_isreg(vp)) { | |
1097 | error = EPERM; | |
1098 | } | |
1099 | ||
6d2010ae A |
1100 | goto out; |
1101 | } | |
1102 | ||
316670eb A |
1103 | vp = CTOV(cp, 0); |
1104 | if (vp == NULL) { | |
1105 | /* is it a rsrc */ | |
1106 | vp = CTOV(cp,1); | |
1107 | if (vp == NULL) { | |
1108 | error = EINVAL; | |
1109 | goto out; | |
1110 | } | |
1111 | } | |
1112 | hfsmp = VTOHFS(vp); | |
1113 | ||
fe8ab488 | 1114 | if ((error = cp_check_access(cp, hfsmp, vnop))) { |
316670eb | 1115 | /* check for raw encrypted access before bailing out */ |
3e170ce0 A |
1116 | if ((ioflag & IO_ENCRYPTED) |
1117 | && (vnop == CP_READ_ACCESS)) { | |
39236c6e A |
1118 | /* |
1119 | * read access only + asking for the raw encrypted bytes | |
316670eb A |
1120 | * is legitimate, so reset the error value to 0 |
1121 | */ | |
1122 | error = 0; | |
1123 | } | |
1124 | else { | |
1125 | goto out; | |
1126 | } | |
1127 | } | |
1128 | ||
3e170ce0 A |
1129 | if (!ISSET(entry->cp_flags, CP_NO_XATTR)) { |
1130 | if (!S_ISREG(cp->c_mode)) | |
1131 | goto out; | |
1132 | ||
1133 | // If we have a persistent key and the cached key, we're done | |
1134 | if (!cp_needs_pers_key(entry) | |
1135 | && cpx_has_key(cpkp_cpx(&entry->cp_keys))) { | |
1136 | goto out; | |
1137 | } | |
6d2010ae A |
1138 | } |
1139 | ||
1140 | /* upgrade to exclusive lock */ | |
316670eb | 1141 | if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) { |
39236c6e | 1142 | if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { |
6d2010ae A |
1143 | return error; |
1144 | } | |
1145 | } else { | |
316670eb | 1146 | cp->c_lockowner = current_thread(); |
6d2010ae | 1147 | } |
39236c6e | 1148 | |
6d2010ae | 1149 | /* generate new keys if none have ever been saved */ |
3e170ce0 | 1150 | if (cp_needs_pers_key(entry)) { |
39236c6e | 1151 | struct cprotect *newentry = NULL; |
fe8ab488 A |
1152 | /* |
1153 | * It's ok if this ends up being wrapped in a different class than 'pclass'. | |
1154 | * class modification is OK here. | |
1155 | */ | |
1156 | uint32_t flags = CP_KEYWRAP_DIFFCLASS; | |
1157 | ||
1158 | error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry); | |
39236c6e | 1159 | if (error == 0) { |
3e170ce0 | 1160 | cp_replace_entry (hfsmp, cp, newentry); |
39236c6e A |
1161 | entry = newentry; |
1162 | } | |
1163 | else { | |
1164 | goto out; | |
1165 | } | |
316670eb A |
1166 | } |
1167 | ||
1168 | /* unwrap keys if needed */ | |
3e170ce0 | 1169 | if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) { |
316670eb A |
1170 | if ((vnop == CP_READ_ACCESS) && (ioflag & IO_ENCRYPTED)) { |
1171 | /* no need to try to restore keys; they are not going to be used */ | |
1172 | error = 0; | |
1173 | } | |
1174 | else { | |
39236c6e | 1175 | error = cp_restore_keys(entry, hfsmp, cp); |
316670eb A |
1176 | if (error) { |
1177 | goto out; | |
1178 | } | |
6d2010ae A |
1179 | } |
1180 | } | |
1181 | ||
316670eb A |
1182 | /* write out the xattr if it's new */ |
1183 | if (entry->cp_flags & CP_NO_XATTR) | |
1184 | error = cp_setxattr(cp, entry, VTOHFS(cp->c_vp), 0, XATTR_CREATE); | |
1185 | ||
1186 | out: | |
1187 | ||
1188 | hfs_unlock(cp); | |
1189 | return error; | |
1190 | } | |
1191 | ||
3e170ce0 A |
#if HFS_TMPDBG
#if !SECURE_KERNEL
/*
 * Debug-only helper: print one line identifying the calling process, the
 * protection class involved, the operation (open vs create), and the file
 * name (borrowed from the namecache) when a CP check fails with EPERM.
 */
static void cp_log_eperm (struct vnode* vp, int pclass, boolean_t create) {
	char procname[256] = {};
	const char *fname;
	const char *dbgop;
	int pid;

	pid = proc_selfpid();
	/* selfname does a strlcpy so we're OK */
	proc_selfname(procname, sizeof(procname));

	/* steal from the namecache */
	fname = (vp && vp->v_name) ? vp->v_name : "unknown";
	dbgop = create ? "create" : "open";

	printf("proc %s (pid %d) class %d, op: %s failure @ file %s\n", procname, pid, pclass, dbgop, fname);
}
#endif
#endif
1215 | ||
316670eb A |
1216 | |
1217 | int | |
1218 | cp_handle_open(struct vnode *vp, int mode) | |
1219 | { | |
1220 | struct cnode *cp = NULL ; | |
1221 | struct cprotect *entry = NULL; | |
39236c6e | 1222 | struct hfsmount *hfsmp; |
316670eb | 1223 | int error = 0; |
39236c6e | 1224 | |
316670eb A |
1225 | /* If vnode not eligible, just return success */ |
1226 | if (!cp_vnode_is_eligible(vp)) { | |
1227 | return 0; | |
1228 | } | |
39236c6e | 1229 | |
316670eb A |
1230 | /* If mount point not properly set up, then also return success */ |
1231 | if (!cp_fs_protected(VTOVFS(vp))) { | |
1232 | return 0; | |
1233 | } | |
1234 | ||
316670eb | 1235 | cp = VTOC(vp); |
3e170ce0 A |
1236 | |
1237 | // Allow if raw encrypted mode requested | |
1238 | if (ISSET(mode, FENCRYPTED)) { | |
1239 | return 0; | |
1240 | } | |
1241 | if (ISSET(mode, FUNENCRYPTED)) { | |
1242 | return 0; | |
1243 | } | |
1244 | ||
1245 | /* We know the vnode is in a valid state. Acquire cnode and validate */ | |
39236c6e | 1246 | hfsmp = VTOHFS(vp); |
316670eb | 1247 | |
39236c6e | 1248 | if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) { |
316670eb | 1249 | return error; |
6d2010ae A |
1250 | } |
1251 | ||
316670eb | 1252 | entry = cp->c_cpentry; |
39236c6e A |
1253 | if (entry == NULL) { |
1254 | /* | |
1255 | * If the mount is protected and we couldn't get a cprotect for this vnode, | |
1256 | * then it's not valid for opening. | |
1257 | */ | |
1258 | if (vnode_isreg(vp)) { | |
1259 | error = EPERM; | |
1260 | } | |
316670eb | 1261 | goto out; |
39236c6e | 1262 | } |
316670eb A |
1263 | |
1264 | if (!S_ISREG(cp->c_mode)) | |
1265 | goto out; | |
1266 | ||
39236c6e A |
1267 | /* |
1268 | * Does the cnode have keys yet? If not, then generate them. | |
1269 | */ | |
3e170ce0 | 1270 | if (cp_needs_pers_key(entry)) { |
39236c6e | 1271 | struct cprotect *newentry = NULL; |
fe8ab488 A |
1272 | /* Allow the keybag to override our class preferences */ |
1273 | uint32_t flags = CP_KEYWRAP_DIFFCLASS; | |
1274 | error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), flags, &newentry); | |
39236c6e | 1275 | if (error == 0) { |
3e170ce0 | 1276 | cp_replace_entry (hfsmp, cp, newentry); |
39236c6e A |
1277 | entry = newentry; |
1278 | } | |
1279 | else { | |
1280 | goto out; | |
1281 | } | |
1282 | } | |
1283 | ||
1284 | /* | |
1285 | * We want to minimize the number of unwraps that we'll have to do since | |
1286 | * the cost can vary, depending on the platform we're running. | |
1287 | */ | |
fe8ab488 | 1288 | switch (CP_CLASS(entry->cp_pclass)) { |
316670eb | 1289 | case PROTECTION_CLASS_B: |
39236c6e A |
1290 | if (mode & O_CREAT) { |
1291 | /* | |
1292 | * Class B always allows creation. Since O_CREAT was passed through | |
1293 | * we infer that this was a newly created vnode/cnode. Even though a potential | |
1294 | * race exists when multiple threads attempt to create/open a particular | |
1295 | * file, only one can "win" and actually create it. VFS will unset the | |
1296 | * O_CREAT bit on the loser. | |
1297 | * | |
1298 | * Note that skipping the unwrap check here is not a security issue -- | |
1299 | * we have to unwrap the key permanently upon the first I/O. | |
1300 | */ | |
1301 | break; | |
1302 | } | |
1303 | ||
3e170ce0 | 1304 | if (cpx_has_key(cpkp_cpx(&entry->cp_keys)) && !ISSET(mode, FENCRYPTED)) { |
39236c6e A |
1305 | /* |
1306 | * For a class B file, attempt the unwrap if we have the key in | |
1307 | * core already. | |
1308 | * The device could have just transitioned into the lock state, and | |
1309 | * this vnode may not yet have been purged from the vnode cache (which would | |
1310 | * remove the keys). | |
1311 | */ | |
1312 | cp_cred_s access_in; | |
1313 | cp_wrapped_key_s wrapped_key_in; | |
1314 | ||
1315 | cp_init_access(&access_in, cp); | |
1316 | bzero(&wrapped_key_in, sizeof(wrapped_key_in)); | |
3e170ce0 A |
1317 | wrapped_key_in.key = cpkp_pers_key(&entry->cp_keys); |
1318 | wrapped_key_in.key_len = cpkp_pers_key_len(&entry->cp_keys); | |
fe8ab488 | 1319 | /* Use the persistent class when talking to AKS */ |
39236c6e A |
1320 | wrapped_key_in.dp_class = entry->cp_pclass; |
1321 | error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, NULL); | |
1322 | if (error) { | |
1323 | error = EPERM; | |
1324 | } | |
1325 | break; | |
1326 | } | |
1327 | /* otherwise, fall through to attempt the unwrap/restore */ | |
316670eb | 1328 | case PROTECTION_CLASS_A: |
39236c6e A |
1329 | case PROTECTION_CLASS_C: |
1330 | /* | |
1331 | * At this point, we know that we need to attempt an unwrap if needed; we want | |
1332 | * to makes sure that open(2) fails properly if the device is either just-locked | |
1333 | * or never made it past first unlock. Since the keybag serializes access to the | |
1334 | * unwrapping keys for us and only calls our VFS callback once they've been purged, | |
1335 | * we will get here in two cases: | |
1336 | * | |
1337 | * A) we're in a window before the wrapping keys are purged; this is OK since when they get | |
1338 | * purged, the vnode will get flushed if needed. | |
1339 | * | |
1340 | * B) The keys are already gone. In this case, the restore_keys call below will fail. | |
1341 | * | |
1342 | * Since this function is bypassed entirely if we're opening a raw encrypted file, | |
1343 | * we can always attempt the restore. | |
1344 | */ | |
3e170ce0 | 1345 | if (!cpx_has_key(cpkp_cpx(&entry->cp_keys))) { |
39236c6e A |
1346 | error = cp_restore_keys(entry, hfsmp, cp); |
1347 | } | |
1348 | ||
1349 | if (error) { | |
316670eb | 1350 | error = EPERM; |
39236c6e A |
1351 | } |
1352 | ||
316670eb | 1353 | break; |
39236c6e A |
1354 | |
1355 | case PROTECTION_CLASS_D: | |
316670eb A |
1356 | default: |
1357 | break; | |
6d2010ae A |
1358 | } |
1359 | ||
1360 | out: | |
6d2010ae | 1361 | |
3e170ce0 A |
1362 | #if HFS_TMPDBG |
1363 | #if !SECURE_KERNEL | |
1364 | if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) { | |
1365 | cp_log_eperm (vp, CP_CLASS(entry->cp_pclass), false); | |
39236c6e | 1366 | } |
3e170ce0 A |
1367 | #endif |
1368 | #endif | |
6d2010ae | 1369 | |
3e170ce0 | 1370 | hfs_unlock(cp); |
6d2010ae A |
1371 | return error; |
1372 | } | |
1373 | ||
3e170ce0 | 1374 | |
6d2010ae A |
1375 | /* |
1376 | * cp_getrootxattr: | |
1377 | * Gets the EA we set on the root folder (fileid 1) to get information about the | |
1378 | * version of Content Protection that was used to write to this filesystem. | |
1379 | * Note that all multi-byte fields are written to disk little endian so they must be | |
1380 | * converted to native endian-ness as needed. | |
1381 | */ | |
/*
 * Read the CP root xattr (fileid 1) into *outxattr, converting the on-disk
 * little-endian fields to host order.  Returns 0 on success, the xattr
 * lookup error, or HFS_EINCONSISTENT if the EA is too short for its
 * advertised version.
 */
int
cp_getrootxattr(struct hfsmount* hfsmp, struct cp_root_xattr *outxattr)
{
	uio_t auio;
	char uio_buf[UIO_SIZEOF(1)];
	void *buf;

	/*
	 * We allow for an extra 64 bytes to cater for upgrades. This wouldn't
	 * be necessary if the xattr routines just returned what we asked for.
	 */
	size_t attrsize = roundup(sizeof(struct cp_root_xattr) + 64, 64);

	int error = 0;
	struct vnop_getxattr_args args;

	if (!outxattr) {
		panic("Content Protection: cp_xattr called with xattr == NULL");
	}

	/* M_WAITOK: this MALLOC cannot fail, no NULL check needed. */
	MALLOC(buf, void *, attrsize, M_TEMP, M_WAITOK);

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(buf), attrsize);

	args.a_desc = NULL; // unused
	args.a_vp = NULL; // unused since we're reading the EA off the root folder.
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = auio;
	args.a_size = &attrsize;	/* updated to the actual EA size on return */
	args.a_options = XATTR_REPLACE;
	args.a_context = NULL; // unused

	error = hfs_getxattr_internal(NULL, &args, hfsmp, 1);

	if (error != 0) {
		goto out;
	}

	/* Must at least hold the version/flags prefix common to all versions. */
	if (attrsize < CP_ROOT_XATTR_MIN_LEN) {
		error = HFS_EINCONSISTENT;
		goto out;
	}

	const struct cp_root_xattr *xattr = buf;

	bzero(outxattr, sizeof(*outxattr));

	/* Now convert the multi-byte fields to native endianness */
	outxattr->major_version = OSSwapLittleToHostInt16(xattr->major_version);
	outxattr->minor_version = OSSwapLittleToHostInt16(xattr->minor_version);
	outxattr->flags = OSSwapLittleToHostInt64(xattr->flags);

	/* A v5+ EA must be at least the full current struct size. */
	if (outxattr->major_version >= CP_VERS_5) {
		if (attrsize < sizeof(struct cp_root_xattr)) {
			error = HFS_EINCONSISTENT;
			goto out;
		}
	}

out:
	uio_free(auio);
	FREE(buf, M_TEMP);
	return error;
}
1447 | ||
1448 | /* | |
1449 | * cp_setrootxattr: | |
1450 | * Sets the EA we set on the root folder (fileid 1) to get information about the | |
1451 | * version of Content Protection that was used to write to this filesystem. | |
1452 | * Note that all multi-byte fields are written to disk little endian so they must be | |
1453 | * converted to little endian as needed. | |
1454 | * | |
1455 | * This will be written to the disk when it detects the EA is not there, or when we need | |
1456 | * to make a modification to the on-disk version that can be done in-place. | |
1457 | */ | |
int
cp_setrootxattr(struct hfsmount *hfsmp, struct cp_root_xattr *newxattr)
{
	int error = 0;
	struct vnop_setxattr_args args;

	args.a_desc = NULL;
	args.a_vp = NULL;
	args.a_name = CONTENT_PROTECTION_XATTR_NAME;
	args.a_uio = NULL; //pass data ptr instead
	args.a_options = 0;
	args.a_context = NULL; //no context needed, only done from mount.

	/* Keep a host-order copy so we can mirror it into hfsmp on success. */
	/*
	 * NOTE(review): 'flags' is captured in a uint32_t but the field below is
	 * byte-swapped with the 64-bit swapper (and read back with the 64-bit
	 * swapper in cp_getrootxattr) -- confirm cp_root_xattr.flags width;
	 * a 64-bit field would be truncated here.
	 */
	const uint32_t flags = newxattr->flags;

	/* Now convert the multi-byte fields to little endian before writing to disk. */
	/* NOTE: this byte-swaps the caller's struct in place; 'newxattr' is not
	 * usable in host order after this call. */
	newxattr->flags = OSSwapHostToLittleInt64(newxattr->flags);

	int xattr_size = sizeof(struct cp_root_xattr);


	newxattr->major_version = OSSwapHostToLittleInt16(newxattr->major_version);
	newxattr->minor_version = OSSwapHostToLittleInt16(newxattr->minor_version);

	error = hfs_setxattr_internal(NULL, (caddr_t)newxattr,
								  xattr_size, &args, hfsmp, 1);

	/* Cache the (host-order) root flags on the mount once they're on disk. */
	if (!error) {
		hfsmp->cproot_flags = flags;
	}

	return error;
}
1491 | ||
1492 | ||
316670eb A |
1493 | /* |
1494 | * Stores new xattr data on the cnode. | |
1495 | * cnode lock held exclusive (if available). | |
1496 | * | |
1497 | * This function is also invoked during file creation. | |
1498 | */ | |
/*
 * Serialize 'entry' as a v5 CP xattr and write it to 'fileid' (or to
 * cp->c_vp when cp is non-NULL).  'options' is passed straight through to
 * the xattr layer (XATTR_CREATE / XATTR_REPLACE).  Lazily upgrades the
 * volume's root CP version to CP_CURRENT_VERS on the first write against
 * an older volume.  Clears CP_NO_XATTR on success.
 * Caller holds the cnode lock exclusive (see block comment above).
 */
int cp_setxattr(struct cnode *cp, struct cprotect *entry, struct hfsmount *hfsmp,
				uint32_t fileid, int options)
{
	int error = 0;
	cp_key_pair_t *cpkp = &entry->cp_keys;

	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		return EROFS;
	}

	if (hfsmp->hfs_running_cp_major_vers < CP_CURRENT_VERS) {
		// Upgrade
		printf("hfs: upgrading to cp version %u\n", CP_CURRENT_VERS);

		struct cp_root_xattr root_xattr;

		/* Read-modify-write the root EA so other fields are preserved. */
		error = cp_getrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		root_xattr.major_version = CP_CURRENT_VERS;
		root_xattr.minor_version = CP_MINOR_VERS;

		error = cp_setrootxattr(hfsmp, &root_xattr);
		if (error)
			return error;

		/* Only bump the in-core version once the new root EA is on disk. */
		hfsmp->hfs_running_cp_major_vers = CP_CURRENT_VERS;
	}

	struct cp_xattr_v5 *xattr;
	/* M_WAITOK: cannot fail, no NULL check needed. */
	MALLOC(xattr, struct cp_xattr_v5 *, sizeof(*xattr), M_TEMP, M_WAITOK);

	/* All multi-byte fields are stored little-endian on disk. */
	xattr->xattr_major_version = OSSwapHostToLittleConstInt16(CP_VERS_5);
	xattr->xattr_minor_version = OSSwapHostToLittleConstInt16(CP_MINOR_VERS);
	xattr->flags = 0;
	xattr->persistent_class = OSSwapHostToLittleInt32(entry->cp_pclass);
	xattr->key_os_version = OSSwapHostToLittleInt32(entry->cp_key_os_version);
	xattr->key_revision = OSSwapHostToLittleInt16(entry->cp_key_revision);

	uint16_t key_len = cpkp_pers_key_len(cpkp);
	xattr->key_len = OSSwapHostToLittleInt16(key_len);
	memcpy(xattr->persistent_key, cpkp_pers_key(cpkp), key_len);

	/* Write only up to the end of the actual key, not the full struct. */
	size_t xattr_len = offsetof(struct cp_xattr_v5, persistent_key) + key_len;


	struct vnop_setxattr_args args = {
		.a_vp		= cp ? cp->c_vp : NULL,
		.a_name		= CONTENT_PROTECTION_XATTR_NAME,
		.a_options	= options,
		.a_context	= vfs_context_current(),
	};

	error = hfs_setxattr_internal(cp, xattr, xattr_len, &args, hfsmp, fileid);

	FREE(xattr, M_TEMP);

	if (error == 0 ) {
		/* The EA now exists on disk; the in-core entry is no longer "new". */
		entry->cp_flags &= ~CP_NO_XATTR;
	}

	return error;
}
1563 | ||
316670eb A |
1564 | /* |
1565 | * Used by an fcntl to query the underlying FS for its content protection version # | |
1566 | */ | |
1567 | ||
39236c6e A |
1568 | int |
1569 | cp_get_root_major_vers(vnode_t vp, uint32_t *level) | |
1570 | { | |
316670eb A |
1571 | int err = 0; |
1572 | struct hfsmount *hfsmp = NULL; | |
1573 | struct mount *mp = NULL; | |
1574 | ||
1575 | mp = VTOVFS(vp); | |
1576 | ||
1577 | /* check if it supports content protection */ | |
1578 | if (cp_fs_protected(mp) == 0) { | |
39236c6e | 1579 | return ENOTSUP; |
316670eb A |
1580 | } |
1581 | ||
1582 | hfsmp = VFSTOHFS(mp); | |
1583 | /* figure out the level */ | |
1584 | ||
1585 | err = cp_root_major_vers(mp); | |
1586 | ||
1587 | if (err == 0) { | |
1588 | *level = hfsmp->hfs_running_cp_major_vers; | |
1589 | } | |
1590 | /* in error case, cp_root_major_vers will just return EINVAL. Use that */ | |
6d2010ae | 1591 | |
316670eb A |
1592 | return err; |
1593 | } | |
6d2010ae | 1594 | |
39236c6e A |
1595 | /* Used by fcntl to query default protection level of FS */ |
1596 | int cp_get_default_level (struct vnode *vp, uint32_t *level) { | |
1597 | int err = 0; | |
1598 | struct hfsmount *hfsmp = NULL; | |
1599 | struct mount *mp = NULL; | |
1600 | ||
1601 | mp = VTOVFS(vp); | |
1602 | ||
1603 | /* check if it supports content protection */ | |
1604 | if (cp_fs_protected(mp) == 0) { | |
1605 | return ENOTSUP; | |
1606 | } | |
1607 | ||
1608 | hfsmp = VFSTOHFS(mp); | |
1609 | /* figure out the default */ | |
1610 | ||
1611 | *level = hfsmp->default_cp_class; | |
1612 | return err; | |
1613 | } | |
1614 | ||
6d2010ae A |
1615 | /******************** |
1616 | * Private Functions | |
1617 | *******************/ | |
1618 | ||
1619 | static int | |
316670eb A |
1620 | cp_root_major_vers(mount_t mp) |
1621 | { | |
1622 | int err = 0; | |
1623 | struct cp_root_xattr xattr; | |
1624 | struct hfsmount *hfsmp = NULL; | |
1625 | ||
1626 | hfsmp = vfs_fsprivate(mp); | |
1627 | err = cp_getrootxattr (hfsmp, &xattr); | |
1628 | ||
1629 | if (err == 0) { | |
39236c6e | 1630 | hfsmp->hfs_running_cp_major_vers = xattr.major_version; |
316670eb A |
1631 | } |
1632 | else { | |
1633 | return EINVAL; | |
1634 | } | |
1635 | ||
1636 | return 0; | |
1637 | } | |
1638 | ||
1639 | static int | |
1640 | cp_vnode_is_eligible(struct vnode *vp) | |
6d2010ae A |
1641 | { |
1642 | return ((vp->v_op == hfs_vnodeop_p) && | |
1643 | (!vnode_issystem(vp)) && | |
316670eb | 1644 | (vnode_isreg(vp) || vnode_isdir(vp))); |
6d2010ae A |
1645 | } |
1646 | ||
1647 | ||
1648 | ||
39236c6e A |
1649 | int |
1650 | cp_is_valid_class(int isdir, int32_t protectionclass) | |
6d2010ae | 1651 | { |
39236c6e A |
1652 | /* |
1653 | * The valid protection classes are from 0 -> N | |
1654 | * We use a signed argument to detect unassigned values from | |
1655 | * directory entry creation time in HFS. | |
1656 | */ | |
1657 | if (isdir) { | |
1658 | /* Directories are not allowed to have F, but they can have "NONE" */ | |
1659 | return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) && | |
1660 | (protectionclass <= PROTECTION_CLASS_D)); | |
1661 | } | |
1662 | else { | |
1663 | return ((protectionclass >= PROTECTION_CLASS_A) && | |
1664 | (protectionclass <= PROTECTION_CLASS_F)); | |
1665 | } | |
6d2010ae A |
1666 | } |
1667 | ||
3e170ce0 A |
#if DEBUG
/*
 * DEBUG-build guard words: cp_entry_alloc() stamps cp_magic1 into the
 * cprotect header and cp_magic2 just past the variable-length key area so
 * overruns of the key storage can be caught at dealloc time.
 */
static const uint32_t cp_magic1 = 0x7b727063;	// cpr{
static const uint32_t cp_magic2 = 0x7270637d;	// }cpr
#endif
6d2010ae | 1672 | |
3e170ce0 A |
/*
 * Allocate a cprotect entry sized for 'pers_key_len' bytes of wrapped
 * (persistent) key and 'cached_key_len' bytes of cached key.  If 'old' is
 * non-NULL, its header fields and cached key are carried over (re-wrap
 * path).  Optionally returns the embedded key pair via *pcpkp.
 * Returns NULL only when pers_key_len exceeds CP_MAX_WRAPPEDKEYSIZE.
 */
struct cprotect *
cp_entry_alloc(cprotect_t old, uint16_t pers_key_len,
			   uint16_t cached_key_len, cp_key_pair_t **pcpkp)
{
	struct cprotect *cp_entry;

	if (pers_key_len > CP_MAX_WRAPPEDKEYSIZE)
		return (NULL);

	/*
	 * cp_keys is a variable-length tail: drop the nominal cp_key_pair_t
	 * from the struct size and add the real storage requirement.
	 */
	size_t size = (sizeof(struct cprotect) - sizeof(cp_key_pair_t)
				   + cpkp_size(pers_key_len, cached_key_len));

#if DEBUG
	size += 4; // Extra for magic2
#endif

	/* M_WAITOK: cannot fail, no NULL check needed. */
	MALLOC(cp_entry, struct cprotect *, size, M_TEMP, M_WAITOK);

	/* Copy (or zero) only the fixed header; cpkp_init sets up the tail. */
	if (old) {
		memcpy(cp_entry, old, offsetof(struct cprotect, cp_keys));

	} else {
		bzero(cp_entry, offsetof(struct cprotect, cp_keys));
	}

#if DEBUG
	/* Stamp guard words at both ends for overrun detection at dealloc. */
	cp_entry->cp_magic1 = cp_magic1;
	*PTR_ADD(uint32_t *, cp_entry, size - 4) = cp_magic2;
#endif

	cpkp_init(&cp_entry->cp_keys, pers_key_len, cached_key_len);

	/*
	 * If we've been passed the old entry, then we are in the process of
	 * rewrapping in which case we need to copy the cached key. This is
	 * important for class B files when the device is locked because we
	 * won't be able to unwrap whilst in this state, yet we still need the
	 * unwrapped key.
	 */
	if (old)
		cpx_copy(cpkp_cpx(&old->cp_keys), cpkp_cpx(&cp_entry->cp_keys));

	if (pcpkp)
		*pcpkp = &cp_entry->cp_keys;

	return cp_entry;
}
1720 | ||
316670eb | 1721 | static void |
3e170ce0 | 1722 | cp_entry_dealloc(__unused hfsmount_t *hfsmp, struct cprotect *entry) |
316670eb | 1723 | { |
3e170ce0 A |
1724 | |
1725 | cpkp_flush(&entry->cp_keys); | |
1726 | ||
1727 | #if DEBUG | |
1728 | assert(entry->cp_magic1 == cp_magic1); | |
1729 | assert(*PTR_ADD(uint32_t *, entry, (sizeof(struct cprotect) - sizeof(cp_key_pair_t) | |
1730 | + cpkp_sizex(&entry->cp_keys) == cp_magic2))); | |
1731 | #endif | |
1732 | ||
39236c6e | 1733 | FREE(entry, M_TEMP); |
316670eb A |
1734 | } |
1735 | ||
3e170ce0 A |
/*
 * Parse a version-4 content protection xattr (little-endian on disk) into
 * a cprotect entry.  Byte-swaps 'xattr' in place.
 *
 * If CP_GET_XATTR_BASIC_INFO is set in 'options', *pcpr must point at a
 * caller-provided cprotect which receives only class/flags metadata (no
 * key material); otherwise a new entry including the persistent key is
 * allocated and returned through *pcpr (caller owns it).
 *
 * Returns 0 on success, or HFS_EINCONSISTENT if the xattr's stated key
 * size is implausible or larger than the data actually read.
 */
static int cp_read_xattr_v4(__unused hfsmount_t *hfsmp, struct cp_xattr_v4 *xattr,
							size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options)
{
	/* Endian swap the multi-byte fields into host endianness from L.E. */
	xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version);
	xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version);
	xattr->key_size = OSSwapLittleToHostInt32(xattr->key_size);
	xattr->flags = OSSwapLittleToHostInt32(xattr->flags);
	xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class);
	xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version);

	/*
	 * Prevent a buffer overflow, and validate the key length obtained from the
	 * EA. If it's too big, then bail out, because the EA can't be trusted at this
	 * point.
	 */
	if (xattr->key_size > CP_MAX_WRAPPEDKEYSIZE)
		return HFS_EINCONSISTENT;

	/* The EA must actually contain the key bytes it claims to hold. */
	size_t min_len = offsetof(struct cp_xattr_v4, persistent_key) + xattr->key_size;
	if (xattr_len < min_len)
		return HFS_EINCONSISTENT;

	/*
	 * Class F files have no backing key; their keylength should be 0,
	 * though they should have the proper flags set.
	 *
	 * A request to instantiate a CP for a class F file should result
	 * in a bzero'd cp that just says class F, with key_flushed set.
	 */
	if (CP_CLASS(xattr->persistent_class) == PROTECTION_CLASS_F
		|| ISSET(xattr->flags, CP_XAF_NEEDS_KEYS)) {
		xattr->key_size = 0;
	}

	/* set up entry with information from xattr */
	cp_key_pair_t *cpkp;
	cprotect_t entry;

	if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		/* caller passed in a pre-allocated structure to get the basic info */
		entry = *pcpr;
		bzero(entry, offsetof(struct cprotect, cp_keys));
	}
	else {
		/* key_size <= CP_MAX_WRAPPEDKEYSIZE here, so this cannot return NULL */
		entry = cp_entry_alloc(NULL, xattr->key_size, CP_MAX_CACHEBUFLEN, &cpkp);
	}

	entry->cp_pclass = xattr->persistent_class;
	entry->cp_key_os_version = xattr->key_os_version;


	if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) {
		if (xattr->key_size) {
			cpkp_set_pers_key_len(cpkp, xattr->key_size);
			memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_size);
		}

		*pcpr = entry;
	}
	else if (xattr->key_size) {
		/* Basic-info mode: just record that a persistent key exists on disk. */
		SET(entry->cp_flags, CP_HAS_A_KEY);
	}

	return 0;
}
39236c6e | 1802 | |
3e170ce0 A |
1803 | int cp_read_xattr_v5(hfsmount_t *hfsmp, struct cp_xattr_v5 *xattr, |
1804 | size_t xattr_len, cprotect_t *pcpr, cp_getxattr_options_t options) | |
1805 | { | |
1806 | if (xattr->xattr_major_version == OSSwapHostToLittleConstInt16(CP_VERS_4)) { | |
1807 | return cp_read_xattr_v4(hfsmp, (struct cp_xattr_v4 *)xattr, xattr_len, pcpr, options); | |
1808 | } | |
316670eb | 1809 | |
3e170ce0 | 1810 | xattr->xattr_major_version = OSSwapLittleToHostInt16(xattr->xattr_major_version); |
39236c6e | 1811 | |
3e170ce0 A |
1812 | if (xattr->xattr_major_version != CP_VERS_5) { |
1813 | printf("hfs: cp_getxattr: unsupported xattr version %d\n", | |
1814 | xattr->xattr_major_version); | |
1815 | return ENOTSUP; | |
1816 | } | |
39236c6e | 1817 | |
3e170ce0 | 1818 | size_t min_len = offsetof(struct cp_xattr_v5, persistent_key); |
39236c6e | 1819 | |
3e170ce0 A |
1820 | if (xattr_len < min_len) |
1821 | return HFS_EINCONSISTENT; | |
39236c6e | 1822 | |
3e170ce0 A |
1823 | xattr->xattr_minor_version = OSSwapLittleToHostInt16(xattr->xattr_minor_version); |
1824 | xattr->flags = OSSwapLittleToHostInt32(xattr->flags); | |
1825 | xattr->persistent_class = OSSwapLittleToHostInt32(xattr->persistent_class); | |
1826 | xattr->key_os_version = OSSwapLittleToHostInt32(xattr->key_os_version); | |
1827 | xattr->key_revision = OSSwapLittleToHostInt16(xattr->key_revision); | |
1828 | xattr->key_len = OSSwapLittleToHostInt16(xattr->key_len); | |
316670eb | 1829 | |
3e170ce0 | 1830 | uint16_t pers_key_len = xattr->key_len; |
316670eb | 1831 | |
3e170ce0 A |
1832 | min_len += pers_key_len; |
1833 | if (xattr_len < min_len) | |
1834 | return HFS_EINCONSISTENT; | |
316670eb | 1835 | |
39236c6e | 1836 | |
3e170ce0 A |
1837 | cp_key_pair_t *cpkp; |
1838 | cprotect_t entry; | |
1839 | ||
1840 | /* | |
1841 | * If option CP_GET_XATTR_BASIC_INFO is set, we only return basic | |
1842 | * information about the file's protection (and not the key) and | |
1843 | * we store the result in the structure the caller passed to us. | |
1844 | */ | |
1845 | if (ISSET(options, CP_GET_XATTR_BASIC_INFO)) { | |
1846 | entry = *pcpr; | |
1847 | bzero(entry, offsetof(struct cprotect, cp_keys)); | |
1848 | } else { | |
1849 | entry = cp_entry_alloc(NULL, xattr->key_len, CP_MAX_CACHEBUFLEN, &cpkp); | |
1850 | } | |
39236c6e | 1851 | |
3e170ce0 A |
1852 | entry->cp_pclass = xattr->persistent_class; |
1853 | entry->cp_key_os_version = xattr->key_os_version; | |
1854 | entry->cp_key_revision = xattr->key_revision; | |
39236c6e | 1855 | |
3e170ce0 A |
1856 | if (!ISSET(options, CP_GET_XATTR_BASIC_INFO)) { |
1857 | if (xattr->key_len) { | |
1858 | cpkp_set_pers_key_len(cpkp, xattr->key_len); | |
1859 | memcpy(cpkp_pers_key(cpkp), xattr->persistent_key, xattr->key_len); | |
1860 | } | |
316670eb | 1861 | |
39236c6e | 1862 | |
3e170ce0 A |
1863 | *pcpr = entry; |
1864 | } | |
1865 | else if (xattr->key_len) { | |
1866 | SET(entry->cp_flags, CP_HAS_A_KEY); | |
1867 | } | |
39236c6e | 1868 | |
3e170ce0 A |
1869 | return 0; |
1870 | } | |
39236c6e | 1871 | |
3e170ce0 A |
/*
 * Initializes a new cprotect entry with xattr data from the cnode.
 * cnode lock held shared
 *
 * Reads the on-disk content-protection EA into a scratch buffer sized for
 * the largest (v5) layout and hands it to cp_read_xattr_v5(), which also
 * understands v4.  On success *outentry receives a newly allocated
 * cprotect owned by the caller.  Returns 0, ENOATTR if no EA exists, or
 * an HFS/errno error.
 */
static int
cp_getxattr(struct cnode *cp, struct hfsmount *hfsmp, cprotect_t *outentry)
{
	/* v5 is the largest on-disk form; read into a buffer of that size. */
	size_t xattr_len = sizeof(struct cp_xattr_v5);
	struct cp_xattr_v5 *xattr;

	MALLOC (xattr, struct cp_xattr_v5 *, xattr_len,
			M_TEMP, M_WAITOK);

	/* hfs_xattr_read updates xattr_len to the number of bytes actually read. */
	int error = hfs_xattr_read(cp->c_vp, CONTENT_PROTECTION_XATTR_NAME,
							   xattr, &xattr_len);

	if (!error) {
		if (xattr_len < CP_XATTR_MIN_LEN)
			error = HFS_EINCONSISTENT;
		else
			error = cp_read_xattr_v5(hfsmp, xattr, xattr_len, outentry, 0);
	}

#if DEBUG
	/* Hex-dump the raw EA on parse failure to aid debugging. */
	if (error && error != ENOATTR) {
		printf("cp_getxattr: bad cp xattr (%d):\n", error);
		for (size_t i = 0; i < xattr_len; ++i)
			printf("%02x ", ((uint8_t *)xattr)[i]);
		printf("\n");
	}
#endif

	FREE(xattr, M_TEMP);

	return error;
}
1908 | ||
1909 | /* | |
1910 | * If permitted, restore entry's unwrapped key from the persistent key. | |
316670eb | 1911 | * If not, clear key and set CP_KEY_FLUSHED. |
6d2010ae A |
1912 | * cnode lock held exclusive |
1913 | */ | |
1914 | static int | |
39236c6e | 1915 | cp_restore_keys(struct cprotect *entry, struct hfsmount *hfsmp, struct cnode *cp) |
6d2010ae A |
1916 | { |
1917 | int error = 0; | |
1918 | ||
39236c6e | 1919 | error = cp_unwrap(hfsmp, entry, cp); |
6d2010ae | 1920 | if (error) { |
3e170ce0 | 1921 | cp_flush_cached_keys(entry); |
6d2010ae A |
1922 | error = EPERM; |
1923 | } | |
6d2010ae A |
1924 | return error; |
1925 | } | |
1926 | ||
1927 | static int | |
39236c6e A |
1928 | cp_lock_vfs_callback(mount_t mp, void *arg) |
1929 | { | |
1930 | ||
1931 | /* Use a pointer-width integer field for casting */ | |
1932 | unsigned long new_state; | |
fe8ab488 | 1933 | struct hfsmount *hfsmp; |
39236c6e A |
1934 | |
1935 | /* | |
1936 | * When iterating the various mount points that may | |
1937 | * be present on a content-protected device, we need to skip | |
1938 | * those that do not have it enabled. | |
1939 | */ | |
1940 | if (!cp_fs_protected(mp)) { | |
1941 | return 0; | |
1942 | } | |
39236c6e | 1943 | new_state = (unsigned long) arg; |
fe8ab488 A |
1944 | |
1945 | hfsmp = VFSTOHFS(mp); | |
1946 | ||
1947 | hfs_lock_mount(hfsmp); | |
1948 | /* this loses all of the upper bytes of precision; that's OK */ | |
1949 | hfsmp->hfs_cp_lock_state = (uint8_t) new_state; | |
1950 | hfs_unlock_mount(hfsmp); | |
1951 | ||
39236c6e A |
1952 | if (new_state == CP_LOCKED_STATE) { |
1953 | /* | |
1954 | * We respond only to lock events. Since cprotect structs | |
1955 | * decrypt/restore keys lazily, the unlock events don't | |
1956 | * actually cause anything to happen. | |
1957 | */ | |
1958 | return vnode_iterate(mp, 0, cp_lock_vnode_callback, arg); | |
1959 | } | |
1960 | /* Otherwise just return 0. */ | |
1961 | return 0; | |
1962 | ||
6d2010ae A |
1963 | } |
1964 | ||
1965 | ||
1966 | /* | |
1967 | * Deny access to protected files if keys have been locked. | |
6d2010ae | 1968 | */ |
316670eb | 1969 | static int |
fe8ab488 | 1970 | cp_check_access(struct cnode *cp, struct hfsmount *hfsmp, int vnop __unused) |
6d2010ae A |
1971 | { |
1972 | int error = 0; | |
1973 | ||
fe8ab488 A |
1974 | /* |
1975 | * For now it's OK to examine the state variable here without | |
1976 | * holding the HFS lock. This is only a short-circuit; if the state | |
1977 | * transitions (or is in transition) after we examine this field, we'd | |
1978 | * have to handle that anyway. | |
1979 | */ | |
1980 | if (hfsmp->hfs_cp_lock_state == CP_UNLOCKED_STATE) { | |
316670eb | 1981 | return 0; |
6d2010ae A |
1982 | } |
1983 | ||
316670eb | 1984 | if (!cp->c_cpentry) { |
6d2010ae | 1985 | /* unprotected node */ |
316670eb A |
1986 | return 0; |
1987 | } | |
1988 | ||
1989 | if (!S_ISREG(cp->c_mode)) { | |
1990 | return 0; | |
6d2010ae A |
1991 | } |
1992 | ||
316670eb | 1993 | /* Deny all access for class A files */ |
fe8ab488 | 1994 | switch (CP_CLASS(cp->c_cpentry->cp_pclass)) { |
6d2010ae A |
1995 | case PROTECTION_CLASS_A: { |
1996 | error = EPERM; | |
1997 | break; | |
1998 | } | |
6d2010ae A |
1999 | default: |
2000 | error = 0; | |
2001 | break; | |
2002 | } | |
2003 | ||
2004 | return error; | |
2005 | } | |
2006 | ||
6d2010ae A |
/*
 * Respond to a lock or unlock event.
 * On lock: clear out keys from memory, then flush file contents.
 * On unlock: nothing (function not called).
 *
 * Called per-vnode by vnode_iterate() from cp_lock_vfs_callback(); the
 * opaque 'arg' carries the lock action (CP_LOCKED_STATE / CP_UNLOCKED_STATE)
 * cast through a pointer-width integer.
 */
static int
cp_lock_vnode_callback(struct vnode *vp, void *arg)
{
	cnode_t *cp = NULL;
	struct cprotect *entry = NULL;
	int error = 0;
	int locked = 1;
	unsigned long action = 0;
	int took_truncate_lock = 0;

	error = vnode_getwithref (vp);
	if (error) {
		return error;
	}

	cp = VTOC(vp);

	/*
	 * When cleaning cnodes due to a lock event, we must
	 * take the truncate lock AND the cnode lock. By taking
	 * the truncate lock here, we force (nearly) all pending IOs
	 * to drain before we can acquire the truncate lock. All HFS cluster
	 * io calls except for swapfile IO need to acquire the truncate lock
	 * prior to calling into the cluster layer.
	 */
	hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

	entry = cp->c_cpentry;
	if (!entry) {
		/* unprotected vnode: not a regular file */
		goto out;
	}

	/* Recover the lock action from the opaque callback argument. */
	action = (unsigned long) arg;
	switch (action) {
		case CP_LOCKED_STATE: {
			vfs_context_t ctx;
			if (CP_CLASS(entry->cp_pclass) != PROTECTION_CLASS_A ||
				vnode_isdir(vp)) {
				/*
				 * There is no change at lock for other classes than A.
				 * B is kept in memory for writing, and class F (for VM) does
				 * not have a wrapped key, so there is no work needed for
				 * wrapping/unwrapping.
				 *
				 * Note that 'class F' is relevant here because if
				 * hfs_vnop_strategy does not take the cnode lock
				 * to protect the cp blob across IO operations, we rely
				 * implicitly on the truncate lock to be held when doing IO.
				 * The only case where the truncate lock is not held is during
				 * swapfile IO because HFS just funnels the VNOP_PAGEOUT
				 * directly to cluster_pageout.
				 */
				goto out;
			}

			/* Before doing anything else, zero-fill sparse ranges as needed */
			ctx = vfs_context_current();
			(void) hfs_filedone (vp, ctx, 0);

			/* first, sync back dirty pages */
			hfs_unlock (cp);
			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
			hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

			/* flush keys:
			 * There was a concern here(9206856) about flushing keys before nand layer is done using them.
			 * But since we are using ubc_msync with UBC_SYNC, it blocks until all IO is completed.
			 * Once IOFS caches or is done with these keys, it calls the completion routine in IOSF.
			 * Which in turn calls buf_biodone() and eventually unblocks ubc_msync()
			 * Also verified that the cached data in IOFS is overwritten by other data, and there
			 * is no key leakage in that layer.
			 */

			cp_flush_cached_keys(entry);

			/* some write may have arrived in the mean time. dump those pages */
			hfs_unlock(cp);
			locked = 0;

			ubc_msync (vp, 0, ubc_getsize(vp), NULL, UBC_INVALIDATE | UBC_SYNC);
			break;
		}
		case CP_UNLOCKED_STATE: {
			/* no-op */
			break;
		}
		default:
			panic("Content Protection: unknown lock action %lu\n", action);
	}

out:
	if (locked) {
		hfs_unlock(cp);
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	}

	vnode_put (vp);
	return error;
}
2118 | ||
39236c6e A |
2119 | |
/*
 * cp_rewrap:
 *
 * Generate a new wrapped key based on the existing cache key.
 *
 * Asks AKS to rewrap 'cpkp's persistent key (currently wrapped under the
 * cnode's persistent class) to the effective class of *newclass.  On
 * success a new holder is allocated via 'alloc_fn' (copying from
 * 'old_holder'), the new wrapped key is stored in it, *pholder receives
 * it, and *newclass is updated to exactly what AKS reported back.
 *
 * Returns 0 on success, EINVAL for class F (no wrapped key exists), or
 * EPERM if AKS refuses or wraps to a different class than requested.
 */

int
cp_rewrap(struct cnode *cp, __unused hfsmount_t *hfsmp,
		  cp_key_class_t *newclass, cp_key_pair_t *cpkp, const void *old_holder,
		  cp_new_alloc_fn alloc_fn, void **pholder)
{
	struct cprotect *entry = cp->c_cpentry;

	uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE];
	size_t keylen = CP_MAX_WRAPPEDKEYSIZE;
	int error = 0;
	/* Strip any flag bits; AKS is asked for the effective class only. */
	const cp_key_class_t key_class = CP_CLASS(*newclass);

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;
	cp_wrapped_key_s wrapped_key_out;

	/*
	 * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient
	 * key that is only good as long as the file is open. There is no
	 * wrapped key, so there isn't anything to wrap.
	 */
	if (key_class == PROTECTION_CLASS_F) {
		return EINVAL;
	}

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = cpkp_pers_key(cpkp);
	wrapped_key_in.key_len = cpkp_pers_key_len(cpkp);
	/* Use the persistent class when talking to AKS */
	wrapped_key_in.dp_class = entry->cp_pclass;

	bzero(&wrapped_key_out, sizeof(wrapped_key_out));
	wrapped_key_out.key = new_persistent_key;
	wrapped_key_out.key_len = keylen;

	/*
	 * inode is passed here to find the backup bag wrapped blob
	 * from userspace. This lookup will occur shortly after creation
	 * and only if the file still exists. Beyond this lookup the
	 * inode is not used. Technically there is a race, we practically
	 * don't lose.
	 */
	error = g_cp_wrap_func.rewrapper(&access_in,
			key_class, /* new class */
			&wrapped_key_in,
			&wrapped_key_out);

	keylen = wrapped_key_out.key_len;

	if (error == 0) {
		/*
		 * Verify that AKS returned to us a wrapped key of the
		 * target class requested.
		 */
		/* Get the effective class here */
		cp_key_class_t effective = CP_CLASS(wrapped_key_out.dp_class);
		if (effective != key_class) {
			/*
			 * Fail the operation if defaults or some other enforcement
			 * dictated that the class be wrapped differently.
			 */

			/* TODO: Invalidate the key when 12170074 unblocked */
			return EPERM;
		}

		/* Allocate a new cpentry */
		cp_key_pair_t *new_cpkp;
		*pholder = alloc_fn(old_holder, keylen, CP_MAX_CACHEBUFLEN, &new_cpkp);

		/* copy the new key into the entry */
		cpkp_set_pers_key_len(new_cpkp, keylen);
		memcpy(cpkp_pers_key(new_cpkp), new_persistent_key, keylen);

		/* Actually record/store what AKS reported back, not the effective class stored in newclass */
		*newclass = wrapped_key_out.dp_class;
	}
	else {
		error = EPERM;
	}

	return error;
}
2212 | ||
/*
 * Ask AKS to unwrap the persistent key held in 'cpkp' (wrapped under
 * 'key_class') into the key pair's in-core cpx.  On success the cpx
 * receives the raw cache key, its length, the AKS-derived IV key, and the
 * SEP-wrapped flag.  Returns 0 on success, EPERM if AKS refuses (e.g.
 * class A while the device is locked).
 */
static int cpkp_unwrap(cnode_t *cp, cp_key_class_t key_class, cp_key_pair_t *cpkp)
{
	int error = 0;
	uint8_t iv_key[CP_IV_KEYSIZE];
	cpx_t cpx = cpkp_cpx(cpkp);

	/* Structures passed between HFS and AKS */
	cp_cred_s access_in;
	cp_wrapped_key_s wrapped_key_in;
	cp_raw_key_s key_out;

	cp_init_access(&access_in, cp);

	bzero(&wrapped_key_in, sizeof(wrapped_key_in));
	wrapped_key_in.key = cpkp_pers_key(cpkp);
	wrapped_key_in.key_len = cpkp_max_pers_key_len(cpkp);
	/* Use the persistent class when talking to AKS */
	wrapped_key_in.dp_class = key_class;

	bzero(&key_out, sizeof(key_out));
	key_out.iv_key = iv_key;
	key_out.key = cpx_key(cpx);
	/*
	 * The unwrapper should validate/set the key length for
	 * the IV key length and the cache key length, however we need
	 * to supply the correct buffer length so that AKS knows how
	 * many bytes it has to work with.
	 */
	key_out.iv_key_len = CP_IV_KEYSIZE;
	key_out.key_len = cpx_max_key_len(cpx);

	error = g_cp_wrap_func.unwrapper(&access_in, &wrapped_key_in, &key_out);
	if (!error) {
		/* Sanity-check what AKS wrote back before trusting the lengths. */
		if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) {
			panic ("cp_unwrap: invalid key length! (%ul)\n", key_out.key_len);
		}

		if (key_out.iv_key_len != CP_IV_KEYSIZE)
			panic ("cp_unwrap: invalid iv key length! (%ul)\n", key_out.iv_key_len);

		cpx_set_key_len(cpx, key_out.key_len);

		cpx_set_aes_iv_key(cpx, iv_key);
		cpx_set_is_sep_wrapped_key(cpx, ISSET(key_out.flags, CP_RAW_KEY_WRAPPEDKEY));
	} else {
		error = EPERM;
	}

	return error;
}
2263 | ||
39236c6e | 2264 | static int |
3e170ce0 | 2265 | cp_unwrap(__unused struct hfsmount *hfsmp, struct cprotect *entry, struct cnode *cp) |
39236c6e | 2266 | { |
fe8ab488 | 2267 | /* |
3e170ce0 A |
2268 | * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient |
2269 | * key that is only good as long as the file is open. There is no | |
2270 | * wrapped key, so there isn't anything to unwrap. | |
fe8ab488 | 2271 | */ |
3e170ce0 A |
2272 | if (CP_CLASS(entry->cp_pclass) == PROTECTION_CLASS_F) { |
2273 | return EPERM; | |
2274 | } | |
2275 | ||
2276 | int error = cpkp_unwrap(cp, entry->cp_pclass, &entry->cp_keys); | |
39236c6e | 2277 | |
39236c6e | 2278 | |
3e170ce0 | 2279 | return error; |
39236c6e A |
2280 | } |
2281 | ||
/*
 * cp_generate_keys
 *
 * Take a cnode that has already been initialized and establish persistent and
 * cache keys for it at this time. Note that at the time this is called, the
 * directory entry has already been created and we are holding the cnode lock
 * on 'cp'.
 *
 * On success *newentry receives a freshly allocated cprotect whose xattr has
 * already been written to disk; the caller installs it (see
 * cp_replace_entry).  Returns 0 on success, EINVAL for a cnode in the wrong
 * state, or EPERM when key generation or the xattr write fails.
 */
int cp_generate_keys (struct hfsmount *hfsmp, struct cnode *cp, cp_key_class_t targetclass,
		uint32_t keyflags, struct cprotect **newentry)
{

	int error = 0;
	struct cprotect *newcp = NULL;
	*newentry = NULL;

	/* Target class must be an effective class only */
	targetclass = CP_CLASS(targetclass);

	/* Validate that it has a cprotect already */
	if (cp->c_cpentry == NULL) {
		/* We can't do anything if it shouldn't be protected. */
		return 0;
	}

	/* Asserts for the underlying cprotect */
	if (cp->c_cpentry->cp_flags & CP_NO_XATTR) {
		/* should already have an xattr by this point. */
		error = EINVAL;
		goto out;
	}

	if (S_ISREG(cp->c_mode)) {
		/* Regular files must actually be waiting for a persistent key. */
		if (!cp_needs_pers_key(cp->c_cpentry)) {
			error = EINVAL;
			goto out;
		}
	}

	cp_key_revision_t key_revision = cp_initial_key_revision(hfsmp);

	error = cp_new (&targetclass, hfsmp, cp, cp->c_mode, keyflags, key_revision,
					(cp_new_alloc_fn)cp_entry_alloc, (void **)&newcp);
	if (error) {
		/*
		 * Key generation failed. This is not necessarily fatal
		 * since the device could have transitioned into the lock
		 * state before we called this.
		 */
		error = EPERM;
		goto out;
	}

	/* cp_new may have updated targetclass to what AKS actually wrapped to. */
	newcp->cp_pclass = targetclass;
	newcp->cp_key_os_version = cp_os_version();
	newcp->cp_key_revision = key_revision;

	/*
	 * If we got here, then we have a new cprotect.
	 * Attempt to write the new one out.
	 */
	error = cp_setxattr (cp, newcp, hfsmp, cp->c_fileid, XATTR_REPLACE);

	if (error) {
		/* Tear down the new cprotect; Tell MKB that it's invalid. Bail out */
		/* TODO: rdar://12170074 needs to be fixed before we can tell MKB */
		if (newcp) {
			cp_entry_destroy(hfsmp, newcp);
		}
		goto out;
	}

	/*
	 * If we get here then we can assert that:
	 * 1) generated wrapped/unwrapped keys.
	 * 2) wrote the new keys to disk.
	 * 3) cprotect is ready to go.
	 */

	*newentry = newcp;

out:
	return error;

}
2368 | ||
3e170ce0 | 2369 | void cp_replace_entry (hfsmount_t *hfsmp, struct cnode *cp, struct cprotect *newentry) |
39236c6e | 2370 | { |
39236c6e | 2371 | if (cp->c_cpentry) { |
3e170ce0 A |
2372 | |
2373 | cp_entry_destroy (hfsmp, cp->c_cpentry); | |
39236c6e A |
2374 | } |
2375 | cp->c_cpentry = newentry; | |
2376 | newentry->cp_backing_cnode = cp; | |
2377 | ||
2378 | return; | |
2379 | } | |
2380 | ||
2381 | ||
2382 | /* | |
2383 | * cp_new | |
2384 | * | |
2385 | * Given a double-pointer to a cprotect, generate keys (either in-kernel or from keystore), | |
2386 | * allocate a cprotect, and vend it back to the caller. | |
2387 | * | |
2388 | * Additionally, decide if keys are even needed -- directories get cprotect data structures | |
2389 | * but they do not have keys. | |
2390 | * | |
3e170ce0 | 2391 | */ |
39236c6e | 2392 | |
3e170ce0 A |
2393 | int |
2394 | cp_new(cp_key_class_t *newclass_eff, __unused struct hfsmount *hfsmp, struct cnode *cp, | |
2395 | mode_t cmode, int32_t keyflags, cp_key_revision_t key_revision, | |
2396 | cp_new_alloc_fn alloc_fn, void **pholder) | |
39236c6e | 2397 | { |
39236c6e | 2398 | int error = 0; |
fe8ab488 A |
2399 | uint8_t new_key[CP_MAX_CACHEBUFLEN]; |
2400 | size_t new_key_len = CP_MAX_CACHEBUFLEN; /* AKS tell us the proper key length, how much of this is used */ | |
39236c6e A |
2401 | uint8_t new_persistent_key[CP_MAX_WRAPPEDKEYSIZE]; |
2402 | size_t new_persistent_len = CP_MAX_WRAPPEDKEYSIZE; | |
2403 | uint8_t iv_key[CP_IV_KEYSIZE]; | |
2404 | size_t iv_key_len = CP_IV_KEYSIZE; | |
fe8ab488 | 2405 | int iswrapped = 0; |
3e170ce0 | 2406 | cp_key_class_t key_class = CP_CLASS(*newclass_eff); |
39236c6e A |
2407 | |
2408 | /* Structures passed between HFS and AKS */ | |
2409 | cp_cred_s access_in; | |
2410 | cp_wrapped_key_s wrapped_key_out; | |
2411 | cp_raw_key_s key_out; | |
2412 | ||
fe8ab488 | 2413 | if (are_wraps_initialized == false) { |
39236c6e A |
2414 | printf("hfs: cp_new: wrap/gen functions not yet set\n"); |
2415 | return ENXIO; | |
2416 | } | |
2417 | ||
fe8ab488 A |
2418 | /* Sanity check that it's a file or directory here */ |
2419 | if (!(S_ISREG(cmode)) && !(S_ISDIR(cmode))) { | |
2420 | return EPERM; | |
2421 | } | |
2422 | ||
39236c6e A |
2423 | /* |
2424 | * Step 1: Generate Keys if needed. | |
2425 | * | |
2426 | * For class F files, the kernel provides the key. | |
2427 | * PROTECTION_CLASS_F is in-use by VM swapfile; it represents a transient | |
2428 | * key that is only good as long as the file is open. There is no | |
2429 | * wrapped key, so there isn't anything to wrap. | |
2430 | * | |
2431 | * For class A->D files, the key store provides the key | |
2432 | * | |
2433 | * For Directories, we only give them a class ; no keys. | |
2434 | */ | |
2435 | if (S_ISDIR (cmode)) { | |
2436 | /* Directories */ | |
2437 | new_persistent_len = 0; | |
2438 | new_key_len = 0; | |
2439 | ||
2440 | error = 0; | |
2441 | } | |
fe8ab488 A |
2442 | else { |
2443 | /* Must be a file */ | |
3e170ce0 | 2444 | if (key_class == PROTECTION_CLASS_F) { |
fe8ab488 | 2445 | /* class F files are not wrapped; they can still use the max key size */ |
39236c6e A |
2446 | new_key_len = CP_MAX_KEYSIZE; |
2447 | read_random (&new_key[0], new_key_len); | |
2448 | new_persistent_len = 0; | |
2449 | ||
2450 | error = 0; | |
2451 | } | |
2452 | else { | |
2453 | /* | |
2454 | * The keystore is provided the file ID so that it can associate | |
2455 | * the wrapped backup blob with this key from userspace. This | |
2456 | * lookup occurs after successful file creation. Beyond this, the | |
2457 | * file ID is not used. Note that there is a potential race here if | |
2458 | * the file ID is re-used. | |
2459 | */ | |
2460 | cp_init_access(&access_in, cp); | |
2461 | ||
2462 | bzero(&key_out, sizeof(key_out)); | |
2463 | key_out.key = new_key; | |
39236c6e | 2464 | key_out.iv_key = iv_key; |
fe8ab488 A |
2465 | /* |
2466 | * AKS will override our key length fields, but we need to supply | |
2467 | * the length of the buffer in those length fields so that | |
2468 | * AKS knows hoa many bytes it has to work with. | |
2469 | */ | |
2470 | key_out.key_len = new_key_len; | |
39236c6e A |
2471 | key_out.iv_key_len = iv_key_len; |
2472 | ||
2473 | bzero(&wrapped_key_out, sizeof(wrapped_key_out)); | |
2474 | wrapped_key_out.key = new_persistent_key; | |
2475 | wrapped_key_out.key_len = new_persistent_len; | |
2476 | ||
3e170ce0 A |
2477 | access_in.key_revision = key_revision; |
2478 | ||
2479 | error = g_cp_wrap_func.new_key(&access_in, | |
2480 | key_class, | |
39236c6e A |
2481 | &key_out, |
2482 | &wrapped_key_out); | |
2483 | ||
fe8ab488 A |
2484 | if (error) { |
2485 | /* keybag returned failure */ | |
2486 | error = EPERM; | |
2487 | goto cpnew_fail; | |
2488 | } | |
2489 | ||
2490 | /* Now sanity-check the output from new_key */ | |
2491 | if (key_out.key_len == 0 || key_out.key_len > CP_MAX_CACHEBUFLEN) { | |
2492 | panic ("cp_new: invalid key length! (%ul) \n", key_out.key_len); | |
2493 | } | |
2494 | ||
3e170ce0 | 2495 | if (key_out.iv_key_len != CP_IV_KEYSIZE) { |
fe8ab488 A |
2496 | panic ("cp_new: invalid iv key length! (%ul) \n", key_out.iv_key_len); |
2497 | } | |
2498 | ||
2499 | /* | |
2500 | * AKS is allowed to override our preferences and wrap with a | |
2501 | * different class key for policy reasons. If we were told that | |
2502 | * any class other than the one specified is unacceptable then error out | |
2503 | * if that occurred. Check that the effective class returned by | |
2504 | * AKS is the same as our effective new class | |
2505 | */ | |
3e170ce0 A |
2506 | if (CP_CLASS(wrapped_key_out.dp_class) != key_class) { |
2507 | if (!ISSET(keyflags, CP_KEYWRAP_DIFFCLASS)) { | |
2508 | error = EPERM; | |
fe8ab488 A |
2509 | /* TODO: When 12170074 fixed, release/invalidate the key! */ |
2510 | goto cpnew_fail; | |
2511 | } | |
2512 | } | |
2513 | ||
3e170ce0 | 2514 | *newclass_eff = wrapped_key_out.dp_class; |
39236c6e A |
2515 | new_key_len = key_out.key_len; |
2516 | iv_key_len = key_out.iv_key_len; | |
2517 | new_persistent_len = wrapped_key_out.key_len; | |
39236c6e | 2518 | |
fe8ab488 A |
2519 | /* Is the key a SEP wrapped key? */ |
2520 | if (key_out.flags & CP_RAW_KEY_WRAPPEDKEY) { | |
2521 | iswrapped = 1; | |
2522 | } | |
2523 | } | |
39236c6e A |
2524 | } |
2525 | ||
2526 | /* | |
fe8ab488 | 2527 | * Step 2: allocate cprotect and initialize it. |
39236c6e A |
2528 | */ |
2529 | ||
3e170ce0 A |
2530 | cp_key_pair_t *cpkp; |
2531 | *pholder = alloc_fn(NULL, new_persistent_len, new_key_len, &cpkp); | |
2532 | if (*pholder == NULL) { | |
fe8ab488 A |
2533 | return ENOMEM; |
2534 | } | |
39236c6e | 2535 | |
fe8ab488 A |
2536 | /* Copy the cache key & IV keys into place if needed. */ |
2537 | if (new_key_len > 0) { | |
3e170ce0 | 2538 | cpx_t cpx = cpkp_cpx(cpkp); |
fe8ab488 | 2539 | |
3e170ce0 A |
2540 | cpx_set_key_len(cpx, new_key_len); |
2541 | memcpy(cpx_key(cpx), new_key, new_key_len); | |
fe8ab488 A |
2542 | |
2543 | /* Initialize the IV key */ | |
3e170ce0 A |
2544 | if (key_class != PROTECTION_CLASS_F) |
2545 | cpx_set_aes_iv_key(cpx, iv_key); | |
2546 | ||
2547 | cpx_set_is_sep_wrapped_key(cpx, iswrapped); | |
39236c6e | 2548 | } |
fe8ab488 | 2549 | if (new_persistent_len > 0) { |
3e170ce0 A |
2550 | cpkp_set_pers_key_len(cpkp, new_persistent_len); |
2551 | memcpy(cpkp_pers_key(cpkp), new_persistent_key, new_persistent_len); | |
fe8ab488 A |
2552 | } |
2553 | ||
3e170ce0 A |
2554 | cpnew_fail: |
2555 | ||
2556 | #if HFS_TMPDBG | |
2557 | #if !SECURE_KERNEL | |
2558 | if ((hfsmp->hfs_cp_verbose) && (error == EPERM)) { | |
2559 | /* Only introspect the data fork */ | |
2560 | cp_log_eperm (cp->c_vp, *newclass_eff, true); | |
39236c6e | 2561 | } |
3e170ce0 A |
2562 | #endif |
2563 | #endif | |
39236c6e A |
2564 | |
2565 | return error; | |
2566 | } | |
2567 | ||
2568 | /* Initialize the cp_cred_t structure passed to AKS */ | |
2569 | static void cp_init_access(cp_cred_t access, struct cnode *cp) | |
2570 | { | |
2571 | vfs_context_t context = vfs_context_current(); | |
2572 | kauth_cred_t cred = vfs_context_ucred(context); | |
2573 | proc_t proc = vfs_context_proc(context); | |
2574 | ||
2575 | bzero(access, sizeof(*access)); | |
2576 | ||
2577 | /* Note: HFS uses 32-bit fileID, even though inode is a 64-bit value */ | |
2578 | access->inode = cp->c_fileid; | |
2579 | access->pid = proc_pid(proc); | |
2580 | access->uid = kauth_cred_getuid(cred); | |
2581 | ||
3e170ce0 A |
2582 | if (cp->c_cpentry) |
2583 | access->key_revision = cp->c_cpentry->cp_key_revision; | |
2584 | ||
39236c6e A |
2585 | return; |
2586 | } | |
6d2010ae | 2587 | |
3e170ce0 A |
2588 | /* |
2589 | * Parses versions of the form 12A316, i.e. <major><minor><revision> and | |
2590 | * returns a uint32_t in the form 0xaabbcccc where aa = <major>, | |
2591 | * bb = <ASCII char>, cccc = <revision>. | |
2592 | */ | |
2593 | static cp_key_os_version_t parse_os_version(void) | |
2594 | { | |
2595 | const char *p = osversion; | |
2596 | ||
2597 | int a = 0; | |
2598 | while (*p >= '0' && *p <= '9') { | |
2599 | a = a * 10 + *p - '0'; | |
2600 | ++p; | |
2601 | } | |
2602 | ||
2603 | if (!a) | |
2604 | return 0; | |
2605 | ||
2606 | int b = *p++; | |
2607 | if (!b) | |
2608 | return 0; | |
2609 | ||
2610 | int c = 0; | |
2611 | while (*p >= '0' && *p <= '9') { | |
2612 | c = c * 10 + *p - '0'; | |
2613 | ++p; | |
2614 | } | |
2615 | ||
2616 | if (!c) | |
2617 | return 0; | |
2618 | ||
2619 | return (a & 0xff) << 24 | b << 16 | (c & 0xffff); | |
2620 | } | |
2621 | ||
2622 | cp_key_os_version_t cp_os_version(void) | |
2623 | { | |
2624 | static cp_key_os_version_t cp_os_version; | |
2625 | ||
2626 | if (cp_os_version) | |
2627 | return cp_os_version; | |
2628 | ||
2629 | if (!osversion[0]) | |
2630 | return 0; | |
2631 | ||
2632 | cp_os_version = parse_os_version(); | |
2633 | if (!cp_os_version) { | |
2634 | printf("cp_os_version: unable to parse osversion `%s'\n", osversion); | |
2635 | cp_os_version = 1; | |
2636 | } | |
2637 | ||
2638 | return cp_os_version; | |
2639 | } | |
2640 | ||
2641 | ||
/*
 * Called on the buffer strategy path to attach the correct content
 * protection key (cpx) to a buf_t before it reaches the device.
 *
 * Buffers that are raw-encrypted, belong to an unprotected cnode, or
 * are part of an in-flight filesystem resize pass through untouched.
 * If the cached key is missing, attempts to unwrap it via
 * cp_handle_vnop() before decorating the buffer.
 *
 * Returns 0 on success (including the nothing-to-do cases).  On
 * failure the buf is completed here via buf_biodone() with the error
 * set, and that error is returned; the caller must not touch the buf
 * again in that case.
 */
errno_t cp_handle_strategy(buf_t bp)
{
	vnode_t vp = buf_vnode(bp);
	cnode_t *cp = NULL;

	if (bufattr_rawencrypted(buf_attr(bp))
		|| !(cp = cp_get_protected_cnode(vp))
		|| !cp->c_cpentry) {
		// Nothing to do
		return 0;
	}

	/*
	 * For filesystem resize, we may not have access to the underlying
	 * file's cache key for whatever reason (device may be locked).
	 * However, we do not need it since we are going to use the
	 * temporary HFS-wide resize key which is generated once we start
	 * relocating file content.  If this file's I/O should be done
	 * using the resize key, it will have been supplied already, so do
	 * not attach the file's cp blob to the buffer.
	 */
	if (ISSET(cp->c_cpentry->cp_flags, CP_RELOCATION_INFLIGHT))
		return 0;

	{
		// Fast path: cache key already unwrapped, just decorate the buf.
		cpx_t cpx = cpkp_cpx(&cp->c_cpentry->cp_keys);

		if (cpx_has_key(cpx)) {
			bufattr_setcpx(buf_attr(bp), cpx);
			return 0;
		}
	}

	/*
	 * We rely mostly (see note below) upon the truncate lock to
	 * protect the CP cache key from getting tossed prior to our IO
	 * finishing here.  Nearly all cluster io calls to manipulate file
	 * payload from HFS take the truncate lock before calling into the
	 * cluster layer to ensure the file size does not change, or that
	 * they have exclusive right to change the EOF of the file.  That
	 * same guarantee protects us here since the code that deals with
	 * CP lock events must now take the truncate lock before doing
	 * anything.
	 *
	 * If you want to change content protection structures, then the
	 * truncate lock is not sufficient; you must take the truncate
	 * lock and then wait for outstanding writes to complete.  This is
	 * necessary because asynchronous I/O only holds the truncate lock
	 * whilst I/O is being queued.
	 *
	 * One exception should be the VM swapfile IO, because HFS will
	 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call
	 * for the swapfile code only without holding the truncate lock.
	 * This is because individual swapfiles are maintained at
	 * fixed-length sizes by the VM code.  In non-swapfile IO we use
	 * PAGEOUT_V2 semantics which allow us to create our own UPL and
	 * thus take the truncate lock before calling into the cluster
	 * layer.  In that case, however, we are not concerned with the CP
	 * blob being wiped out in the middle of the IO because there
	 * isn't anything to toss; the VM swapfile key stays in-core as
	 * long as the file is open.
	 */

	off_rsrc_t off_rsrc = off_rsrc_make(buf_lblkno(bp) * GetLogicalBlockSize(vp),
										VNODE_IS_RSRC(vp));
	cp_io_params_t io_params;


	/*
	 * We want to take the cnode lock here and because the vnode write
	 * count is a pseudo-lock, we need to do something to preserve
	 * lock ordering; the cnode lock comes before the write count.
	 * Ideally, the write count would be incremented after the
	 * strategy routine returns, but that becomes complicated if the
	 * strategy routine were to call buf_iodone before returning.
	 * For now, we drop the write count here and then pick it up again
	 * later.
	 */
	if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
		vnode_writedone(vp);

	hfs_lock_always(cp, HFS_SHARED_LOCK);
	cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
				 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
				 &io_params);
	hfs_unlock(cp);

	/*
	 * Last chance: If this data protected I/O does not have unwrapped
	 * keys present, then try to get them.  We already know that it
	 * should, by this point.
	 */
	if (!cpx_has_key(io_params.cpx)) {
		int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
		errno_t error = cp_handle_vnop(vp, io_op, 0);
		if (error) {
			/*
			 * We have to be careful here.  By this point in the I/O
			 * path, VM or the cluster engine has prepared a buf_t
			 * with the proper file offsets and all the rest, so
			 * simply erroring out will result in us leaking this
			 * particular buf_t.  We need to properly decorate the
			 * buf_t just as buf_strategy would so as to make it
			 * appear that the I/O errored out with the particular
			 * error code.
			 */
			if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
				vnode_startwrite(vp);
			buf_seterror (bp, error);
			buf_biodone(bp);
			return error;
		}

		/* Keys are now unwrapped; re-fetch the I/O parameters. */
		hfs_lock_always(cp, HFS_SHARED_LOCK);
		cp_io_params(VTOHFS(vp), cp->c_cpentry, off_rsrc,
					 ISSET(buf_flags(bp), B_READ) ? VNODE_READ : VNODE_WRITE,
					 &io_params);
		hfs_unlock(cp);
	}

	assert(buf_count(bp) <= io_params.max_len);
	bufattr_setcpx(buf_attr(bp), io_params.cpx);

	/* Re-acquire the write count we dropped above (see lock-order note). */
	if (!ISSET(buf_flags(bp), B_READ) && !ISSET(buf_flags(bp), B_RAW))
		vnode_startwrite(vp);

	return 0;
}
2771 | ||
2772 | #else // !CONFIG_PROTECT | |
2773 | ||
2774 | #include <sys/cdefs.h> | |
2775 | #include <sys/cprotect.h> | |
2776 | #include <sys/errno.h> | |
6d2010ae A |
2777 | |
/* Stub for kernels built without CONFIG_PROTECT: always unsupported. */
int cp_key_store_action(int action)
{
	(void)action;
	return ENOTSUP;
}
2782 | ||
6d2010ae A |
2783 | int cp_register_wraps(cp_wrap_func_t key_store_func __unused) |
2784 | { | |
2785 | return ENOTSUP; | |
2786 | } | |
2787 | ||
3e170ce0 A |
/* CONFIG_PROTECT disabled: cpx objects occupy no space. */
size_t cpx_size(size_t key_size)
{
	(void)key_size;
	return 0;
}
2792 | ||
2793 | cpx_t cpx_alloc(__unused size_t key_size) | |
2794 | { | |
2795 | return NULL; | |
2796 | } | |
2797 | ||
2798 | void cpx_free(__unused cpx_t cpx) | |
2799 | { | |
2800 | } | |
2801 | ||
/* CONFIG_PROTECT disabled: never a SEP-wrapped key. */
bool cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
	(void)cpx;
	return false;
}
2806 | ||
/* CONFIG_PROTECT disabled: setting the SEP-wrapped flag is a no-op. */
void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
	(void)cpx;
	(void)v;
}
2810 | ||
/* CONFIG_PROTECT disabled: offset-based IVs are never used. */
bool cpx_use_offset_for_iv(const struct cpx *cpx)
{
	(void)cpx;
	return false;
}
2815 | ||
/* CONFIG_PROTECT disabled: setting the offset-IV flag is a no-op. */
void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
	(void)cpx;
	(void)v;
}
2819 | ||
/* CONFIG_PROTECT disabled: there is never a cached key. */
uint16_t cpx_key_len(const struct cpx *cpx)
{
	(void)cpx;
	return 0;
}
2824 | ||
/* CONFIG_PROTECT disabled: setting the key length is a no-op. */
void cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
	(void)cpx;
	(void)key_len;
}
2828 | ||
/* CONFIG_PROTECT disabled: no key material exists. */
void *cpx_key(const struct cpx *cpx)
{
	(void)cpx;
	return NULL;
}
2833 | ||
2834 | aes_encrypt_ctx *cpx_iv_aes_ctx(__unused cpx_t cpx) | |
2835 | { | |
2836 | return NULL; | |
2837 | } | |
2838 | ||
6d2010ae | 2839 | #endif /* CONFIG_PROTECT */ |