/*
 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cprotect.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/filio.h>
#include <sys/content_protection.h>
#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>

#define PTR_ADD(type, base, offset)     (type)((uintptr_t)(base) + (offset))
/*
 * This structure contains the unwrapped key and is passed to the lower layers.
 * It is private so users must use the accessors declared in sys/cprotect.h.
 */

// cpx_flags
typedef uint32_t cpx_flags_t;
enum {
    CPX_SEP_WRAPPEDKEY          = 0x01,
    CPX_IV_AES_CTX_INITIALIZED  = 0x02,
    CPX_USE_OFFSET_FOR_IV       = 0x04,

    // Using AES IV context generated from key
    CPX_IV_AES_CTX_VFS          = 0x08,
    CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10,
    CPX_COMPOSITEKEY            = 0x20
};

struct cpx {
    uint32_t        cpx_magic1;
    aes_encrypt_ctx cpx_iv_aes_ctx;     // Context used for generating the IV
    cpx_flags_t     cpx_flags;
    uint16_t        cpx_max_key_len;
    uint16_t        cpx_key_len;
    uint8_t         cpx_cached_key[];
};
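
/*
 * Illustrative sketch only: a hypothetical caller installing a raw key
 * through the public accessors rather than touching struct cpx directly,
 * as the comment above requires. The helper name and its arguments are
 * assumptions for the example, not part of this file.
 */
#if 0 /* illustrative sketch, not part of the build */
static void
example_install_key(cpx_t cpx, const uint8_t *raw_key, uint16_t raw_key_len)
{
    /* Callers must respect the capacity chosen at cpx_alloc() time. */
    if (raw_key_len > cpx_max_key_len(cpx))
        return;

    cpx_set_key_len(cpx, raw_key_len);
    memcpy(cpx_key(cpx), raw_key, raw_key_len);
    cpx_set_is_sep_wrapped_key(cpx, false);
}
#endif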
// -- cpx_t accessors --

size_t cpx_size(size_t key_size)
{
    size_t size = sizeof(struct cpx) + key_size;

    size += 4; // Extra for magic

    return size;
}
size_t cpx_sizex(const struct cpx *cpx)
{
    return cpx_size(cpx->cpx_max_key_len);
}
cpx_t
cpx_alloc(size_t key_len)
{
    cpx_t cpx = NULL;

#if TARGET_OS_OSX
    /*
     * Macs only use 1 key per volume, so force it into its own page.
     * This way, we can write-protect as needed.
     */
    size_t cpsize = cpx_size(key_len);
    if (cpsize < PAGE_SIZE) {
        MALLOC(cpx, cpx_t, PAGE_SIZE, M_TEMP, M_WAITOK);
    } else {
        panic("cpx_size too large ! (%lu)", cpsize);
    }
#else
    MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK);
#endif
    cpx_init(cpx, key_len);

    return cpx;
}
/* this is really a void function */
void cpx_writeprotect(cpx_t cpx)
{
#if TARGET_OS_OSX
    void *cpxstart = (void *)cpx;
    void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
    vm_map_protect(kernel_map, cpxstart, cpxend, (VM_PROT_READ), FALSE);
#else
    (void)cpx;
#endif
    return;
}
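
/*
 * Illustrative sketch only: the intended lifecycle on the macOS path, where
 * the key occupies its own page so it can be made read-only once populated.
 * The function name and key buffer are hypothetical.
 */
#if 0 /* illustrative sketch, not part of the build */
static void
example_cpx_lifecycle(const uint8_t *key, uint16_t key_len)
{
    cpx_t cpx = cpx_alloc(key_len);

    cpx_set_key_len(cpx, key_len);
    memcpy(cpx_key(cpx), key, key_len);

    /* Once the key is in place, write-protect the page. */
    cpx_writeprotect(cpx);

    /* ... use the key ... */

    /* cpx_free() re-enables writes before zeroing and releasing the memory. */
    cpx_free(cpx);
}
#endif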
static const uint32_t cpx_magic1 = 0x7b787063;      // cpx{
static const uint32_t cpx_magic2 = 0x7870637d;      // }cpx
void cpx_free(cpx_t cpx)
{
    assert(cpx->cpx_magic1 == cpx_magic1);
    assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);

#if TARGET_OS_OSX
    /* unprotect the page before bzeroing */
    void *cpxstart = (void *)cpx;
    void *cpxend = (void *)((uint8_t *)cpx + PAGE_SIZE);
    vm_map_protect(kernel_map, cpxstart, cpxend, (VM_PROT_DEFAULT), FALSE);
#endif

    bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
    FREE(cpx, M_TEMP);
}
void cpx_init(cpx_t cpx, size_t key_len)
{
    cpx->cpx_magic1 = cpx_magic1;
    *PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;

    cpx->cpx_flags = 0;
    cpx->cpx_key_len = 0;
    cpx->cpx_max_key_len = key_len;
}
bool cpx_is_sep_wrapped_key(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}
void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
{
    if (v)
        SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
    else
        CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
}
bool cpx_is_composite_key(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
}
void cpx_set_is_composite_key(struct cpx *cpx, bool v)
{
    if (v)
        SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
    else
        CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
}
bool cpx_use_offset_for_iv(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}
void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
{
    if (v)
        SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
    else
        CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
}
bool cpx_synthetic_offset_for_iv(const struct cpx *cpx)
{
    return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}
void cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
{
    if (v)
        SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
    else
        CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
}
uint16_t cpx_max_key_len(const struct cpx *cpx)
{
    return cpx->cpx_max_key_len;
}
uint16_t cpx_key_len(const struct cpx *cpx)
{
    return cpx->cpx_key_len;
}
void cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
{
    cpx->cpx_key_len = key_len;

    if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
        /*
         * We assume that if the key length is being modified, the key
         * has changed. As a result, un-set any bits related to the
         * AES context, if needed. They should be re-generated on demand.
         */
        CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
    }
}
bool cpx_has_key(const struct cpx *cpx)
{
    return cpx->cpx_key_len > 0;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
void *cpx_key(const struct cpx *cpx)
{
    return (void *)cpx->cpx_cached_key;
}
#pragma clang diagnostic pop
void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
{
    aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx);
    SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
    CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
}
aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx)
{
    if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
        return &cpx->cpx_iv_aes_ctx;

    SHA1_CTX sha1ctxt;
    uint8_t  digest[SHA_DIGEST_LENGTH]; /* Kiv */

    /* First init the cp_cache_iv_key[] */
    SHA1Init(&sha1ctxt);

    /*
     * We can only use this when the keys are generated in the AP; as a result
     * we only use the first 32 bytes of key length in the cache key.
     */
    SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
    SHA1Final(digest, &sha1ctxt);

    cpx_set_aes_iv_key(cpx, digest);
    SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

    return &cpx->cpx_iv_aes_ctx;
}
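
/*
 * Illustrative sketch only: how the VFS-generated IV context behaves. The
 * first cpx_iv_aes_ctx() call derives the IV key from a SHA-1 of the cached
 * key; installing a new key clears the derived-context flags so the context
 * is re-derived on the next lookup. The function name is hypothetical, and
 * new_key_len is assumed to fit within cpx_max_key_len().
 */
#if 0 /* illustrative sketch, not part of the build */
static void
example_iv_ctx_regeneration(cpx_t cpx, const uint8_t *new_key, uint16_t new_key_len)
{
    /* Lazily derives the IV AES context and sets CPX_IV_AES_CTX_VFS. */
    aes_encrypt_ctx *ctx = cpx_iv_aes_ctx(cpx);
    (void)ctx;

    /* Installing a new key clears CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS... */
    cpx_set_key_len(cpx, new_key_len);
    memcpy(cpx_key(cpx), new_key, new_key_len);

    /* ...so the next lookup re-derives the context from the new key. */
    ctx = cpx_iv_aes_ctx(cpx);
    (void)ctx;
}
#endif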
void cpx_flush(cpx_t cpx)
{
    bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
    bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx));

    cpx->cpx_flags = 0;
    cpx->cpx_key_len = 0;
}
bool cpx_can_copy(const struct cpx *src, const struct cpx *dst)
{
    return src->cpx_key_len <= dst->cpx_max_key_len;
}
void cpx_copy(const struct cpx *src, cpx_t dst)
{
    uint16_t key_len = cpx_key_len(src);
    cpx_set_key_len(dst, key_len);
    memcpy(cpx_key(dst), cpx_key(src), key_len);
    dst->cpx_flags = src->cpx_flags;
    if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED))
        dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx;
}
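
/*
 * Illustrative sketch only: cpx_can_copy() should gate cpx_copy(), since the
 * destination must have been allocated with room for the source key. The
 * function name is hypothetical.
 */
#if 0 /* illustrative sketch, not part of the build */
static bool
example_cpx_clone(const struct cpx *src, cpx_t dst)
{
    if (!cpx_can_copy(src, dst))
        return false;

    cpx_copy(src, dst);
    return true;
}
#endif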
typedef struct {
    cp_lock_state_t state;
    int             valid_uuid;
    uuid_t          volume_uuid;
} cp_lock_vfs_callback_arg;
static int
cp_lock_vfs_callback(mount_t mp, void *arg)
{
    cp_lock_vfs_callback_arg *callback_arg = (cp_lock_vfs_callback_arg *)arg;

    if (callback_arg->valid_uuid) {
        struct vfs_attr va;
        VFSATTR_INIT(&va);
        VFSATTR_WANTED(&va, f_uuid);

        if (vfs_getattr(mp, &va, vfs_context_current()))
            return 0;

        if (!VFSATTR_IS_SUPPORTED(&va, f_uuid))
            return 0;

        if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t)))
            return 0;
    }

    VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->state, 0, vfs_context_kernel());

    return 0;
}
int
cp_key_store_action(cp_key_store_action_t action)
{
    cp_lock_vfs_callback_arg callback_arg;

    switch (action) {
    case CP_ACTION_LOCKED:
    case CP_ACTION_UNLOCKED:
        callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
        memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
        callback_arg.valid_uuid = 0;
        return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
    default:
        return -1;
    }
}
int
cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
{
    cp_lock_vfs_callback_arg callback_arg;

    switch (action) {
    case CP_ACTION_LOCKED:
    case CP_ACTION_UNLOCKED:
        callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
        memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
        callback_arg.valid_uuid = 1;
        return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg);
    default:
        return -1;
    }
}
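
/*
 * Illustrative sketch only: a hypothetical caller propagating a lock event
 * either to every mounted volume or to a single volume identified by UUID.
 * The function name and its parameters are assumptions for the example.
 */
#if 0 /* illustrative sketch, not part of the build */
static int
example_notify_lock(bool all_volumes, uuid_t volume_uuid)
{
    if (all_volumes)
        return cp_key_store_action(CP_ACTION_LOCKED);

    return cp_key_store_action_for_volume(volume_uuid, CP_ACTION_LOCKED);
}
#endif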
int
cp_is_valid_class(int isdir, int32_t protectionclass)
{
    /*
     * The valid protection classes are from 0 -> N.
     * We use a signed argument to detect unassigned values from
     * directory entry creation time in HFS.
     */
    if (isdir) {
        /* Directories are not allowed to have F, but they can have "NONE" */
        return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
                (protectionclass <= PROTECTION_CLASS_D));
    }

    return ((protectionclass >= PROTECTION_CLASS_A) &&
            (protectionclass <= PROTECTION_CLASS_F));
}
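
/*
 * Illustrative spot checks of the class ranges above (hypothetical helper):
 * class F is valid only for files, while directories may carry "NONE".
 */
#if 0 /* illustrative sketch, not part of the build */
static void
example_class_checks(void)
{
    assert(cp_is_valid_class(0, PROTECTION_CLASS_F));        /* file: F allowed */
    assert(!cp_is_valid_class(1, PROTECTION_CLASS_F));       /* dir: F rejected */
    assert(cp_is_valid_class(1, PROTECTION_CLASS_DIR_NONE)); /* dir: "NONE" allowed */
}
#endif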
/*
 * Parses versions of the form 12A316, i.e. <major><minor><revision> and
 * returns a uint32_t in the form 0xaabbcccc where aa = <major>,
 * bb = <ASCII char>, cccc = <revision>.
 */
static cp_key_os_version_t
parse_os_version(const char *vers)
{
    const char *p = vers;

    int a = 0;
    while (*p >= '0' && *p <= '9') {
        a = a * 10 + *p - '0';
        ++p;
    }

    if (!a)
        return 0;

    int b = *p++;
    if (!b)
        return 0;

    int c = 0;
    while (*p >= '0' && *p <= '9') {
        c = c * 10 + *p - '0';
        ++p;
    }

    if (!c)
        return 0;

    return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
}
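
/*
 * Worked example (illustrative only, hypothetical helper): "12A316" parses as
 * major 12 (0x0c), minor 'A' (0x41) and revision 316 (0x013c), i.e. 0x0c41013c.
 * Strings missing the major or revision component are rejected.
 */
#if 0 /* illustrative sketch, not part of the build */
static void
example_parse_os_version(void)
{
    assert(parse_os_version("12A316") == 0x0c41013cU);
    assert(parse_os_version("") == 0);    /* no major -> rejected */
    assert(parse_os_version("12A") == 0); /* no revision -> rejected */
}
#endif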
cp_key_os_version_t
cp_os_version(void)
{
    static cp_key_os_version_t cp_os_version;

    if (cp_os_version)
        return cp_os_version;

    if (!osversion[0])
        return 0;

    cp_os_version = parse_os_version(osversion);
    if (!cp_os_version) {
        printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
        cp_os_version = 1;
    }

    return cp_os_version;
}