/*
 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/cprotect.h>
30 #include <sys/malloc.h>
31 #include <sys/mount_internal.h>
32 #include <sys/filio.h>
33 #include <sys/content_protection.h>
34 #include <libkern/crypto/sha1.h>
35 #include <libkern/libkern.h>
36 //for write protection
37 #include <vm/vm_kern.h>
38 #include <vm/vm_map.h>
/* Advance `base' by `offset' bytes and cast the result to `type'. */
#define PTR_ADD(type, base, offset)		(type)((uintptr_t)(base) + (offset))
45 * This structure contains the unwrapped key and is passed to the lower layers.
46 * It is private so users must use the accessors declared in sys/cprotect.h
51 typedef uint32_t cpx_flags_t
;
53 CPX_SEP_WRAPPEDKEY
= 0x01,
54 CPX_IV_AES_CTX_INITIALIZED
= 0x02,
55 CPX_USE_OFFSET_FOR_IV
= 0x04,
57 // Using AES IV context generated from key
58 CPX_IV_AES_CTX_VFS
= 0x08,
59 CPX_SYNTHETIC_OFFSET_FOR_IV
= 0x10,
60 CPX_COMPOSITEKEY
= 0x20,
62 //write page protection
63 CPX_WRITE_PROTECTABLE
= 0x40
70 aes_encrypt_ctx cpx_iv_aes_ctx
; // Context used for generating the IV
71 cpx_flags_t cpx_flags
;
72 uint16_t cpx_max_key_len
;
74 uint8_t cpx_cached_key
[];
77 // -- cpx_t accessors --
80 cpx_size(size_t key_size
)
82 size_t size
= sizeof(struct cpx
) + key_size
;
85 size
+= 4; // Extra for magic
92 cpx_sizex(const struct cpx
*cpx
)
94 return cpx_size(cpx
->cpx_max_key_len
);
98 cpx_alloc(size_t key_len
)
102 #if CONFIG_KEYPAGE_WP
104 * Macs only use 1 key per volume, so force it into its own page.
105 * This way, we can write-protect as needed.
107 size_t cpsize
= cpx_size(key_len
);
108 if (cpsize
< PAGE_SIZE
) {
110 * Don't use MALLOC to allocate the page-sized structure. Instead,
111 * use kmem_alloc to bypass KASAN since we are supplying our own
112 * unilateral write protection on this page. Note that kmem_alloc
115 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&cpx
, PAGE_SIZE
, VM_KERN_MEMORY_FILE
)) {
117 * returning NULL at this point (due to failed allocation) would just
118 * result in a panic. fall back to attempting a normal MALLOC, and don't
119 * let the cpx get marked PROTECTABLE.
121 MALLOC(cpx
, cpx_t
, cpx_size(key_len
), M_TEMP
, M_WAITOK
);
123 //mark the page as protectable, since kmem_alloc succeeded.
124 cpx
->cpx_flags
|= CPX_WRITE_PROTECTABLE
;
127 panic("cpx_size too large ! (%lu)", cpsize
);
130 /* If key page write protection disabled, just switch to kernel MALLOC */
131 MALLOC(cpx
, cpx_t
, cpx_size(key_len
), M_TEMP
, M_WAITOK
);
133 cpx_init(cpx
, key_len
);
138 /* this is really a void function */
140 cpx_writeprotect(cpx_t cpx
)
142 #if CONFIG_KEYPAGE_WP
143 void *cpxstart
= (void*)cpx
;
144 void *cpxend
= (void*)((uint8_t*)cpx
+ PAGE_SIZE
);
145 if (cpx
->cpx_flags
& CPX_WRITE_PROTECTABLE
) {
146 vm_map_protect(kernel_map
, (vm_map_offset_t
)cpxstart
, (vm_map_offset_t
)cpxend
, (VM_PROT_READ
), FALSE
);
/* DEBUG-only guard words planted by cpx_init and checked by cpx_free. */
#if DEBUG
static const uint32_t cpx_magic1 = 0x7b787063;  // cpx{
static const uint32_t cpx_magic2 = 0x7870637d;  // }cpx
#endif
163 assert(cpx
->cpx_magic1
== cpx_magic1
);
164 assert(*PTR_ADD(uint32_t *, cpx
, cpx_sizex(cpx
) - 4) == cpx_magic2
);
167 #if CONFIG_KEYPAGE_WP
168 /* unprotect the page before bzeroing */
169 void *cpxstart
= (void*)cpx
;
170 void *cpxend
= (void*)((uint8_t*)cpx
+ PAGE_SIZE
);
171 if (cpx
->cpx_flags
& CPX_WRITE_PROTECTABLE
) {
172 vm_map_protect(kernel_map
, (vm_map_offset_t
)cpxstart
, (vm_map_offset_t
)cpxend
, (VM_PROT_DEFAULT
), FALSE
);
174 //now zero the memory after un-protecting it
175 bzero(cpx
->cpx_cached_key
, cpx
->cpx_max_key_len
);
177 //If we are here, then we used kmem_alloc to get the page. Must use kmem_free to drop it.
178 kmem_free(kernel_map
, (vm_offset_t
)cpx
, PAGE_SIZE
);
182 bzero(cpx
->cpx_cached_key
, cpx
->cpx_max_key_len
);
189 cpx_init(cpx_t cpx
, size_t key_len
)
192 cpx
->cpx_magic1
= cpx_magic1
;
193 *PTR_ADD(uint32_t *, cpx
, cpx_size(key_len
) - 4) = cpx_magic2
;
196 cpx
->cpx_key_len
= 0;
197 cpx
->cpx_max_key_len
= key_len
;
201 cpx_is_sep_wrapped_key(const struct cpx
*cpx
)
203 return ISSET(cpx
->cpx_flags
, CPX_SEP_WRAPPEDKEY
);
207 cpx_set_is_sep_wrapped_key(struct cpx
*cpx
, bool v
)
210 SET(cpx
->cpx_flags
, CPX_SEP_WRAPPEDKEY
);
212 CLR(cpx
->cpx_flags
, CPX_SEP_WRAPPEDKEY
);
217 cpx_is_composite_key(const struct cpx
*cpx
)
219 return ISSET(cpx
->cpx_flags
, CPX_COMPOSITEKEY
);
223 cpx_set_is_composite_key(struct cpx
*cpx
, bool v
)
226 SET(cpx
->cpx_flags
, CPX_COMPOSITEKEY
);
228 CLR(cpx
->cpx_flags
, CPX_COMPOSITEKEY
);
233 cpx_use_offset_for_iv(const struct cpx
*cpx
)
235 return ISSET(cpx
->cpx_flags
, CPX_USE_OFFSET_FOR_IV
);
239 cpx_set_use_offset_for_iv(struct cpx
*cpx
, bool v
)
242 SET(cpx
->cpx_flags
, CPX_USE_OFFSET_FOR_IV
);
244 CLR(cpx
->cpx_flags
, CPX_USE_OFFSET_FOR_IV
);
249 cpx_synthetic_offset_for_iv(const struct cpx
*cpx
)
251 return ISSET(cpx
->cpx_flags
, CPX_SYNTHETIC_OFFSET_FOR_IV
);
255 cpx_set_synthetic_offset_for_iv(struct cpx
*cpx
, bool v
)
258 SET(cpx
->cpx_flags
, CPX_SYNTHETIC_OFFSET_FOR_IV
);
260 CLR(cpx
->cpx_flags
, CPX_SYNTHETIC_OFFSET_FOR_IV
);
265 cpx_max_key_len(const struct cpx
*cpx
)
267 return cpx
->cpx_max_key_len
;
271 cpx_key_len(const struct cpx
*cpx
)
273 return cpx
->cpx_key_len
;
277 cpx_set_key_len(struct cpx
*cpx
, uint16_t key_len
)
279 cpx
->cpx_key_len
= key_len
;
281 if (ISSET(cpx
->cpx_flags
, CPX_IV_AES_CTX_VFS
)) {
283 * We assume that if the key length is being modified, the key
284 * has changed. As a result, un-set any bits related to the
285 * AES context, if needed. They should be re-generated
288 CLR(cpx
->cpx_flags
, CPX_IV_AES_CTX_INITIALIZED
| CPX_IV_AES_CTX_VFS
);
293 cpx_has_key(const struct cpx
*cpx
)
295 return cpx
->cpx_key_len
> 0;
298 #pragma clang diagnostic push
299 #pragma clang diagnostic ignored "-Wcast-qual"
301 cpx_key(const struct cpx
*cpx
)
303 return (void *)cpx
->cpx_cached_key
;
305 #pragma clang diagnostic pop
308 cpx_set_aes_iv_key(struct cpx
*cpx
, void *iv_key
)
310 aes_encrypt_key128(iv_key
, &cpx
->cpx_iv_aes_ctx
);
311 SET(cpx
->cpx_flags
, CPX_IV_AES_CTX_INITIALIZED
| CPX_USE_OFFSET_FOR_IV
);
312 CLR(cpx
->cpx_flags
, CPX_IV_AES_CTX_VFS
);
316 cpx_iv_aes_ctx(struct cpx
*cpx
)
318 if (ISSET(cpx
->cpx_flags
, CPX_IV_AES_CTX_INITIALIZED
)) {
319 return &cpx
->cpx_iv_aes_ctx
;
323 uint8_t digest
[SHA_DIGEST_LENGTH
]; /* Kiv */
325 /* First init the cp_cache_iv_key[] */
329 * We can only use this when the keys are generated in the AP; As a result
330 * we only use the first 32 bytes of key length in the cache key
332 SHA1Update(&sha1ctxt
, cpx
->cpx_cached_key
, cpx
->cpx_key_len
);
333 SHA1Final(digest
, &sha1ctxt
);
335 cpx_set_aes_iv_key(cpx
, digest
);
336 SET(cpx
->cpx_flags
, CPX_IV_AES_CTX_VFS
);
338 return &cpx
->cpx_iv_aes_ctx
;
344 bzero(cpx
->cpx_cached_key
, cpx
->cpx_max_key_len
);
345 bzero(&cpx
->cpx_iv_aes_ctx
, sizeof(cpx
->cpx_iv_aes_ctx
));
347 cpx
->cpx_key_len
= 0;
351 cpx_can_copy(const struct cpx
*src
, const struct cpx
*dst
)
353 return src
->cpx_key_len
<= dst
->cpx_max_key_len
;
357 cpx_copy(const struct cpx
*src
, cpx_t dst
)
359 uint16_t key_len
= cpx_key_len(src
);
360 cpx_set_key_len(dst
, key_len
);
361 memcpy(cpx_key(dst
), cpx_key(src
), key_len
);
362 dst
->cpx_flags
= src
->cpx_flags
;
363 if (ISSET(dst
->cpx_flags
, CPX_IV_AES_CTX_INITIALIZED
)) {
364 dst
->cpx_iv_aes_ctx
= src
->cpx_iv_aes_ctx
;
369 cp_lock_state_t state
;
372 } cp_lock_vfs_callback_arg
;
375 cp_lock_vfs_callback(mount_t mp
, void *arg
)
377 cp_lock_vfs_callback_arg
*callback_arg
= (cp_lock_vfs_callback_arg
*)arg
;
379 if (callback_arg
->valid_uuid
) {
382 VFSATTR_WANTED(&va
, f_uuid
);
384 if (vfs_getattr(mp
, &va
, vfs_context_current())) {
388 if (!VFSATTR_IS_SUPPORTED(&va
, f_uuid
)) {
392 if (memcmp(va
.f_uuid
, callback_arg
->volume_uuid
, sizeof(uuid_t
))) {
397 VFS_IOCTL(mp
, FIODEVICELOCKED
, (void *)(uintptr_t)callback_arg
->state
, 0, vfs_context_kernel());
402 cp_key_store_action(cp_key_store_action_t action
)
404 cp_lock_vfs_callback_arg callback_arg
;
407 case CP_ACTION_LOCKED
:
408 case CP_ACTION_UNLOCKED
:
409 callback_arg
.state
= (action
== CP_ACTION_LOCKED
? CP_LOCKED_STATE
: CP_UNLOCKED_STATE
);
410 memset(callback_arg
.volume_uuid
, 0, sizeof(uuid_t
));
411 callback_arg
.valid_uuid
= 0;
412 return vfs_iterate(0, cp_lock_vfs_callback
, (void *)&callback_arg
);
419 cp_key_store_action_for_volume(uuid_t volume_uuid
, cp_key_store_action_t action
)
421 cp_lock_vfs_callback_arg callback_arg
;
424 case CP_ACTION_LOCKED
:
425 case CP_ACTION_UNLOCKED
:
426 callback_arg
.state
= (action
== CP_ACTION_LOCKED
? CP_LOCKED_STATE
: CP_UNLOCKED_STATE
);
427 memcpy(callback_arg
.volume_uuid
, volume_uuid
, sizeof(uuid_t
));
428 callback_arg
.valid_uuid
= 1;
429 return vfs_iterate(0, cp_lock_vfs_callback
, (void *)&callback_arg
);
436 cp_is_valid_class(int isdir
, int32_t protectionclass
)
439 * The valid protection classes are from 0 -> N
440 * We use a signed argument to detect unassigned values from
441 * directory entry creation time in HFS.
444 /* Directories are not allowed to have F, but they can have "NONE" */
445 return (protectionclass
>= PROTECTION_CLASS_DIR_NONE
) &&
446 (protectionclass
<= PROTECTION_CLASS_D
);
448 return (protectionclass
>= PROTECTION_CLASS_A
) &&
449 (protectionclass
<= PROTECTION_CLASS_F
);
454 * Parses versions of the form 12A316, i.e. <major><minor><revision> and
455 * returns a uint32_t in the form 0xaabbcccc where aa = <major>,
456 * bb = <ASCII char>, cccc = <revision>.
458 static cp_key_os_version_t
459 parse_os_version(const char *vers
)
461 const char *p
= vers
;
464 while (*p
>= '0' && *p
<= '9') {
465 a
= a
* 10 + *p
- '0';
479 while (*p
>= '0' && *p
<= '9') {
480 c
= c
* 10 + *p
- '0';
488 return (a
& 0xff) << 24 | b
<< 16 | (c
& 0xffff);
494 static cp_key_os_version_t cp_os_version
;
497 return cp_os_version
;
504 cp_os_version
= parse_os_version(osversion
);
505 if (!cp_os_version
) {
506 printf("cp_os_version: unable to parse osversion `%s'\n", osversion
);
510 return cp_os_version
;