/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object (itself backed by
 * the encrypted file), decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (a simple linked list
 * of pagers).
 */
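/*
 * For context: the decryption callbacks used below come from the
 * "pager_crypt_info" structure that the caller provides (the authoritative
 * definition lives in <kern/page_decrypt.h>, included above).  Based purely
 * on how its fields are used in this file, the structure looks roughly like
 * the sketch below; exact types and field order are illustrative, not
 * authoritative:
 *
 *     struct pager_crypt_info {
 *         // decrypt one page of ciphertext at src_vaddr into dst_vaddr;
 *         // crypto_offset identifies the page within the protected range
 *         int  (*page_decrypt)(const void *src_vaddr,
 *                  void *dst_vaddr,
 *                  unsigned long long crypto_offset,
 *                  void *crypt_ops);
 *         // tear down the crypt module's private state
 *         void (*crypt_end)(void *crypt_ops);
 *         // opaque state handed back to the callbacks above
 *         void *crypt_ops;
 *         // reference count, managed by crypt_info_reference() and
 *         // crypt_info_deallocate() at the bottom of this file
 *         int  crypt_refcnt;
 *     };
 */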
/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);

#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
    .memory_object_reference = apple_protect_pager_reference,
    .memory_object_deallocate = apple_protect_pager_deallocate,
    .memory_object_init = apple_protect_pager_init,
    .memory_object_terminate = apple_protect_pager_terminate,
    .memory_object_data_request = apple_protect_pager_data_request,
    .memory_object_data_return = apple_protect_pager_data_return,
    .memory_object_data_initialize = apple_protect_pager_data_initialize,
    .memory_object_data_unlock = apple_protect_pager_data_unlock,
    .memory_object_synchronize = apple_protect_pager_synchronize,
    .memory_object_map = apple_protect_pager_map,
    .memory_object_last_unmap = apple_protect_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_pager_name = "apple_protect"
};
/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
    /* mandatory generic header */
    struct memory_object ap_pgr_hdr;

    /* pager-specific data */
    queue_chain_t           pager_queue;    /* next & prev pagers */
    struct os_refcnt        ref_count;      /* reference count */
    boolean_t               is_ready;       /* is this pager ready ? */
    boolean_t               is_mapped;      /* is this mem_obj mapped ? */
    vm_object_t             backing_object; /* VM obj w/ encrypted data */
    vm_object_offset_t      backing_offset;
    vm_object_offset_t      crypto_backing_offset; /* for key... */
    vm_object_offset_t      crypto_start;
    vm_object_offset_t      crypto_end;
    struct pager_crypt_info *crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;              /* number of pagers */
int apple_protect_pager_count_mapped = 0;       /* number of mapped pagers */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;
/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
    boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


int apple_protect_pagerdebug = 0;
#define PAGER_ALL       0xffffffff
#define PAGER_INIT      0x00000001
#define PAGER_PAGEIN    0x00000002

#if APPLE_PROTECT_PAGER_DEBUG
#define PAGER_DEBUG(LEVEL, A)                                           \
    if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {                  \
            printf A;                                                   \
    }
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
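/*
 * Illustrative usage of PAGER_DEBUG(), matching the calls later in this
 * file: the second argument is a fully parenthesized printf() argument
 * list, so the whole thing travels through the macro as one parameter:
 *
 *     PAGER_DEBUG(PAGER_ALL,
 *         ("apple_protect_pager_init: %p, %p, %x\n",
 *         mem_obj, control, pg_size));
 *
 * With apple_protect_pagerdebug left at 0, the debug configuration compiles
 * these calls down to a never-taken branch; the non-debug configuration
 * removes them entirely.
 */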
/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size)
{
    apple_protect_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = apple_protect_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->ap_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("apple_protect_pager_init: "
            "memory_object_change_attributes() failed");
    }

#if CONFIG_SECLUDED_MEMORY
    if (secluded_for_filecache) {
        memory_object_mark_eligible_for_secluded(control, TRUE);
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    return KERN_SUCCESS;
}
/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("apple_protect_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("apple_protect_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}
/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t mo_fault_info)
{
    apple_protect_pager_t       pager;
    memory_object_control_t     mo_control;
    upl_t                       upl;
    int                         upl_flags;
    upl_size_t                  upl_size;
    upl_page_info_t             *upl_pl;
    unsigned int                pl_count;
    vm_object_t                 src_top_object, src_page_object, dst_object;
    kern_return_t               kr, retval;
    vm_offset_t                 src_vaddr, dst_vaddr;
    vm_offset_t                 cur_offset;
    vm_offset_t                 offset_in_page;
    kern_return_t               error_code;
    vm_prot_t                   prot;
    vm_page_t                   src_page, top_page;
    int                         interruptible;
    struct vm_object_fault_info fault_info;
    int                         ret;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
    retval = KERN_SUCCESS;
    src_top_object = VM_OBJECT_NULL;
    src_page_object = VM_OBJECT_NULL;
    upl = NULL;
    upl_pl = NULL;
    fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
    fault_info.stealth = TRUE;
    fault_info.io_sync = FALSE;
    fault_info.mark_zf_absent = FALSE;
    fault_info.batch_pmap_op = FALSE;
    interruptible = fault_info.interruptible;

    pager = apple_protect_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    fault_info.lo_offset += pager->backing_offset;
    fault_info.hi_offset += pager->backing_offset;

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->ap_pgr_hdr.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE |    /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);
    /*
     * We'll map the encrypted data in the kernel address space from the
     * backing VM object (itself backed by the encrypted file via
     * the vnode pager).
     */
    src_top_object = pager->backing_object;
    assert(src_top_object != VM_OBJECT_NULL);
    vm_object_reference(src_top_object); /* keep the source object alive */

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0;
        retval == KERN_SUCCESS && cur_offset < length;
        cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (encrypted) page in the kernel's
         * virtual address space.
         * We already hold a reference on the src_top_object.
         */
retry_src_fault:
        vm_object_lock(src_top_object);
        vm_object_paging_begin(src_top_object);
        error_code = 0;
        prot = VM_PROT_READ;
        src_page = VM_PAGE_NULL;
        kr = vm_fault_page(src_top_object,
            pager->backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE, /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            FALSE,
            &fault_info);
        switch (kr) {
        case VM_FAULT_SUCCESS:
            break;
        case VM_FAULT_RETRY:
            goto retry_src_fault;
        case VM_FAULT_MEMORY_SHORTAGE:
            if (vm_page_wait(interruptible)) {
                goto retry_src_fault;
            }
            OS_FALLTHROUGH;
        case VM_FAULT_INTERRUPTED:
            retval = MACH_SEND_INTERRUPTED;
            goto done;
        case VM_FAULT_SUCCESS_NO_VM_PAGE:
            /* success but no VM page: fail */
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
            OS_FALLTHROUGH;
        case VM_FAULT_MEMORY_ERROR:
            /* the page is not there ! */
            if (error_code) {
                retval = error_code;
            } else {
                retval = KERN_MEMORY_ERROR;
            }
            goto done;
        default:
            panic("apple_protect_pager_data_request: "
                "vm_fault_page() unexpected error 0x%x\n",
                kr);
        }
        assert(src_page != VM_PAGE_NULL);
        assert(src_page->vmp_busy);

        if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
            vm_page_lockspin_queues();

            if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
                vm_page_speculate(src_page, FALSE);
            }
            vm_page_unlock_queues();
        }

        /*
         * Establish pointers to the source
         * and destination physical pages.
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);

        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
            << PAGE_SHIFT);
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

        src_page_object = VM_PAGE_OBJECT(src_page);

        /*
         * Validate the original page...
         */
        if (src_page_object->code_signed) {
            vm_page_validate_cs_mapped(
                src_page, PAGE_SIZE, 0,
                (const void *) src_vaddr);
        }
        /*
         * ... and transfer the results to the destination page.
         */
        UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_validated);
        UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_tainted);
        UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_nx);

        /*
         * page_decrypt() might access a mapped file, so let's release
         * the object lock for the source page to avoid a potential
         * deadlock.  The source page is kept busy and we have a
         * "paging_in_progress" reference on its object, so it's safe
         * to unlock the object here.
         */
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_unlock(src_page_object);
        /*
         * Decrypt the encrypted contents of the source page
         * into the destination page.
         */
        for (offset_in_page = 0;
            offset_in_page < PAGE_SIZE;
            offset_in_page += 4096) {
            if (offset + cur_offset + offset_in_page <
                pager->crypto_start ||
                offset + cur_offset + offset_in_page >=
                pager->crypto_end) {
                /* not encrypted: just copy */
                bcopy((const char *)(src_vaddr +
                    offset_in_page),
                    (char *)(dst_vaddr + offset_in_page),
                    4096);

                if (apple_protect_pager_data_request_debug) {
                    printf("apple_protect_data_request"
                        "(%p,0x%llx+0x%llx+0x%04llx): "
                        "out of crypto range "
                        "[0x%llx:0x%llx]: "
                        "COPY [0x%016llx 0x%016llx] "
                        "code_signed=%d "
                        "cs_validated=%d "
                        "cs_tainted=%d "
                        "cs_nx=%d\n",
                        pager,
                        offset,
                        (uint64_t) cur_offset,
                        (uint64_t) offset_in_page,
                        pager->crypto_start,
                        pager->crypto_end,
                        *(uint64_t *)(dst_vaddr +
                        offset_in_page),
                        *(uint64_t *)(dst_vaddr +
                        offset_in_page + 8),
                        src_page_object->code_signed,
                        src_page->vmp_cs_validated,
                        src_page->vmp_cs_tainted,
                        src_page->vmp_cs_nx);
                }
                continue;
            }
            ret = pager->crypt_info->page_decrypt(
                (const void *)(src_vaddr + offset_in_page),
                (void *)(dst_vaddr + offset_in_page),
                ((pager->crypto_backing_offset -
                pager->crypto_start) + /* XXX ? */
                offset +
                cur_offset +
                offset_in_page),
                pager->crypt_info->crypt_ops);

            if (apple_protect_pager_data_request_debug) {
                printf("apple_protect_data_request"
                    "(%p,0x%llx+0x%llx+0x%04llx): "
                    "in crypto range [0x%llx:0x%llx]: "
                    "DECRYPT offset 0x%llx="
                    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
                    "[0x%016llx 0x%016llx] "
                    "code_signed=%d "
                    "cs_validated=%d "
                    "cs_tainted=%d "
                    "cs_nx=%d "
                    "ret=0x%x\n",
                    pager,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    pager->crypto_start, pager->crypto_end,
                    ((pager->crypto_backing_offset -
                    pager->crypto_start) +
                    offset +
                    cur_offset +
                    offset_in_page),
                    pager->crypto_backing_offset,
                    pager->crypto_start,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    *(uint64_t *)(dst_vaddr + offset_in_page),
                    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
                    src_page_object->code_signed,
                    src_page->vmp_cs_validated,
                    src_page->vmp_cs_tainted,
                    src_page->vmp_cs_nx,
                    ret);
            }
            if (ret) {
                /*
                 * Decryption failed.  Abort the fault.
                 */
                retval = KERN_ABORTED;
            }
        }
        assert(VM_PAGE_OBJECT(src_page) == src_page_object);
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_lock(src_page_object);

        /*
         * Cleanup the result of vm_fault_page() of the source page.
         */
        PAGE_WAKEUP_DONE(src_page);
        src_page = VM_PAGE_NULL;
        vm_object_paging_end(src_page_object);
        vm_object_unlock(src_page_object);

        if (top_page != VM_PAGE_NULL) {
            assert(VM_PAGE_OBJECT(top_page) == src_top_object);
            vm_object_lock(src_top_object);
            VM_PAGE_FREE(top_page);
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        }
    }
done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
            if (retval == KERN_ABORTED) {
                wait_result_t wait_result;

                /*
                 * We aborted the fault and did not provide
                 * any contents for the requested pages but
                 * the pages themselves are not invalid, so
                 * let's return success and let the caller
                 * retry the fault, in case it might succeed
                 * later (when the decryption code is up and
                 * running in the kernel, for example).
                 */
                retval = KERN_SUCCESS;
                /*
                 * Wait a little bit first to avoid using
                 * too much CPU time retrying and failing
                 * the same fault over and over again.
                 */
                wait_result = assert_wait_timeout(
                    (event_t) apple_protect_pager_data_request,
                    THREAD_UNINT,
                    10000,      /* 10ms */
                    NSEC_PER_USEC);
                assert(wait_result == THREAD_WAITING);
                wait_result = thread_block(THREAD_CONTINUE_NULL);
                assert(wait_result == THREAD_TIMED_OUT);
            }
        } else {
            boolean_t empty;

            assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
                "upl %p offset 0x%llx size 0x%x",
                upl, upl->u_offset, upl->u_size);
            upl_commit_range(upl, 0, upl->u_size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }

    if (src_top_object != VM_OBJECT_NULL) {
        vm_object_deallocate(src_top_object);
    }
    return retval;
}
/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    os_ref_retain_locked(&pager->ref_count);
    lck_mtx_unlock(&apple_protect_pager_lock);
}
/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
    apple_protect_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    apple_protect_pager_count--;
}
/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
    apple_protect_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
    }

    /* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: deallocate %p ref %d\n",
        __FUNCTION__,
        pager->crypt_info,
        pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
    crypt_info_deallocate(pager->crypt_info);
    pager->crypt_info = NULL;

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}
/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
    apple_protect_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    int count_unmapped;

    if (!locked) {
        lck_mtx_lock(&apple_protect_pager_lock);
    }

    count_unmapped = (apple_protect_pager_count -
        apple_protect_pager_count_mapped);
    if (count_unmapped > apple_protect_pager_cache_limit) {
        /* we have too many unmapped pagers: trim some */
        needs_trimming = TRUE;
    } else {
        needs_trimming = FALSE;
    }

    /* drop a reference on this pager */
    os_ref_count_t ref_count = os_ref_release_locked(&pager->ref_count);

    if (ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        apple_protect_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&apple_protect_pager_lock);
        apple_protect_pager_terminate_internal(pager);
    } else if (ref_count == 0) {
        /*
         * Dropped the existence reference;  the memory object has
         * been terminated.  Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&apple_protect_pager_lock);
        if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
            pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = APPLE_PROTECT_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&apple_protect_pager_lock);
    }

    if (needs_trimming) {
        apple_protect_pager_trim();
    }
    /* caution: lock is not held on return... */
}
/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
    pager = apple_protect_pager_lookup(mem_obj);
    apple_protect_pager_deallocate_internal(pager, FALSE);
}
kern_return_t
apple_protect_pager_terminate(
    __unused memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}
kern_return_t
apple_protect_pager_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}
/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    assert(pager->is_ready);
    assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager:  take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        os_ref_retain_locked(&pager->ref_count);
        apple_protect_pager_count_mapped++;
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    return KERN_SUCCESS;
}
/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;
    int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_last_unmap: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        apple_protect_pager_count_mapped--;
        count_unmapped = (apple_protect_pager_count -
            apple_protect_pager_count_mapped);
        if (count_unmapped > apple_protect_pager_count_unmapped_max) {
            apple_protect_pager_count_unmapped_max = count_unmapped;
        }
        pager->is_mapped = FALSE;
        apple_protect_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock ! */
    } else {
        lck_mtx_unlock(&apple_protect_pager_lock);
    }

    return KERN_SUCCESS;
}
apple_protect_pager_t
apple_protect_pager_lookup(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
    pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
    assert(os_ref_get_count(&pager->ref_count) > 0);
    return pager;
}
apple_protect_pager_t
apple_protect_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end)
{
    apple_protect_pager_t       pager, pager2;
    memory_object_control_t     control;
    kern_return_t               kr;
    struct pager_crypt_info     *old_crypt_info;

    pager = (apple_protect_pager_t) kalloc(sizeof(*pager));
    if (pager == APPLE_PROTECT_PAGER_NULL) {
        return APPLE_PROTECT_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the first word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
    pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->is_ready = FALSE;    /* not ready until it has a "name" */
    os_ref_init_count(&pager->ref_count, NULL, 2); /* existence reference (for the cache) and another for the caller */
    pager->is_mapped = FALSE;
    pager->backing_object = backing_object;
    pager->backing_offset = backing_offset;
    pager->crypto_backing_offset = crypto_backing_offset;
    pager->crypto_start = crypto_start;
    pager->crypto_end = crypto_end;
    pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->page_decrypt,
        crypt_info->crypt_end,
        crypt_info->crypt_ops,
        crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

    vm_object_reference(backing_object);

    old_crypt_info = NULL;
    lck_mtx_lock(&apple_protect_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&apple_protect_pager_queue,
        pager2,
        apple_protect_pager_t,
        pager_queue) {
        if ((pager2->crypt_info->page_decrypt !=
            crypt_info->page_decrypt) ||
            (pager2->crypt_info->crypt_end !=
            crypt_info->crypt_end) ||
            (pager2->crypt_info->crypt_ops !=
            crypt_info->crypt_ops)) {
            /* crypt_info contents do not match: next pager */
            continue;
        }

        /* found a match for crypt_info ... */
        if (old_crypt_info) {
            /* ... already switched to that crypt_info */
            assert(old_crypt_info == pager2->crypt_info);
        } else if (pager2->crypt_info != crypt_info) {
            /* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: reference %p ref %d "
                "(create match)\n",
                __FUNCTION__,
                pager2->crypt_info,
                pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            old_crypt_info = pager2->crypt_info;
            crypt_info_reference(old_crypt_info);
            pager->crypt_info = old_crypt_info;
        }

        if (pager2->backing_object == backing_object &&
            pager2->backing_offset == backing_offset &&
            pager2->crypto_backing_offset == crypto_backing_offset &&
            pager2->crypto_start == crypto_start &&
            pager2->crypto_end == crypto_end) {
            /* full match: use that pager */
            break;
        }
    }
    if (!queue_end(&apple_protect_pager_queue,
        (queue_entry_t) pager2)) {
        /* we lost the race, down with the loser... */
        lck_mtx_unlock(&apple_protect_pager_lock);
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
            __FUNCTION__,
            pager->crypt_info,
            pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
        crypt_info_deallocate(pager->crypt_info);
        pager->crypt_info = NULL;
        kfree(pager, sizeof(*pager));
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }
    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue);
    apple_protect_pager_count++;
    if (apple_protect_pager_count > apple_protect_pager_count_max) {
        apple_protect_pager_count_max = apple_protect_pager_count;
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    memory_object_mark_trusted(control);

    lck_mtx_lock(&apple_protect_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    lck_mtx_unlock(&apple_protect_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    if (old_crypt_info != NULL &&
        old_crypt_info != crypt_info) {
        /* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: deallocate %p ref %d "
            "(create used old)\n",
            __FUNCTION__,
            crypt_info,
            crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
        crypt_info_deallocate(crypt_info);
        crypt_info = NULL;
    }

    return pager;
}
/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end)
{
    apple_protect_pager_t       pager;
    struct pager_crypt_info     *old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->page_decrypt,
        crypt_info->crypt_end,
        crypt_info->crypt_ops,
        crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
    old_crypt_info = NULL;

    lck_mtx_lock(&apple_protect_pager_lock);

    queue_iterate(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue) {
        if ((pager->crypt_info->page_decrypt !=
            crypt_info->page_decrypt) ||
            (pager->crypt_info->crypt_end !=
            crypt_info->crypt_end) ||
            (pager->crypt_info->crypt_ops !=
            crypt_info->crypt_ops)) {
            /* no match for "crypt_info": next pager */
            continue;
        }

        /* found a match for crypt_info ... */
        if (old_crypt_info) {
            /* ... already switched to that crypt_info */
            assert(old_crypt_info == pager->crypt_info);
        } else {
            /* ... switch to that pager's crypt_info */
            old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "switching crypt_info from %p [%p,%p,%p,%d] "
                "to %p [%p,%p,%p,%d] from pager %p\n",
                __FUNCTION__,
                crypt_info,
                crypt_info->page_decrypt,
                crypt_info->crypt_end,
                crypt_info->crypt_ops,
                crypt_info->crypt_refcnt,
                old_crypt_info,
                old_crypt_info->page_decrypt,
                old_crypt_info->crypt_end,
                old_crypt_info->crypt_ops,
                old_crypt_info->crypt_refcnt,
                pager);
            printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
                __FUNCTION__,
                pager->crypt_info,
                pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            crypt_info_reference(pager->crypt_info);
        }

        if (pager->backing_object == backing_object &&
            pager->backing_offset == backing_offset &&
            pager->crypto_backing_offset == crypto_backing_offset &&
            pager->crypto_start == crypto_start &&
            pager->crypto_end == crypto_end) {
            /* full match: use that pager! */
            assert(old_crypt_info == pager->crypt_info);
            assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "pager match with %p crypt_info %p\n",
                __FUNCTION__,
                pager,
                old_crypt_info);
            printf("CRYPT_INFO %s: deallocate %p ref %d "
                "(pager match)\n",
                __FUNCTION__,
                old_crypt_info,
                old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            /* release the extra ref on crypt_info we got above */
            crypt_info_deallocate(old_crypt_info);
            assert(old_crypt_info->crypt_refcnt > 0);
            /* give extra reference on pager to the caller */
            os_ref_retain_locked(&pager->ref_count);
            break;
        }
    }
    if (queue_end(&apple_protect_pager_queue,
        (queue_entry_t) pager)) {
        lck_mtx_unlock(&apple_protect_pager_lock);
        /* no existing pager for this backing object */
        pager = APPLE_PROTECT_PAGER_NULL;
        if (old_crypt_info) {
            /* use this old crypt_info for new pager */
            new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "will use old_crypt_info %p for new pager\n",
                __FUNCTION__,
                old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        } else {
            /* allocate a new crypt_info for new pager */
            new_crypt_info = kalloc(sizeof(*new_crypt_info));
            *new_crypt_info = *crypt_info;
            new_crypt_info->crypt_refcnt = 1;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "will use new_crypt_info %p for new pager\n",
                __FUNCTION__,
                new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        }
        if (new_crypt_info == NULL) {
            /* can't create new pager without a crypt_info */
        } else {
            /* create new pager */
            pager = apple_protect_pager_create(
                backing_object,
                backing_offset,
                crypto_backing_offset,
                new_crypt_info,
                crypto_start,
                crypto_end);
        }
        if (pager == APPLE_PROTECT_PAGER_NULL) {
            /* could not create a new pager */
            if (new_crypt_info == old_crypt_info) {
                /* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
                printf("CRYPT_INFO %s: deallocate %p ref %d "
                    "(create fail old_crypt_info)\n",
                    __FUNCTION__,
                    old_crypt_info,
                    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
                crypt_info_deallocate(old_crypt_info);
                old_crypt_info = NULL;
            } else {
                /* release unused new_crypt_info */
                assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
                printf("CRYPT_INFO %s: deallocate %p ref %d "
                    "(create fail new_crypt_info)\n",
                    __FUNCTION__,
                    new_crypt_info,
                    new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
                crypt_info_deallocate(new_crypt_info);
                new_crypt_info = NULL;
            }
            return MEMORY_OBJECT_NULL;
        }
        lck_mtx_lock(&apple_protect_pager_lock);
    } else {
        assert(old_crypt_info == pager->crypt_info);
    }

    while (!pager->is_ready) {
        lck_mtx_sleep(&apple_protect_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    return (memory_object_t) pager;
}
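/*
 * Illustrative (hypothetical) caller sketch, not part of this file: the
 * code that maps "apple protected" executable ranges would typically use
 * apple_protect_pager_setup() along these lines, with its own locking and
 * error handling around it.  Variable names here are made up for the
 * example; the real caller lives in the VM map code, outside this file.
 *
 *     memory_object_t protected_mem_obj;
 *
 *     protected_mem_obj = apple_protect_pager_setup(backing_object,
 *         backing_offset, crypto_backing_offset,
 *         crypt_info, crypto_start, crypto_end);
 *     if (protected_mem_obj == MEMORY_OBJECT_NULL) {
 *         // no pager could be found or created: fail the mapping
 *         return KERN_FAILURE;
 *     }
 *     // ... map the pager's memory object copy-on-write in the target
 *     // map; the mapping itself keeps the pager alive via
 *     // apple_protect_pager_map() ...
 *
 *     // drop the reference that apple_protect_pager_setup() gave us
 *     memory_object_deallocate(protected_mem_obj);
 */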
void
apple_protect_pager_trim(void)
{
    apple_protect_pager_t       pager, prev_pager;
    queue_head_t                trim_queue;
    int                         num_trim;
    int                         count_unmapped;

    lck_mtx_lock(&apple_protect_pager_lock);

    /*
     * We have too many pagers, try and trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (apple_protect_pager_t)
        queue_last(&apple_protect_pager_queue);
        !queue_end(&apple_protect_pager_queue,
        (queue_entry_t) pager);
        pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (apple_protect_pager_t)
            queue_prev(&pager->pager_queue);

        if (os_ref_get_count(&pager->ref_count) == 2 &&
            pager->is_ready &&
            !pager->is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            apple_protect_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                pager,
                apple_protect_pager_t,
                pager_queue);

            count_unmapped = (apple_protect_pager_count -
                apple_protect_pager_count_mapped);
            if (count_unmapped <= apple_protect_pager_cache_limit) {
                /* we have enough pagers to trim */
                break;
            }
        }
    }
    if (num_trim > apple_protect_pager_num_trim_max) {
        apple_protect_pager_num_trim_max = num_trim;
    }
    apple_protect_pager_num_trim_total += num_trim;

    lck_mtx_unlock(&apple_protect_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
            pager,
            apple_protect_pager_t,
            pager_queue);
        pager->pager_queue.next = NULL;
        pager->pager_queue.prev = NULL;
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to remove
         * a reference.
         */
        os_ref_count_t __assert_only count = os_ref_release_locked(&pager->ref_count);
        assert(count == 1);
        apple_protect_pager_terminate_internal(pager);
    }
}
void
crypt_info_reference(
    struct pager_crypt_info *crypt_info)
{
    assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: %p ref %d -> %d\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->crypt_refcnt,
        crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
    OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}

void
crypt_info_deallocate(
    struct pager_crypt_info *crypt_info)
{
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: %p ref %d -> %d\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->crypt_refcnt,
        crypt_info->crypt_refcnt - 1);
#endif /* CRYPT_INFO_DEBUG */
    OSAddAtomic(-1, &crypt_info->crypt_refcnt);
    if (crypt_info->crypt_refcnt == 0) {
        /* deallocate any crypt module data */
        if (crypt_info->crypt_end) {
            crypt_info->crypt_end(crypt_info->crypt_ops);
            crypt_info->crypt_end = NULL;
        }
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: freeing %p\n",
            __FUNCTION__,
            crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        kfree(crypt_info, sizeof(*crypt_info));
    }
}