/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list)
 * for now.
 */
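
/*
 * Typical usage, as a rough sketch (the real caller is
 * vm_map_apple_protected(), in vm_map.c): the client obtains a pager
 * for an encrypted range of a file-backed object and maps the
 * resulting memory object copy-on-write in place of that range:
 *
 *	unprotected_mem_obj = apple_protect_pager_setup(
 *		backing_object, backing_offset, crypto_backing_offset,
 *		crypt_info, crypto_start, crypto_end, cache_pager);
 *
 * Page-ins through such a mapping are then served by
 * apple_protect_pager_data_request() below, which decrypts on the fly.
 */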

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
boolean_t apple_protect_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);

#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	.memory_object_reference = apple_protect_pager_reference,
	.memory_object_deallocate = apple_protect_pager_deallocate,
	.memory_object_init = apple_protect_pager_init,
	.memory_object_terminate = apple_protect_pager_terminate,
	.memory_object_data_request = apple_protect_pager_data_request,
	.memory_object_data_return = apple_protect_pager_data_return,
	.memory_object_data_initialize = apple_protect_pager_data_initialize,
	.memory_object_data_unlock = apple_protect_pager_data_unlock,
	.memory_object_synchronize = apple_protect_pager_synchronize,
	.memory_object_map = apple_protect_pager_map,
	.memory_object_last_unmap = apple_protect_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = apple_protect_pager_backing_object,
	.memory_object_pager_name = "apple_protect"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	/* mandatory generic header */
	struct memory_object	ap_pgr_hdr;

	/* pager-specific data */
	queue_chain_t		pager_queue;	/* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define ap_pgr_hdr_ref		ap_pgr_hdr.mo_ref
#else
	os_ref_atomic_t		ap_pgr_hdr_ref;	/* reference count */
#endif
	bool			is_ready;	/* is this pager ready ? */
	bool			is_mapped;	/* is this mem_obj mapped ? */
	bool			is_cached;	/* is this pager cached ? */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
	vm_object_offset_t	backing_offset;
	vm_object_offset_t	crypto_backing_offset; /* for key... */
	vm_object_offset_t	crypto_start;
	vm_object_offset_t	crypto_end;
	struct pager_crypt_info	*crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
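
/*
 * Reference counting, as implemented by the code below: a new pager
 * starts with one reference for its caller, gains one extra reference
 * while it is mapped (taken in _map(), dropped in _last_unmap()) and,
 * if created with "cache_pager", one more for the pager cache (dropped
 * by _trim()).  Once only the "named" reference is left,
 * _deallocate_internal() dequeues and terminates the pager.
 */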

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
unsigned int apple_protect_pager_count = 0;		/* number of pagers */
unsigned int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
unsigned int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
unsigned int apple_protect_pager_count_max = 0;
unsigned int apple_protect_pager_count_unmapped_max = 0;
unsigned int apple_protect_pager_num_trim_max = 0;
unsigned int apple_protect_pager_num_trim_total = 0;
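
/*
 * The cache limit drives trimming: whenever a reference is dropped in
 * _deallocate_internal() and the number of unmapped pagers
 * (apple_protect_pager_count - apple_protect_pager_count_mapped)
 * exceeds apple_protect_pager_cache_limit, _trim() walks the queue
 * from its oldest entry and terminates cached, unmapped pagers until
 * the count is back under the limit.
 */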

/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
    boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);

#if PAGER_DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {		\
	        printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->ap_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("apple_protect_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
		memory_object_mark_eligible_for_secluded(control, TRUE);
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	unsigned int		pl_count;
	vm_object_t		src_top_object, src_page_object, dst_object;
	kern_return_t		kr, retval;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	vm_offset_t		offset_in_page;
	kern_return_t		error_code;
	vm_prot_t		prot;
	vm_page_t		src_page, top_page;
	int			interruptible;
	struct vm_object_fault_info fault_info;
	int			ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	top_page = VM_PAGE_NULL;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	fault_info.lo_offset += pager->backing_offset;
	fault_info.hi_offset += pager->backing_offset;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->ap_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_top_object = pager->backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
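
	/*
	 * Each requested page is handled individually below: fault the
	 * source (encrypted) page in from the backing object, propagate
	 * its code-signing state, then decrypt (or copy) its contents
	 * into the corresponding destination page of the UPL.
	 */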
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    pager->backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,		/* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			    "vm_fault_page() unexpected error 0x%x\n",
			    kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();

			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		    << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * page_decrypt() might access a mapped file, so let's release
		 * the object lock for the source page to avoid a potential
		 * deadlock.  The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);
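
		/*
		 * Note that the loop below works in 4096-byte units, not
		 * PAGE_SIZE units, presumably because the crypt modules
		 * operate on 4K chunks; on configurations with a larger
		 * PAGE_SIZE, each VM page is decrypted in several passes.
		 */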
		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += 4096) {
			if (offset + cur_offset + offset_in_page <
			    pager->crypto_start ||
			    offset + cur_offset + offset_in_page >=
			    pager->crypto_end) {
				/* not encrypted: just copy */
				bcopy((const char *)(src_vaddr +
				    offset_in_page),
				    (char *)(dst_vaddr + offset_in_page),
				    4096);

				if (apple_protect_pager_data_request_debug) {
					printf("apple_protect_data_request"
					    "(%p,0x%llx+0x%llx+0x%04llx): "
					    "out of crypto range "
					    "[0x%llx:0x%llx]: "
					    "COPY [0x%016llx 0x%016llx] "
					    "code_signed=%d "
					    "cs_validated=%d "
					    "cs_tainted=%d "
					    "cs_nx=%d\n",
					    pager,
					    offset,
					    (uint64_t) cur_offset,
					    (uint64_t) offset_in_page,
					    pager->crypto_start,
					    pager->crypto_end,
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page),
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page + 8),
					    src_page_object->code_signed,
					    src_page->vmp_cs_validated,
					    src_page->vmp_cs_tainted,
					    src_page->vmp_cs_nx);
				}
				ret = 0;
				continue;
			}
			ret = pager->crypt_info->page_decrypt(
				(const void *)(src_vaddr + offset_in_page),
				(void *)(dst_vaddr + offset_in_page),
				((pager->crypto_backing_offset -
				pager->crypto_start) +	/* XXX ? */
				offset +
				cur_offset +
				offset_in_page),
				pager->crypt_info->crypt_ops);

			if (apple_protect_pager_data_request_debug) {
				printf("apple_protect_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): "
				    "in crypto range [0x%llx:0x%llx]: "
				    "DECRYPT offset 0x%llx="
				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "ret=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    pager->crypto_start, pager->crypto_end,
				    ((pager->crypto_backing_offset -
				    pager->crypto_start) +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->crypto_backing_offset,
				    pager->crypto_start,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    ret);
			}
			if (ret) {
				break;
			}
		}
		if (ret) {
			/*
			 * Decryption failed.  Abort the fault.
			 */
			retval = KERN_ABORTED;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t	wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,	/* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
	lck_mtx_unlock(&apple_protect_pager_lock);
}

/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	    __FUNCTION__,
	    pager->crypt_info,
	    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always
 * returns with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	unsigned int	count_unmapped;
	os_ref_count_t	ref_count;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
	    apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof(*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

kern_return_t
apple_protect_pager_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		sync_flags)
{
	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager:  take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	unsigned int		count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
		    apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}
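
/*
 * Together, _map() and _last_unmap() implement the mapping-reference
 * protocol: the first mapping adds one reference that covers all
 * concurrent mappings, and the last unmap drops it, which may in turn
 * trigger termination through _deallocate_internal() if nothing else
 * holds the pager.
 */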

boolean_t
apple_protect_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_backing_object: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	*backing_object = pager->backing_object;
	*backing_offset = pager->backing_offset + offset;

	return TRUE;
}

apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	 mem_obj)
{
	apple_protect_pager_t	pager;

	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
	pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
	return pager;
}
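
/*
 * The cast above works because "ap_pgr_hdr" is the first field of
 * struct apple_protect_pager: the memory_object_t handed out to VM is
 * the pager itself, with a fake ip_kotype in its first word (see the
 * comment in apple_protect_pager_create() below).
 */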

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t		backing_object,
	vm_object_offset_t	backing_offset,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info	*crypt_info,
	vm_object_offset_t	crypto_start,
	vm_object_offset_t	crypto_end,
	boolean_t		cache_pager)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;
	struct pager_crypt_info	*old_crypt_info;

	pager = (apple_protect_pager_t) kalloc(sizeof(*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;/* not ready until it has a "name" */
	/* one reference for the caller */
	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
	pager->is_mapped = FALSE;
	if (cache_pager) {
		/* extra reference for the cache */
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		pager->is_cached = true;
	} else {
		pager->is_cached = false;
	}
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
	    pager2,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}
		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			    "(create match)\n",
			    __FUNCTION__,
			    pager2->crypt_info,
			    pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	if (!queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		    __FUNCTION__,
		    pager->crypt_info,
		    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree(pager, sizeof(*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		    "(create used old)\n",
		    __FUNCTION__,
		    crypt_info,
		    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t		backing_object,
	vm_object_offset_t	backing_offset,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info	*crypt_info,
	vm_object_offset_t	crypto_start,
	vm_object_offset_t	crypto_end,
	boolean_t		cache_pager)
{
	apple_protect_pager_t	pager;
	struct pager_crypt_info	*old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* no match for "crypt_info": next pager */
			continue;
		}
		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager->crypt_info);
		} else {
			/* ... switch to that pager's crypt_info */
			old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "switching crypt_info from %p [%p,%p,%p,%d] "
			    "to %p [%p,%p,%p,%d] from pager %p\n",
			    __FUNCTION__,
			    crypt_info,
			    crypt_info->page_decrypt,
			    crypt_info->crypt_end,
			    crypt_info->crypt_ops,
			    crypt_info->crypt_refcnt,
			    old_crypt_info,
			    old_crypt_info->page_decrypt,
			    old_crypt_info->crypt_end,
			    old_crypt_info->crypt_ops,
			    old_crypt_info->crypt_refcnt,
			    pager);
			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
			    __FUNCTION__,
			    pager->crypt_info,
			    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			crypt_info_reference(pager->crypt_info);
		}

		if (pager->backing_object == backing_object &&
		    pager->backing_offset == backing_offset &&
		    pager->crypto_backing_offset == crypto_backing_offset &&
		    pager->crypto_start == crypto_start &&
		    pager->crypto_end == crypto_end) {
			/* full match: use that pager! */
			assert(old_crypt_info == pager->crypt_info);
			assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "pager match with %p crypt_info %p\n",
			    __FUNCTION__,
			    pager,
			    pager->crypt_info);
			printf("CRYPT_INFO %s: deallocate %p ref %d "
			    "(pager match)\n",
			    __FUNCTION__,
			    old_crypt_info,
			    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			/* release the extra ref on crypt_info we got above */
			crypt_info_deallocate(old_crypt_info);
			assert(old_crypt_info->crypt_refcnt > 0);
			/* give extra reference on pager to the caller */
			os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager)) {
		lck_mtx_unlock(&apple_protect_pager_lock);
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
		if (old_crypt_info) {
			/* use this old crypt_info for new pager */
			new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "will use old_crypt_info %p for new pager\n",
			    __FUNCTION__,
			    old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		} else {
			/* allocate a new crypt_info for new pager */
			new_crypt_info = kalloc(sizeof(*new_crypt_info));
			*new_crypt_info = *crypt_info;
			new_crypt_info->crypt_refcnt = 1;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "will use new_crypt_info %p for new pager\n",
			    __FUNCTION__,
			    new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		}
		if (new_crypt_info == NULL) {
			/* can't create new pager without a crypt_info */
		} else {
			/* create new pager */
			pager = apple_protect_pager_create(
				backing_object,
				backing_offset,
				crypto_backing_offset,
				new_crypt_info,
				crypto_start,
				crypto_end,
				cache_pager);
		}
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			/* could not create a new pager */
			if (new_crypt_info == old_crypt_info) {
				/* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				    "(create fail old_crypt_info)\n",
				    __FUNCTION__,
				    old_crypt_info,
				    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(old_crypt_info);
				old_crypt_info = NULL;
			} else {
				/* release unused new_crypt_info */
				assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				    "(create fail new_crypt_info)\n",
				    __FUNCTION__,
				    new_crypt_info,
				    new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(new_crypt_info);
				new_crypt_info = NULL;
			}
			return MEMORY_OBJECT_NULL;
		}
		lck_mtx_lock(&apple_protect_pager_lock);
	} else {
		assert(old_crypt_info == pager->crypt_info);
	}

	while (!pager->is_ready) {
		lck_mtx_sleep(&apple_protect_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}
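
/*
 * Note the handshake with apple_protect_pager_create(): a thread that
 * finds an existing but not-yet-ready pager sleeps on &pager->is_ready
 * above, and create() issues the matching thread_wakeup() once
 * memory_object_create_named() has completed.
 */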

void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	unsigned int		num_trim;
	unsigned int		count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
	    queue_last(&apple_protect_pager_queue);
	    !queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
		    queue_prev(&pager->pager_queue);

		if (pager->is_cached &&
		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    apple_protect_pager_t,
			    pager_queue);

			count_unmapped = (apple_protect_pager_count -
			    apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    apple_protect_pager_t,
		    pager_queue);
		assert(pager->is_cached);
		pager->is_cached = false;
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		os_ref_count_t __assert_only count;
		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		assert(count == 1);
		apple_protect_pager_terminate_internal(pager);
	}
}

void
crypt_info_reference(
	struct pager_crypt_info *crypt_info)
{
	assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->crypt_refcnt,
	    crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}

void
crypt_info_deallocate(
	struct pager_crypt_info *crypt_info)
{
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->crypt_refcnt,
	    crypt_info->crypt_refcnt - 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
	if (crypt_info->crypt_refcnt == 0) {
		/* deallocate any crypt module data */
		if (crypt_info->crypt_end) {
			crypt_info->crypt_end(crypt_info->crypt_ops);
			crypt_info->crypt_end = NULL;
		}
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: freeing %p\n",
		    __FUNCTION__,
		    crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		kfree(crypt_info, sizeof(*crypt_info));
	}
}