apple/xnu (xnu-6153.81.5): osfmk/vm/vm_apple_protect.c
1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48 #include <kern/ipc_kobject.h>
49 #include <os/refcnt.h>
50
51 #include <ipc/ipc_port.h>
52 #include <ipc/ipc_space.h>
53
54 #include <vm/vm_fault.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_pageout.h>
57 #include <vm/memory_object.h>
58 #include <vm/vm_pageout.h>
59 #include <vm/vm_protos.h>
60 #include <vm/vm_kern.h>
61
62
63 /*
64 * APPLE PROTECT MEMORY PAGER
65 *
66 * This external memory manager (EMM) handles memory from the encrypted
67 * sections of some executables protected by the DSMOS kernel extension.
68 *
69 * It mostly handles page-in requests (from memory_object_data_request()) by
70 * getting the encrypted data from its backing VM object, itself backed by
71 * the encrypted file, decrypting it and providing it to VM.
72 *
73 * The decrypted pages will never be dirtied, so the memory manager doesn't
74 * need to handle page-out requests (from memory_object_data_return()). The
75 * pages need to be mapped copy-on-write, so that the originals stay clean.
76 *
77 * We don't expect to have to handle a large number of apple-protected
78 * binaries, so the data structures are kept simple (a plain linked
79 * list) for now.
80 */
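/*
 * Illustrative sketch only: roughly how a caller (e.g. vm_map_apple_protect()
 * in osfmk/vm/vm_map.c) is expected to use this pager.  The local variable
 * names below are hypothetical; only the functions defined in this file and
 * memory_object_deallocate() are assumed.
 */
#if 0
	memory_object_t protected_mem_obj;

	/* find or create a pager for this range of the backing object */
	protected_mem_obj = apple_protect_pager_setup(backing_object,
	    backing_offset,
	    crypto_backing_offset,
	    crypt_info,
	    crypto_start,
	    crypto_end);
	if (protected_mem_obj == MEMORY_OBJECT_NULL) {
		return KERN_FAILURE;
	}
	/*
	 * The returned memory object would typically be mapped copy-on-write
	 * (so the decrypted pages provided by
	 * apple_protect_pager_data_request() stay clean), and the caller's
	 * reference dropped once it is no longer needed:
	 */
	memory_object_deallocate(protected_mem_obj);
#endif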
81
82 /* forward declarations */
83 void apple_protect_pager_reference(memory_object_t mem_obj);
84 void apple_protect_pager_deallocate(memory_object_t mem_obj);
85 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
86 memory_object_control_t control,
87 memory_object_cluster_size_t pg_size);
88 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
89 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
90 memory_object_offset_t offset,
91 memory_object_cluster_size_t length,
92 vm_prot_t protection_required,
93 memory_object_fault_info_t fault_info);
94 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
95 memory_object_offset_t offset,
96 memory_object_cluster_size_t data_cnt,
97 memory_object_offset_t *resid_offset,
98 int *io_error,
99 boolean_t dirty,
100 boolean_t kernel_copy,
101 int upl_flags);
102 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
103 memory_object_offset_t offset,
104 memory_object_cluster_size_t data_cnt);
105 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
106 memory_object_offset_t offset,
107 memory_object_size_t size,
108 vm_prot_t desired_access);
109 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
110 memory_object_offset_t offset,
111 memory_object_size_t length,
112 vm_sync_t sync_flags);
113 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
114 vm_prot_t prot);
115 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
116
117 #define CRYPT_INFO_DEBUG 0
118 void crypt_info_reference(struct pager_crypt_info *crypt_info);
119 void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
120
121 /*
122 * Vector of VM operations for this EMM.
123 * These routines are invoked by VM via the memory_object_*() interfaces.
124 */
125 const struct memory_object_pager_ops apple_protect_pager_ops = {
126 .memory_object_reference = apple_protect_pager_reference,
127 .memory_object_deallocate = apple_protect_pager_deallocate,
128 .memory_object_init = apple_protect_pager_init,
129 .memory_object_terminate = apple_protect_pager_terminate,
130 .memory_object_data_request = apple_protect_pager_data_request,
131 .memory_object_data_return = apple_protect_pager_data_return,
132 .memory_object_data_initialize = apple_protect_pager_data_initialize,
133 .memory_object_data_unlock = apple_protect_pager_data_unlock,
134 .memory_object_synchronize = apple_protect_pager_synchronize,
135 .memory_object_map = apple_protect_pager_map,
136 .memory_object_last_unmap = apple_protect_pager_last_unmap,
137 .memory_object_data_reclaim = NULL,
138 .memory_object_pager_name = "apple_protect"
139 };
140
141 /*
142 * The "apple_protect_pager" describes a memory object backed by
143 * the "apple protect" EMM.
144 */
145 typedef struct apple_protect_pager {
146 /* mandatory generic header */
147 struct memory_object ap_pgr_hdr;
148
149 /* pager-specific data */
150 queue_chain_t pager_queue; /* next & prev pagers */
151 struct os_refcnt ref_count; /* reference count */
152 boolean_t is_ready; /* is this pager ready ? */
153 boolean_t is_mapped; /* is this mem_obj mapped ? */
154 vm_object_t backing_object; /* VM obj w/ encrypted data */
155 vm_object_offset_t backing_offset;
156 vm_object_offset_t crypto_backing_offset; /* for key... */
157 vm_object_offset_t crypto_start;
158 vm_object_offset_t crypto_end;
159 struct pager_crypt_info *crypt_info;
160 } *apple_protect_pager_t;
161 #define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
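/*
 * Reference counting, as implemented below: apple_protect_pager_create()
 * starts "ref_count" at 2 (one existence reference for the pager cache and
 * one for the caller), apple_protect_pager_map() holds one extra reference
 * while "is_mapped" is TRUE, and apple_protect_pager_deallocate_internal()
 * terminates the pager when only the existence reference is left and frees
 * it once the count reaches 0.
 */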
162
163 /*
164 * List of memory objects managed by this EMM.
165 * The list is protected by the "apple_protect_pager_lock" lock.
166 */
167 int apple_protect_pager_count = 0; /* number of pagers */
168 int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
169 queue_head_t apple_protect_pager_queue;
170 decl_lck_mtx_data(, apple_protect_pager_lock);
171
172 /*
173 * Maximum number of unmapped pagers we're willing to keep around.
174 */
175 int apple_protect_pager_cache_limit = 20;
176
177 /*
178 * Statistics & counters.
179 */
180 int apple_protect_pager_count_max = 0;
181 int apple_protect_pager_count_unmapped_max = 0;
182 int apple_protect_pager_num_trim_max = 0;
183 int apple_protect_pager_num_trim_total = 0;
184
185
186 lck_grp_t apple_protect_pager_lck_grp;
187 lck_grp_attr_t apple_protect_pager_lck_grp_attr;
188 lck_attr_t apple_protect_pager_lck_attr;
189
190
191 /* internal prototypes */
192 apple_protect_pager_t apple_protect_pager_create(
193 vm_object_t backing_object,
194 vm_object_offset_t backing_offset,
195 vm_object_offset_t crypto_backing_offset,
196 struct pager_crypt_info *crypt_info,
197 vm_object_offset_t crypto_start,
198 vm_object_offset_t crypto_end);
199 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
200 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
201 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
202 boolean_t locked);
203 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
204 void apple_protect_pager_trim(void);
205
206
207 #if DEBUG
208 int apple_protect_pagerdebug = 0;
209 #define PAGER_ALL 0xffffffff
210 #define PAGER_INIT 0x00000001
211 #define PAGER_PAGEIN 0x00000002
212
213 #define PAGER_DEBUG(LEVEL, A) \
214 MACRO_BEGIN \
215 if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \
216 printf A; \
217 } \
218 MACRO_END
219 #else
220 #define PAGER_DEBUG(LEVEL, A)
221 #endif
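/*
 * On DEBUG kernels, tracing can be enabled by setting the
 * "apple_protect_pagerdebug" mask to one or more of the PAGER_* bits above
 * (PAGER_ALL turns everything on); on non-DEBUG kernels PAGER_DEBUG()
 * compiles away entirely.
 */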
222
223
224 void
225 apple_protect_pager_bootstrap(void)
226 {
227 lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
228 lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
229 lck_attr_setdefault(&apple_protect_pager_lck_attr);
230 lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
231 queue_init(&apple_protect_pager_queue);
232 }
233
234 /*
235 * apple_protect_pager_init()
236 *
237 * Initializes the memory object and makes it ready to be used and mapped.
238 */
239 kern_return_t
240 apple_protect_pager_init(
241 memory_object_t mem_obj,
242 memory_object_control_t control,
243 #if !DEBUG
244 __unused
245 #endif
246 memory_object_cluster_size_t pg_size)
247 {
248 apple_protect_pager_t pager;
249 kern_return_t kr;
250 memory_object_attr_info_data_t attributes;
251
252 PAGER_DEBUG(PAGER_ALL,
253 ("apple_protect_pager_init: %p, %p, %x\n",
254 mem_obj, control, pg_size));
255
256 if (control == MEMORY_OBJECT_CONTROL_NULL) {
257 return KERN_INVALID_ARGUMENT;
258 }
259
260 pager = apple_protect_pager_lookup(mem_obj);
261
262 memory_object_control_reference(control);
263
264 pager->ap_pgr_hdr.mo_control = control;
265
266 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
267 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
268 attributes.cluster_size = (1 << (PAGE_SHIFT));
269 attributes.may_cache_object = FALSE;
270 attributes.temporary = TRUE;
271
272 kr = memory_object_change_attributes(
273 control,
274 MEMORY_OBJECT_ATTRIBUTE_INFO,
275 (memory_object_info_t) &attributes,
276 MEMORY_OBJECT_ATTR_INFO_COUNT);
277 if (kr != KERN_SUCCESS) {
278 panic("apple_protect_pager_init: "
279 "memory_object_change_attributes() failed");
280 }
281
282 #if CONFIG_SECLUDED_MEMORY
283 if (secluded_for_filecache) {
284 memory_object_mark_eligible_for_secluded(control, TRUE);
285 }
286 #endif /* CONFIG_SECLUDED_MEMORY */
287
288 return KERN_SUCCESS;
289 }
290
291 /*
292 * apple_protect_pager_data_return()
293 *
294 * Handles page-out requests from VM. This should never happen since
295 * the pages provided by this EMM are not supposed to be dirty or dirtied
296 * and VM should simply discard the contents and reclaim the pages if it
297 * needs to.
298 */
299 kern_return_t
300 apple_protect_pager_data_return(
301 __unused memory_object_t mem_obj,
302 __unused memory_object_offset_t offset,
303 __unused memory_object_cluster_size_t data_cnt,
304 __unused memory_object_offset_t *resid_offset,
305 __unused int *io_error,
306 __unused boolean_t dirty,
307 __unused boolean_t kernel_copy,
308 __unused int upl_flags)
309 {
310 panic("apple_protect_pager_data_return: should never get called");
311 return KERN_FAILURE;
312 }
313
314 kern_return_t
315 apple_protect_pager_data_initialize(
316 __unused memory_object_t mem_obj,
317 __unused memory_object_offset_t offset,
318 __unused memory_object_cluster_size_t data_cnt)
319 {
320 panic("apple_protect_pager_data_initialize: should never get called");
321 return KERN_FAILURE;
322 }
323
324 kern_return_t
325 apple_protect_pager_data_unlock(
326 __unused memory_object_t mem_obj,
327 __unused memory_object_offset_t offset,
328 __unused memory_object_size_t size,
329 __unused vm_prot_t desired_access)
330 {
331 return KERN_FAILURE;
332 }
333
334 /*
335 * apple_protect_pager_data_request()
336 *
337 * Handles page-in requests from VM.
338 */
339 int apple_protect_pager_data_request_debug = 0;
340 kern_return_t
341 apple_protect_pager_data_request(
342 memory_object_t mem_obj,
343 memory_object_offset_t offset,
344 memory_object_cluster_size_t length,
345 #if !DEBUG
346 __unused
347 #endif
348 vm_prot_t protection_required,
349 memory_object_fault_info_t mo_fault_info)
350 {
351 apple_protect_pager_t pager;
352 memory_object_control_t mo_control;
353 upl_t upl;
354 int upl_flags;
355 upl_size_t upl_size;
356 upl_page_info_t *upl_pl;
357 unsigned int pl_count;
358 vm_object_t src_top_object, src_page_object, dst_object;
359 kern_return_t kr, retval;
360 vm_offset_t src_vaddr, dst_vaddr;
361 vm_offset_t cur_offset;
362 vm_offset_t offset_in_page;
363 kern_return_t error_code;
364 vm_prot_t prot;
365 vm_page_t src_page, top_page;
366 int interruptible;
367 struct vm_object_fault_info fault_info;
368 int ret;
369
370 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
371
372 retval = KERN_SUCCESS;
373 src_top_object = VM_OBJECT_NULL;
374 src_page_object = VM_OBJECT_NULL;
375 upl = NULL;
376 upl_pl = NULL;
377 fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
378 fault_info.stealth = TRUE;
379 fault_info.io_sync = FALSE;
380 fault_info.mark_zf_absent = FALSE;
381 fault_info.batch_pmap_op = FALSE;
382 interruptible = fault_info.interruptible;
383
384 pager = apple_protect_pager_lookup(mem_obj);
385 assert(pager->is_ready);
386 assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */
387
388 PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
389
390 fault_info.lo_offset += pager->backing_offset;
391 fault_info.hi_offset += pager->backing_offset;
392
393 /*
394 * Gather in a UPL all the VM pages requested by VM.
395 */
396 mo_control = pager->ap_pgr_hdr.mo_control;
397
398 upl_size = length;
399 upl_flags =
400 UPL_RET_ONLY_ABSENT |
401 UPL_SET_LITE |
402 UPL_NO_SYNC |
403 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
404 UPL_SET_INTERNAL;
405 pl_count = 0;
406 kr = memory_object_upl_request(mo_control,
407 offset, upl_size,
408 &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
409 if (kr != KERN_SUCCESS) {
410 retval = kr;
411 goto done;
412 }
413 dst_object = mo_control->moc_object;
414 assert(dst_object != VM_OBJECT_NULL);
415
416 /*
417 * We'll map the encrypted data in the kernel address space from the
418 * backing VM object (itself backed by the encrypted file via
419 * the vnode pager).
420 */
421 src_top_object = pager->backing_object;
422 assert(src_top_object != VM_OBJECT_NULL);
423 vm_object_reference(src_top_object); /* keep the source object alive */
424
425 /*
426 * Fill in the contents of the pages requested by VM.
427 */
428 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
429 pl_count = length / PAGE_SIZE;
430 for (cur_offset = 0;
431 retval == KERN_SUCCESS && cur_offset < length;
432 cur_offset += PAGE_SIZE) {
433 ppnum_t dst_pnum;
434
435 if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
436 /* this page is not in the UPL: skip it */
437 continue;
438 }
439
440 /*
441 * Map the source (encrypted) page in the kernel's
442 * virtual address space.
443 * We already hold a reference on the src_top_object.
444 */
445 retry_src_fault:
446 vm_object_lock(src_top_object);
447 vm_object_paging_begin(src_top_object);
448 error_code = 0;
449 prot = VM_PROT_READ;
450 src_page = VM_PAGE_NULL;
451 kr = vm_fault_page(src_top_object,
452 pager->backing_offset + offset + cur_offset,
453 VM_PROT_READ,
454 FALSE,
455 FALSE, /* src_page not looked up */
456 &prot,
457 &src_page,
458 &top_page,
459 NULL,
460 &error_code,
461 FALSE,
462 FALSE,
463 &fault_info);
464 switch (kr) {
465 case VM_FAULT_SUCCESS:
466 break;
467 case VM_FAULT_RETRY:
468 goto retry_src_fault;
469 case VM_FAULT_MEMORY_SHORTAGE:
470 if (vm_page_wait(interruptible)) {
471 goto retry_src_fault;
472 }
473 /* fall thru */
474 case VM_FAULT_INTERRUPTED:
475 retval = MACH_SEND_INTERRUPTED;
476 goto done;
477 case VM_FAULT_SUCCESS_NO_VM_PAGE:
478 /* success but no VM page: fail */
479 vm_object_paging_end(src_top_object);
480 vm_object_unlock(src_top_object);
481 /*FALLTHROUGH*/
482 case VM_FAULT_MEMORY_ERROR:
483 /* the page is not there ! */
484 if (error_code) {
485 retval = error_code;
486 } else {
487 retval = KERN_MEMORY_ERROR;
488 }
489 goto done;
490 default:
491 panic("apple_protect_pager_data_request: "
492 "vm_fault_page() unexpected error 0x%x\n",
493 kr);
494 }
495 assert(src_page != VM_PAGE_NULL);
496 assert(src_page->vmp_busy);
497
498 if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
499 vm_page_lockspin_queues();
500
501 if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
502 vm_page_speculate(src_page, FALSE);
503 }
504 vm_page_unlock_queues();
505 }
506
507 /*
508 * Establish pointers to the source
509 * and destination physical pages.
510 */
511 dst_pnum = (ppnum_t)
512 upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
513 assert(dst_pnum != 0);
514
515 src_vaddr = (vm_map_offset_t)
516 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
517 << PAGE_SHIFT);
518 dst_vaddr = (vm_map_offset_t)
519 phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
520
521 src_page_object = VM_PAGE_OBJECT(src_page);
522
523 /*
524 * Validate the original page...
525 */
526 if (src_page_object->code_signed) {
527 vm_page_validate_cs_mapped(
528 src_page,
529 (const void *) src_vaddr);
530 }
531 /*
532 * ... and transfer the results to the destination page.
533 */
534 UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
535 src_page->vmp_cs_validated);
536 UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
537 src_page->vmp_cs_tainted);
538 UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
539 src_page->vmp_cs_nx);
540
541 /*
542 * page_decrypt() might access a mapped file, so let's release
543 * the object lock for the source page to avoid a potential
544 * deadlock. The source page is kept busy and we have a
545 * "paging_in_progress" reference on its object, so it's safe
546 * to unlock the object here.
547 */
548 assert(src_page->vmp_busy);
549 assert(src_page_object->paging_in_progress > 0);
550 vm_object_unlock(src_page_object);
551
552 /*
553 * Decrypt the encrypted contents of the source page
554 * into the destination page.
555 */
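/*
 * The 4096-byte stride below appears to be deliberate: the crypt module and
 * the [crypto_start, crypto_end) range work in 4 KB units, so when PAGE_SIZE
 * is larger (e.g. 16 KB) a single VM page can be partly inside and partly
 * outside the crypto range.  The offset handed to page_decrypt() is the
 * pager offset rebased so that crypto_start corresponds to
 * crypto_backing_offset in the backing file; for example (hypothetical
 * values), with crypto_backing_offset = 0x4000 and crypto_start = 0x1000, a
 * chunk at pager offset 0x2000 decrypts with offset
 * (0x4000 - 0x1000) + 0x2000 = 0x5000.
 */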
556 for (offset_in_page = 0;
557 offset_in_page < PAGE_SIZE;
558 offset_in_page += 4096) {
559 if (offset + cur_offset + offset_in_page <
560 pager->crypto_start ||
561 offset + cur_offset + offset_in_page >=
562 pager->crypto_end) {
563 /* not encrypted: just copy */
564 bcopy((const char *)(src_vaddr +
565 offset_in_page),
566 (char *)(dst_vaddr + offset_in_page),
567 4096);
568
569 if (apple_protect_pager_data_request_debug) {
570 printf("apple_protect_data_request"
571 "(%p,0x%llx+0x%llx+0x%04llx): "
572 "out of crypto range "
573 "[0x%llx:0x%llx]: "
574 "COPY [0x%016llx 0x%016llx] "
575 "code_signed=%d "
576 "cs_validated=%d "
577 "cs_tainted=%d "
578 "cs_nx=%d\n",
579 pager,
580 offset,
581 (uint64_t) cur_offset,
582 (uint64_t) offset_in_page,
583 pager->crypto_start,
584 pager->crypto_end,
585 *(uint64_t *)(dst_vaddr +
586 offset_in_page),
587 *(uint64_t *)(dst_vaddr +
588 offset_in_page + 8),
589 src_page_object->code_signed,
590 src_page->vmp_cs_validated,
591 src_page->vmp_cs_tainted,
592 src_page->vmp_cs_nx);
593 }
594 ret = 0;
595 continue;
596 }
597 ret = pager->crypt_info->page_decrypt(
598 (const void *)(src_vaddr + offset_in_page),
599 (void *)(dst_vaddr + offset_in_page),
600 ((pager->crypto_backing_offset -
601 pager->crypto_start) + /* XXX ? */
602 offset +
603 cur_offset +
604 offset_in_page),
605 pager->crypt_info->crypt_ops);
606
607 if (apple_protect_pager_data_request_debug) {
608 printf("apple_protect_data_request"
609 "(%p,0x%llx+0x%llx+0x%04llx): "
610 "in crypto range [0x%llx:0x%llx]: "
611 "DECRYPT offset 0x%llx="
612 "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
613 "[0x%016llx 0x%016llx] "
614 "code_signed=%d "
615 "cs_validated=%d "
616 "cs_tainted=%d "
617 "cs_nx=%d "
618 "ret=0x%x\n",
619 pager,
620 offset,
621 (uint64_t) cur_offset,
622 (uint64_t) offset_in_page,
623 pager->crypto_start, pager->crypto_end,
624 ((pager->crypto_backing_offset -
625 pager->crypto_start) +
626 offset +
627 cur_offset +
628 offset_in_page),
629 pager->crypto_backing_offset,
630 pager->crypto_start,
631 offset,
632 (uint64_t) cur_offset,
633 (uint64_t) offset_in_page,
634 *(uint64_t *)(dst_vaddr + offset_in_page),
635 *(uint64_t *)(dst_vaddr + offset_in_page + 8),
636 src_page_object->code_signed,
637 src_page->vmp_cs_validated,
638 src_page->vmp_cs_tainted,
639 src_page->vmp_cs_nx,
640 ret);
641 }
642 if (ret) {
643 break;
644 }
645 }
646 if (ret) {
647 /*
648 * Decryption failed. Abort the fault.
649 */
650 retval = KERN_ABORTED;
651 }
652
653 assert(VM_PAGE_OBJECT(src_page) == src_page_object);
654 assert(src_page->vmp_busy);
655 assert(src_page_object->paging_in_progress > 0);
656 vm_object_lock(src_page_object);
657
658 /*
659 * Cleanup the result of vm_fault_page() of the source page.
660 */
661 PAGE_WAKEUP_DONE(src_page);
662 src_page = VM_PAGE_NULL;
663 vm_object_paging_end(src_page_object);
664 vm_object_unlock(src_page_object);
665
666 if (top_page != VM_PAGE_NULL) {
667 assert(VM_PAGE_OBJECT(top_page) == src_top_object);
668 vm_object_lock(src_top_object);
669 VM_PAGE_FREE(top_page);
670 vm_object_paging_end(src_top_object);
671 vm_object_unlock(src_top_object);
672 }
673 }
674
675 done:
676 if (upl != NULL) {
677 /* clean up the UPL */
678
679 /*
680 * The pages are currently dirty because we've just been
681 * writing on them, but as far as we're concerned, they're
682 * clean since they contain their "original" contents as
683 * provided by us, the pager.
684 * Tell the UPL to mark them "clean".
685 */
686 upl_clear_dirty(upl, TRUE);
687
688 /* abort or commit the UPL */
689 if (retval != KERN_SUCCESS) {
690 upl_abort(upl, 0);
691 if (retval == KERN_ABORTED) {
692 wait_result_t wait_result;
693
694 /*
695 * We aborted the fault and did not provide
696 * any contents for the requested pages but
697 * the pages themselves are not invalid, so
698 * let's return success and let the caller
699 * retry the fault, in case it might succeed
700 * later (when the decryption code is up and
701 * running in the kernel, for example).
702 */
703 retval = KERN_SUCCESS;
704 /*
705 * Wait a little bit first to avoid using
706 * too much CPU time retrying and failing
707 * the same fault over and over again.
708 */
709 wait_result = assert_wait_timeout(
710 (event_t) apple_protect_pager_data_request,
711 THREAD_UNINT,
712 10000, /* 10ms */
713 NSEC_PER_USEC);
714 assert(wait_result == THREAD_WAITING);
715 wait_result = thread_block(THREAD_CONTINUE_NULL);
716 assert(wait_result == THREAD_TIMED_OUT);
717 }
718 } else {
719 boolean_t empty;
720 upl_commit_range(upl, 0, upl->size,
721 UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
722 upl_pl, pl_count, &empty);
723 }
724
725 /* and deallocate the UPL */
726 upl_deallocate(upl);
727 upl = NULL;
728 }
729 if (src_top_object != VM_OBJECT_NULL) {
730 vm_object_deallocate(src_top_object);
731 }
732 return retval;
733 }
734
735 /*
736 * apple_protect_pager_reference()
737 *
738 * Get a reference on this memory object.
739 * For external usage only. Assumes that the initial reference count is not 0,
740 * i.e. one should not "revive" a dead pager this way.
741 */
742 void
743 apple_protect_pager_reference(
744 memory_object_t mem_obj)
745 {
746 apple_protect_pager_t pager;
747
748 pager = apple_protect_pager_lookup(mem_obj);
749
750 lck_mtx_lock(&apple_protect_pager_lock);
751 os_ref_retain_locked(&pager->ref_count);
752 lck_mtx_unlock(&apple_protect_pager_lock);
753 }
754
755
756 /*
757 * apple_protect_pager_dequeue:
758 *
759 * Removes a pager from the list of pagers.
760 *
761 * The caller must hold "apple_protect_pager_lock".
762 */
763 void
764 apple_protect_pager_dequeue(
765 apple_protect_pager_t pager)
766 {
767 assert(!pager->is_mapped);
768
769 queue_remove(&apple_protect_pager_queue,
770 pager,
771 apple_protect_pager_t,
772 pager_queue);
773 pager->pager_queue.next = NULL;
774 pager->pager_queue.prev = NULL;
775
776 apple_protect_pager_count--;
777 }
778
779 /*
780 * apple_protect_pager_terminate_internal:
781 *
782 * Trigger the asynchronous termination of the memory object associated
783 * with this pager.
784 * When the memory object is terminated, there will be one more call
785 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
786 * to finish the cleanup.
787 *
788 * "apple_protect_pager_lock" should not be held by the caller.
789 * We don't need the lock because the pager has already been removed from
790 * the pagers' list and is now ours exclusively.
791 */
792 void
793 apple_protect_pager_terminate_internal(
794 apple_protect_pager_t pager)
795 {
796 assert(pager->is_ready);
797 assert(!pager->is_mapped);
798
799 if (pager->backing_object != VM_OBJECT_NULL) {
800 vm_object_deallocate(pager->backing_object);
801 pager->backing_object = VM_OBJECT_NULL;
802 }
803
804 /* one less pager using this "pager_crypt_info" */
805 #if CRYPT_INFO_DEBUG
806 printf("CRYPT_INFO %s: deallocate %p ref %d\n",
807 __FUNCTION__,
808 pager->crypt_info,
809 pager->crypt_info->crypt_refcnt);
810 #endif /* CRYPT_INFO_DEBUG */
811 crypt_info_deallocate(pager->crypt_info);
812 pager->crypt_info = NULL;
813
814 /* trigger the destruction of the memory object */
815 memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
816 }
817
818 /*
819 * apple_protect_pager_deallocate_internal()
820 *
821 * Release a reference on this pager and free it when the last
822 * reference goes away.
823 * Can be called with apple_protect_pager_lock held or not but always returns
824 * with it unlocked.
825 */
826 void
827 apple_protect_pager_deallocate_internal(
828 apple_protect_pager_t pager,
829 boolean_t locked)
830 {
831 boolean_t needs_trimming;
832 int count_unmapped;
833
834 if (!locked) {
835 lck_mtx_lock(&apple_protect_pager_lock);
836 }
837
838 count_unmapped = (apple_protect_pager_count -
839 apple_protect_pager_count_mapped);
840 if (count_unmapped > apple_protect_pager_cache_limit) {
841 /* we have too many unmapped pagers: trim some */
842 needs_trimming = TRUE;
843 } else {
844 needs_trimming = FALSE;
845 }
846
847 /* drop a reference on this pager */
848 os_ref_count_t ref_count = os_ref_release_locked(&pager->ref_count);
849
850 if (ref_count == 1) {
851 /*
852 * Only the "named" reference is left, which means that
853 * no one is really holding on to this pager anymore.
854 * Terminate it.
855 */
856 apple_protect_pager_dequeue(pager);
857 /* the pager is all ours: no need for the lock now */
858 lck_mtx_unlock(&apple_protect_pager_lock);
859 apple_protect_pager_terminate_internal(pager);
860 } else if (ref_count == 0) {
861 /*
862 * Dropped the existence reference; the memory object has
863 * been terminated. Do some final cleanup and release the
864 * pager structure.
865 */
866 lck_mtx_unlock(&apple_protect_pager_lock);
867 if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
868 memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
869 pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
870 }
871 kfree(pager, sizeof(*pager));
872 pager = APPLE_PROTECT_PAGER_NULL;
873 } else {
874 /* there are still plenty of references: keep going... */
875 lck_mtx_unlock(&apple_protect_pager_lock);
876 }
877
878 if (needs_trimming) {
879 apple_protect_pager_trim();
880 }
881 /* caution: lock is not held on return... */
882 }
883
884 /*
885 * apple_protect_pager_deallocate()
886 *
887 * Release a reference on this pager and free it when the last
888 * reference goes away.
889 */
890 void
891 apple_protect_pager_deallocate(
892 memory_object_t mem_obj)
893 {
894 apple_protect_pager_t pager;
895
896 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
897 pager = apple_protect_pager_lookup(mem_obj);
898 apple_protect_pager_deallocate_internal(pager, FALSE);
899 }
900
901 /*
902 *
903 */
904 kern_return_t
905 apple_protect_pager_terminate(
906 #if !DEBUG
907 __unused
908 #endif
909 memory_object_t mem_obj)
910 {
911 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
912
913 return KERN_SUCCESS;
914 }
915
916 /*
917 *
918 */
919 kern_return_t
920 apple_protect_pager_synchronize(
921 __unused memory_object_t mem_obj,
922 __unused memory_object_offset_t offset,
923 __unused memory_object_size_t length,
924 __unused vm_sync_t sync_flags)
925 {
926 panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
927 return KERN_FAILURE;
928 }
929
930 /*
931 * apple_protect_pager_map()
932 *
933 * This allows VM to let us, the EMM, know that this memory object
934 * is currently mapped one or more times. This is called by VM each time
935 * the memory object gets mapped; on the first mapping, we take one extra
936 * reference on the memory object to account for all its mappings.
937 */
938 kern_return_t
939 apple_protect_pager_map(
940 memory_object_t mem_obj,
941 __unused vm_prot_t prot)
942 {
943 apple_protect_pager_t pager;
944
945 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
946
947 pager = apple_protect_pager_lookup(mem_obj);
948
949 lck_mtx_lock(&apple_protect_pager_lock);
950 assert(pager->is_ready);
951 assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */
952 if (pager->is_mapped == FALSE) {
953 /*
954 * First mapping of this pager: take an extra reference
955 * that will remain until all the mappings of this pager
956 * are removed.
957 */
958 pager->is_mapped = TRUE;
959 os_ref_retain_locked(&pager->ref_count);
960 apple_protect_pager_count_mapped++;
961 }
962 lck_mtx_unlock(&apple_protect_pager_lock);
963
964 return KERN_SUCCESS;
965 }
966
967 /*
968 * apple_protect_pager_last_unmap()
969 *
970 * This is called by VM when this memory object is no longer mapped anywhere.
971 */
972 kern_return_t
973 apple_protect_pager_last_unmap(
974 memory_object_t mem_obj)
975 {
976 apple_protect_pager_t pager;
977 int count_unmapped;
978
979 PAGER_DEBUG(PAGER_ALL,
980 ("apple_protect_pager_last_unmap: %p\n", mem_obj));
981
982 pager = apple_protect_pager_lookup(mem_obj);
983
984 lck_mtx_lock(&apple_protect_pager_lock);
985 if (pager->is_mapped) {
986 /*
987 * All the mappings are gone, so let go of the one extra
988 * reference that represents all the mappings of this pager.
989 */
990 apple_protect_pager_count_mapped--;
991 count_unmapped = (apple_protect_pager_count -
992 apple_protect_pager_count_mapped);
993 if (count_unmapped > apple_protect_pager_count_unmapped_max) {
994 apple_protect_pager_count_unmapped_max = count_unmapped;
995 }
996 pager->is_mapped = FALSE;
997 apple_protect_pager_deallocate_internal(pager, TRUE);
998 /* caution: deallocate_internal() released the lock ! */
999 } else {
1000 lck_mtx_unlock(&apple_protect_pager_lock);
1001 }
1002
1003 return KERN_SUCCESS;
1004 }
1005
1006
1007 /*
1008 *
1009 */
1010 apple_protect_pager_t
1011 apple_protect_pager_lookup(
1012 memory_object_t mem_obj)
1013 {
1014 apple_protect_pager_t pager;
1015
1016 assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
1017 pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
1018 assert(os_ref_get_count(&pager->ref_count) > 0);
1019 return pager;
1020 }
1021
1022 apple_protect_pager_t
1023 apple_protect_pager_create(
1024 vm_object_t backing_object,
1025 vm_object_offset_t backing_offset,
1026 vm_object_offset_t crypto_backing_offset,
1027 struct pager_crypt_info *crypt_info,
1028 vm_object_offset_t crypto_start,
1029 vm_object_offset_t crypto_end)
1030 {
1031 apple_protect_pager_t pager, pager2;
1032 memory_object_control_t control;
1033 kern_return_t kr;
1034 struct pager_crypt_info *old_crypt_info;
1035
1036 pager = (apple_protect_pager_t) kalloc(sizeof(*pager));
1037 if (pager == APPLE_PROTECT_PAGER_NULL) {
1038 return APPLE_PROTECT_PAGER_NULL;
1039 }
1040
1041 /*
1042 * The vm_map call takes both named entry ports and raw memory
1043 * objects in the same parameter. We need to make sure that
1044 * vm_map does not see this object as a named entry port. So,
1045 * we reserve the first word in the object for a fake ip_kotype
1046 * setting - that will tell vm_map to use it as a memory object.
1047 */
1048 pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
1049 pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
1050 pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1051
1052 pager->is_ready = FALSE;/* not ready until it has a "name" */
1053 os_ref_init_count(&pager->ref_count, NULL, 2); /* existence reference (for the cache) and another for the caller */
1054 pager->is_mapped = FALSE;
1055 pager->backing_object = backing_object;
1056 pager->backing_offset = backing_offset;
1057 pager->crypto_backing_offset = crypto_backing_offset;
1058 pager->crypto_start = crypto_start;
1059 pager->crypto_end = crypto_end;
1060 pager->crypt_info = crypt_info; /* allocated by caller */
1061
1062 #if CRYPT_INFO_DEBUG
1063 printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
1064 __FUNCTION__,
1065 crypt_info,
1066 crypt_info->page_decrypt,
1067 crypt_info->crypt_end,
1068 crypt_info->crypt_ops,
1069 crypt_info->crypt_refcnt);
1070 #endif /* CRYPT_INFO_DEBUG */
1071
1072 vm_object_reference(backing_object);
1073
1074 old_crypt_info = NULL;
1075
1076 lck_mtx_lock(&apple_protect_pager_lock);
1077 /* see if anyone raced us to create a pager for the same object */
1078 queue_iterate(&apple_protect_pager_queue,
1079 pager2,
1080 apple_protect_pager_t,
1081 pager_queue) {
1082 if ((pager2->crypt_info->page_decrypt !=
1083 crypt_info->page_decrypt) ||
1084 (pager2->crypt_info->crypt_end !=
1085 crypt_info->crypt_end) ||
1086 (pager2->crypt_info->crypt_ops !=
1087 crypt_info->crypt_ops)) {
1088 /* crypt_info contents do not match: next pager */
1089 continue;
1090 }
1091
1092 /* found a match for crypt_info ... */
1093 if (old_crypt_info) {
1094 /* ... already switched to that crypt_info */
1095 assert(old_crypt_info == pager2->crypt_info);
1096 } else if (pager2->crypt_info != crypt_info) {
1097 /* ... switch to that pager's crypt_info */
1098 #if CRYPT_INFO_DEBUG
1099 printf("CRYPT_INFO %s: reference %p ref %d "
1100 "(create match)\n",
1101 __FUNCTION__,
1102 pager2->crypt_info,
1103 pager2->crypt_info->crypt_refcnt);
1104 #endif /* CRYPT_INFO_DEBUG */
1105 old_crypt_info = pager2->crypt_info;
1106 crypt_info_reference(old_crypt_info);
1107 pager->crypt_info = old_crypt_info;
1108 }
1109
1110 if (pager2->backing_object == backing_object &&
1111 pager2->backing_offset == backing_offset &&
1112 pager2->crypto_backing_offset == crypto_backing_offset &&
1113 pager2->crypto_start == crypto_start &&
1114 pager2->crypto_end == crypto_end) {
1115 /* full match: use that pager */
1116 break;
1117 }
1118 }
1119 if (!queue_end(&apple_protect_pager_queue,
1120 (queue_entry_t) pager2)) {
1121 /* we lost the race, down with the loser... */
1122 lck_mtx_unlock(&apple_protect_pager_lock);
1123 vm_object_deallocate(pager->backing_object);
1124 pager->backing_object = VM_OBJECT_NULL;
1125 #if CRYPT_INFO_DEBUG
1126 printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
1127 __FUNCTION__,
1128 pager->crypt_info,
1129 pager->crypt_info->crypt_refcnt);
1130 #endif /* CRYPT_INFO_DEBUG */
1131 crypt_info_deallocate(pager->crypt_info);
1132 pager->crypt_info = NULL;
1133 kfree(pager, sizeof(*pager));
1134 /* ... and go with the winner */
1135 pager = pager2;
1136 /* let the winner make sure the pager gets ready */
1137 return pager;
1138 }
1139
1140 /* enter new pager at the head of our list of pagers */
1141 queue_enter_first(&apple_protect_pager_queue,
1142 pager,
1143 apple_protect_pager_t,
1144 pager_queue);
1145 apple_protect_pager_count++;
1146 if (apple_protect_pager_count > apple_protect_pager_count_max) {
1147 apple_protect_pager_count_max = apple_protect_pager_count;
1148 }
1149 lck_mtx_unlock(&apple_protect_pager_lock);
1150
1151 kr = memory_object_create_named((memory_object_t) pager,
1152 0,
1153 &control);
1154 assert(kr == KERN_SUCCESS);
1155
1156 memory_object_mark_trusted(control);
1157
1158 lck_mtx_lock(&apple_protect_pager_lock);
1159 /* the new pager is now ready to be used */
1160 pager->is_ready = TRUE;
1161 lck_mtx_unlock(&apple_protect_pager_lock);
1162
1163 /* wakeup anyone waiting for this pager to be ready */
1164 thread_wakeup(&pager->is_ready);
1165
1166 if (old_crypt_info != NULL &&
1167 old_crypt_info != crypt_info) {
1168 /* we re-used an old crypt_info instead of using our new one */
1169 #if CRYPT_INFO_DEBUG
1170 printf("CRYPT_INFO %s: deallocate %p ref %d "
1171 "(create used old)\n",
1172 __FUNCTION__,
1173 crypt_info,
1174 crypt_info->crypt_refcnt);
1175 #endif /* CRYPT_INFO_DEBUG */
1176 crypt_info_deallocate(crypt_info);
1177 crypt_info = NULL;
1178 }
1179
1180 return pager;
1181 }
1182
1183 /*
1184 * apple_protect_pager_setup()
1185 *
1186 * Provide the caller with a memory object backed by the provided
1187 * "backing_object" VM object. If such a memory object already exists,
1188 * re-use it, otherwise create a new memory object.
1189 */
1190 memory_object_t
1191 apple_protect_pager_setup(
1192 vm_object_t backing_object,
1193 vm_object_offset_t backing_offset,
1194 vm_object_offset_t crypto_backing_offset,
1195 struct pager_crypt_info *crypt_info,
1196 vm_object_offset_t crypto_start,
1197 vm_object_offset_t crypto_end)
1198 {
1199 apple_protect_pager_t pager;
1200 struct pager_crypt_info *old_crypt_info, *new_crypt_info;
1201
1202 #if CRYPT_INFO_DEBUG
1203 printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
1204 __FUNCTION__,
1205 crypt_info,
1206 crypt_info->page_decrypt,
1207 crypt_info->crypt_end,
1208 crypt_info->crypt_ops,
1209 crypt_info->crypt_refcnt);
1210 #endif /* CRYPT_INFO_DEBUG */
1211
1212 old_crypt_info = NULL;
1213
1214 lck_mtx_lock(&apple_protect_pager_lock);
1215
1216 queue_iterate(&apple_protect_pager_queue,
1217 pager,
1218 apple_protect_pager_t,
1219 pager_queue) {
1220 if ((pager->crypt_info->page_decrypt !=
1221 crypt_info->page_decrypt) ||
1222 (pager->crypt_info->crypt_end !=
1223 crypt_info->crypt_end) ||
1224 (pager->crypt_info->crypt_ops !=
1225 crypt_info->crypt_ops)) {
1226 /* no match for "crypt_info": next pager */
1227 continue;
1228 }
1229 /* found a match for crypt_info ... */
1230 if (old_crypt_info) {
1231 /* ... already switched to that crypt_info */
1232 assert(old_crypt_info == pager->crypt_info);
1233 } else {
1234 /* ... switch to that pager's crypt_info */
1235 old_crypt_info = pager->crypt_info;
1236 #if CRYPT_INFO_DEBUG
1237 printf("CRYPT_INFO %s: "
1238 "switching crypt_info from %p [%p,%p,%p,%d] "
1239 "to %p [%p,%p,%p,%d] from pager %p\n",
1240 __FUNCTION__,
1241 crypt_info,
1242 crypt_info->page_decrypt,
1243 crypt_info->crypt_end,
1244 crypt_info->crypt_ops,
1245 crypt_info->crypt_refcnt,
1246 old_crypt_info,
1247 old_crypt_info->page_decrypt,
1248 old_crypt_info->crypt_end,
1249 old_crypt_info->crypt_ops,
1250 old_crypt_info->crypt_refcnt,
1251 pager);
1252 printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
1253 __FUNCTION__,
1254 pager->crypt_info,
1255 pager->crypt_info->crypt_refcnt);
1256 #endif /* CRYPT_INFO_DEBUG */
1257 crypt_info_reference(pager->crypt_info);
1258 }
1259
1260 if (pager->backing_object == backing_object &&
1261 pager->backing_offset == backing_offset &&
1262 pager->crypto_backing_offset == crypto_backing_offset &&
1263 pager->crypto_start == crypto_start &&
1264 pager->crypto_end == crypto_end) {
1265 /* full match: use that pager! */
1266 assert(old_crypt_info == pager->crypt_info);
1267 assert(old_crypt_info->crypt_refcnt > 1);
1268 #if CRYPT_INFO_DEBUG
1269 printf("CRYPT_INFO %s: "
1270 "pager match with %p crypt_info %p\n",
1271 __FUNCTION__,
1272 pager,
1273 pager->crypt_info);
1274 printf("CRYPT_INFO %s: deallocate %p ref %d "
1275 "(pager match)\n",
1276 __FUNCTION__,
1277 old_crypt_info,
1278 old_crypt_info->crypt_refcnt);
1279 #endif /* CRYPT_INFO_DEBUG */
1280 /* release the extra ref on crypt_info we got above */
1281 crypt_info_deallocate(old_crypt_info);
1282 assert(old_crypt_info->crypt_refcnt > 0);
1283 /* give extra reference on pager to the caller */
1284 os_ref_retain_locked(&pager->ref_count);
1285 break;
1286 }
1287 }
1288 if (queue_end(&apple_protect_pager_queue,
1289 (queue_entry_t) pager)) {
1290 lck_mtx_unlock(&apple_protect_pager_lock);
1291 /* no existing pager for this backing object */
1292 pager = APPLE_PROTECT_PAGER_NULL;
1293 if (old_crypt_info) {
1294 /* use this old crypt_info for new pager */
1295 new_crypt_info = old_crypt_info;
1296 #if CRYPT_INFO_DEBUG
1297 printf("CRYPT_INFO %s: "
1298 "will use old_crypt_info %p for new pager\n",
1299 __FUNCTION__,
1300 old_crypt_info);
1301 #endif /* CRYPT_INFO_DEBUG */
1302 } else {
1303 /* allocate a new crypt_info for new pager */
1304 new_crypt_info = kalloc(sizeof(*new_crypt_info));
1305 *new_crypt_info = *crypt_info;
1306 new_crypt_info->crypt_refcnt = 1;
1307 #if CRYPT_INFO_DEBUG
1308 printf("CRYPT_INFO %s: "
1309 "will use new_crypt_info %p for new pager\n",
1310 __FUNCTION__,
1311 new_crypt_info);
1312 #endif /* CRYPT_INFO_DEBUG */
1313 }
1314 if (new_crypt_info == NULL) {
1315 /* can't create new pager without a crypt_info */
1316 } else {
1317 /* create new pager */
1318 pager = apple_protect_pager_create(
1319 backing_object,
1320 backing_offset,
1321 crypto_backing_offset,
1322 new_crypt_info,
1323 crypto_start,
1324 crypto_end);
1325 }
1326 if (pager == APPLE_PROTECT_PAGER_NULL) {
1327 /* could not create a new pager */
1328 if (new_crypt_info == old_crypt_info) {
1329 /* release extra reference on old_crypt_info */
1330 #if CRYPT_INFO_DEBUG
1331 printf("CRYPT_INFO %s: deallocate %p ref %d "
1332 "(create fail old_crypt_info)\n",
1333 __FUNCTION__,
1334 old_crypt_info,
1335 old_crypt_info->crypt_refcnt);
1336 #endif /* CRYPT_INFO_DEBUG */
1337 crypt_info_deallocate(old_crypt_info);
1338 old_crypt_info = NULL;
1339 } else {
1340 /* release unused new_crypt_info */
1341 assert(new_crypt_info->crypt_refcnt == 1);
1342 #if CRYPT_INFO_DEBUG
1343 printf("CRYPT_INFO %s: deallocate %p ref %d "
1344 "(create fail new_crypt_info)\n",
1345 __FUNCTION__,
1346 new_crypt_info,
1347 new_crypt_info->crypt_refcnt);
1348 #endif /* CRYPT_INFO_DEBUG */
1349 crypt_info_deallocate(new_crypt_info);
1350 new_crypt_info = NULL;
1351 }
1352 return MEMORY_OBJECT_NULL;
1353 }
1354 lck_mtx_lock(&apple_protect_pager_lock);
1355 } else {
1356 assert(old_crypt_info == pager->crypt_info);
1357 }
1358
1359 while (!pager->is_ready) {
1360 lck_mtx_sleep(&apple_protect_pager_lock,
1361 LCK_SLEEP_DEFAULT,
1362 &pager->is_ready,
1363 THREAD_UNINT);
1364 }
1365 lck_mtx_unlock(&apple_protect_pager_lock);
1366
1367 return (memory_object_t) pager;
1368 }
1369
1370 void
1371 apple_protect_pager_trim(void)
1372 {
1373 apple_protect_pager_t pager, prev_pager;
1374 queue_head_t trim_queue;
1375 int num_trim;
1376 int count_unmapped;
1377
1378 lck_mtx_lock(&apple_protect_pager_lock);
1379
1380 /*
1381 * We have too many pagers; try to trim some unused ones,
1382 * starting with the oldest pager at the end of the queue.
1383 */
1384 queue_init(&trim_queue);
1385 num_trim = 0;
1386
1387 for (pager = (apple_protect_pager_t)
1388 queue_last(&apple_protect_pager_queue);
1389 !queue_end(&apple_protect_pager_queue,
1390 (queue_entry_t) pager);
1391 pager = prev_pager) {
1392 /* get prev elt before we dequeue */
1393 prev_pager = (apple_protect_pager_t)
1394 queue_prev(&pager->pager_queue);
1395
1396 if (os_ref_get_count(&pager->ref_count) == 2 &&
1397 pager->is_ready &&
1398 !pager->is_mapped) {
1399 /* this pager can be trimmed */
1400 num_trim++;
1401 /* remove this pager from the main list ... */
1402 apple_protect_pager_dequeue(pager);
1403 /* ... and add it to our trim queue */
1404 queue_enter_first(&trim_queue,
1405 pager,
1406 apple_protect_pager_t,
1407 pager_queue);
1408
1409 count_unmapped = (apple_protect_pager_count -
1410 apple_protect_pager_count_mapped);
1411 if (count_unmapped <= apple_protect_pager_cache_limit) {
1412 /* we have trimmed enough pagers */
1413 break;
1414 }
1415 }
1416 }
1417 if (num_trim > apple_protect_pager_num_trim_max) {
1418 apple_protect_pager_num_trim_max = num_trim;
1419 }
1420 apple_protect_pager_num_trim_total += num_trim;
1421
1422 lck_mtx_unlock(&apple_protect_pager_lock);
1423
1424 /* terminate the trimmed pagers */
1425 while (!queue_empty(&trim_queue)) {
1426 queue_remove_first(&trim_queue,
1427 pager,
1428 apple_protect_pager_t,
1429 pager_queue);
1430 pager->pager_queue.next = NULL;
1431 pager->pager_queue.prev = NULL;
1432 /*
1433 * We can't call deallocate_internal() because the pager
1434 * has already been dequeued, but we still need to remove
1435 * a reference.
1436 */
1437 os_ref_count_t __assert_only count = os_ref_release_locked(&pager->ref_count);
1438 assert(count == 1);
1439 apple_protect_pager_terminate_internal(pager);
1440 }
1441 }
1442
1443
1444 void
1445 crypt_info_reference(
1446 struct pager_crypt_info *crypt_info)
1447 {
1448 assert(crypt_info->crypt_refcnt != 0);
1449 #if CRYPT_INFO_DEBUG
1450 printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1451 __FUNCTION__,
1452 crypt_info,
1453 crypt_info->crypt_refcnt,
1454 crypt_info->crypt_refcnt + 1);
1455 #endif /* CRYPT_INFO_DEBUG */
1456 OSAddAtomic(+1, &crypt_info->crypt_refcnt);
1457 }
1458
1459 void
1460 crypt_info_deallocate(
1461 struct pager_crypt_info *crypt_info)
1462 {
1463 #if CRYPT_INFO_DEBUG
1464 printf("CRYPT_INFO %s: %p ref %d -> %d\n",
1465 __FUNCTION__,
1466 crypt_info,
1467 crypt_info->crypt_refcnt,
1468 crypt_info->crypt_refcnt - 1);
1469 #endif /* CRYPT_INFO_DEBUG */
1470 OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1471 if (crypt_info->crypt_refcnt == 0) {
1472 /* deallocate any crypt module data */
1473 if (crypt_info->crypt_end) {
1474 crypt_info->crypt_end(crypt_info->crypt_ops);
1475 crypt_info->crypt_end = NULL;
1476 }
1477 #if CRYPT_INFO_DEBUG
1478 printf("CRYPT_INFO %s: freeing %p\n",
1479 __FUNCTION__,
1480 crypt_info);
1481 #endif /* CRYPT_INFO_DEBUG */
1482 kfree(crypt_info, sizeof(*crypt_info));
1483 crypt_info = NULL;
1484 }
1485 }