/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it, and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (a simple linked list)
 * for now.
 */
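
/*
 * Typical use, as a rough sketch (see apple_protect_pager_setup() below;
 * the usual client is vm_map_apple_protected(), in vm_map.c):
 *
 *	memory_object_t mo;
 *
 *	mo = apple_protect_pager_setup(backing_object, backing_offset,
 *	    crypto_backing_offset, crypt_info,
 *	    crypto_start, crypto_end, cache_pager);
 *	if (mo != MEMORY_OBJECT_NULL) {
 *		// map "mo" copy-on-write in the target map, then drop
 *		// the returned reference with memory_object_deallocate()
 *		// once the mapping holds its own reference.
 *	}
 */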

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
boolean_t apple_protect_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);

#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	.memory_object_reference = apple_protect_pager_reference,
	.memory_object_deallocate = apple_protect_pager_deallocate,
	.memory_object_init = apple_protect_pager_init,
	.memory_object_terminate = apple_protect_pager_terminate,
	.memory_object_data_request = apple_protect_pager_data_request,
	.memory_object_data_return = apple_protect_pager_data_return,
	.memory_object_data_initialize = apple_protect_pager_data_initialize,
	.memory_object_data_unlock = apple_protect_pager_data_unlock,
	.memory_object_synchronize = apple_protect_pager_synchronize,
	.memory_object_map = apple_protect_pager_map,
	.memory_object_last_unmap = apple_protect_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = apple_protect_pager_backing_object,
	.memory_object_pager_name = "apple_protect"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	/* mandatory generic header */
	struct memory_object ap_pgr_hdr;

	/* pager-specific data */
	queue_chain_t		pager_queue;	/* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define ap_pgr_hdr_ref		ap_pgr_hdr.mo_ref
#else
	os_ref_atomic_t		ap_pgr_hdr_ref;	/* reference count */
#endif
	bool			is_ready;	/* is this pager ready? */
	bool			is_mapped;	/* is this mem_obj mapped? */
	bool			is_cached;	/* is this pager cached? */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
	vm_object_offset_t	backing_offset;
	vm_object_offset_t	crypto_backing_offset; /* for key... */
	vm_object_offset_t	crypto_start;
	vm_object_offset_t	crypto_end;
	struct pager_crypt_info *crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
unsigned int apple_protect_pager_count = 0;	/* number of pagers */
unsigned int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
unsigned int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
unsigned int apple_protect_pager_count_max = 0;
unsigned int apple_protect_pager_count_unmapped_max = 0;
unsigned int apple_protect_pager_num_trim_max = 0;
unsigned int apple_protect_pager_num_trim_total = 0;



/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
    boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {		\
		printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t pager;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->ap_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("apple_protect_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
		memory_object_mark_eligible_for_secluded(control, TRUE);
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
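/*
 * Outline of a page-in, as implemented below:
 *  1. gather the requested range in a UPL, asking only for pages that
 *     are absent from the destination object;
 *  2. for each page, fault in the corresponding encrypted source page
 *     from the backing object with vm_fault_page();
 *  3. if the source is code-signed, validate it and copy the code-signing
 *     results (validated/tainted/nx) to the destination page;
 *  4. copy or decrypt the contents, 4KB at a time, into the destination
 *     page through the physical aperture;
 *  5. commit the UPL with the pages marked "clean" (the pager can always
 *     regenerate their contents), or abort it on failure.
 */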
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t pager;
	memory_object_control_t mo_control;
	upl_t upl;
	int upl_flags;
	upl_size_t upl_size;
	upl_page_info_t *upl_pl;
	unsigned int pl_count;
	vm_object_t src_top_object, src_page_object, dst_object;
	kern_return_t kr, retval;
	vm_offset_t src_vaddr, dst_vaddr;
	vm_offset_t cur_offset;
	vm_offset_t offset_in_page;
	kern_return_t error_code;
	vm_prot_t prot;
	vm_page_t src_page, top_page;
	int interruptible;
	struct vm_object_fault_info fault_info;
	int ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;
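	/*
	 * The overrides above make this fetch as unobtrusive as possible:
	 * in particular, "stealth" presumably keeps this kernel-internal
	 * fault from perturbing the normal page-reference accounting, and
	 * the source page is explicitly moved to the speculative queue
	 * below so that it remains cheap to reclaim.
	 */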

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	fault_info.lo_offset += pager->backing_offset;
	fault_info.hi_offset += pager->backing_offset;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->ap_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_top_object = pager->backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    pager->backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,		/* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			    "vm_fault_page() unexpected error 0x%x\n",
			    kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();

			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
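		/*
		 * Both pages are addressed through the kernel's physical
		 * aperture (phystokv()), so no transient kernel mappings
		 * need to be set up or torn down for the copy/decryption.
		 */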
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		    << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * page_decrypt() might access a mapped file, so let's release
		 * the object lock for the source page to avoid a potential
		 * deadlock. The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
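		/*
		 * The loop below works in 4096-byte units, presumably the
		 * crypt modules' native block size: on configurations where
		 * PAGE_SIZE is larger (e.g. 16KB), each VM page is processed
		 * as several 4KB subpages.
		 */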
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += 4096) {
			if (offset + cur_offset + offset_in_page <
			    pager->crypto_start ||
			    offset + cur_offset + offset_in_page >=
			    pager->crypto_end) {
				/* not encrypted: just copy */
				bcopy((const char *)(src_vaddr +
				    offset_in_page),
				    (char *)(dst_vaddr + offset_in_page),
				    4096);

				if (apple_protect_pager_data_request_debug) {
					printf("apple_protect_data_request"
					    "(%p,0x%llx+0x%llx+0x%04llx): "
					    "out of crypto range "
					    "[0x%llx:0x%llx]: "
					    "COPY [0x%016llx 0x%016llx] "
					    "code_signed=%d "
					    "cs_validated=%d "
					    "cs_tainted=%d "
					    "cs_nx=%d\n",
					    pager,
					    offset,
					    (uint64_t) cur_offset,
					    (uint64_t) offset_in_page,
					    pager->crypto_start,
					    pager->crypto_end,
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page),
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page + 8),
					    src_page_object->code_signed,
					    src_page->vmp_cs_validated,
					    src_page->vmp_cs_tainted,
					    src_page->vmp_cs_nx);
				}
				ret = 0;
				continue;
			}
			ret = pager->crypt_info->page_decrypt(
				(const void *)(src_vaddr + offset_in_page),
				(void *)(dst_vaddr + offset_in_page),
				((pager->crypto_backing_offset -
				pager->crypto_start) +	/* XXX ? */
				offset +
				cur_offset +
				offset_in_page),
				pager->crypt_info->crypt_ops);

			if (apple_protect_pager_data_request_debug) {
				printf("apple_protect_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): "
				    "in crypto range [0x%llx:0x%llx]: "
				    "DECRYPT offset 0x%llx="
				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "ret=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    pager->crypto_start, pager->crypto_end,
				    ((pager->crypto_backing_offset -
				    pager->crypto_start) +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->crypto_backing_offset,
				    pager->crypto_start,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    ret);
			}
			if (ret) {
				break;
			}
		}
		if (ret) {
			/*
			 * Decryption failed. Abort the fault.
			 */
			retval = KERN_ABORTED;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,	/* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t mem_obj)
{
	apple_protect_pager_t pager;

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
	lck_mtx_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	    __FUNCTION__,
	    pager->crypt_info,
	    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not, but always
 * returns with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t pager,
	boolean_t locked)
{
	boolean_t needs_trimming;
	unsigned int count_unmapped;
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
	    apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof(*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t mem_obj)
{
	apple_protect_pager_t pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
apple_protect_pager_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t sync_flags)
{
	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	apple_protect_pager_t pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t mem_obj)
{
	apple_protect_pager_t pager;
	unsigned int count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
		    apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}

boolean_t
apple_protect_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset)
{
	apple_protect_pager_t pager;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_backing_object: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	*backing_object = pager->backing_object;
	*backing_offset = pager->backing_offset + offset;

	return TRUE;
}

/*
 *
 */
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t mem_obj)
{
	apple_protect_pager_t pager;

	assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
	pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
	assert(os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) > 0);
	return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager)
{
	apple_protect_pager_t pager, pager2;
	memory_object_control_t control;
	kern_return_t kr;
	struct pager_crypt_info *old_crypt_info;

	pager = (apple_protect_pager_t) kalloc(sizeof(*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE; /* not ready until it has a "name" */
	/* one reference for the caller */
	os_ref_init_count_raw(&pager->ap_pgr_hdr_ref, NULL, 1);
	pager->is_mapped = FALSE;
	if (cache_pager) {
		/* extra reference for the cache */
		os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		pager->is_cached = true;
	} else {
		pager->is_cached = false;
	}
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
	    pager2,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}

		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			    "(create match)\n",
			    __FUNCTION__,
			    pager2->crypt_info,
			    pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	if (!queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		    __FUNCTION__,
		    pager->crypt_info,
		    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree(pager, sizeof(*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		    "(create used old)\n",
		    __FUNCTION__,
		    crypt_info,
		    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object. If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
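/*
 * On success, the caller receives a reference on the returned memory
 * object: either a brand new pager, or an extra reference on an existing
 * pager that fully matches. MEMORY_OBJECT_NULL is returned if no pager
 * could be created. The caller's "crypt_info" is never stored directly:
 * it is either replaced by an equivalent crypt_info already in use by
 * another pager, or copied into a freshly allocated pager_crypt_info.
 */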
memory_object_t
apple_protect_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager)
{
	apple_protect_pager_t pager;
	struct pager_crypt_info *old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* no match for "crypt_info": next pager */
			continue;
		}
		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager->crypt_info);
		} else {
			/* ... switch to that pager's crypt_info */
			old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "switching crypt_info from %p [%p,%p,%p,%d] "
			    "to %p [%p,%p,%p,%d] from pager %p\n",
			    __FUNCTION__,
			    crypt_info,
			    crypt_info->page_decrypt,
			    crypt_info->crypt_end,
			    crypt_info->crypt_ops,
			    crypt_info->crypt_refcnt,
			    old_crypt_info,
			    old_crypt_info->page_decrypt,
			    old_crypt_info->crypt_end,
			    old_crypt_info->crypt_ops,
			    old_crypt_info->crypt_refcnt,
			    pager);
			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
			    __FUNCTION__,
			    pager->crypt_info,
			    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			crypt_info_reference(pager->crypt_info);
		}

		if (pager->backing_object == backing_object &&
		    pager->backing_offset == backing_offset &&
		    pager->crypto_backing_offset == crypto_backing_offset &&
		    pager->crypto_start == crypto_start &&
		    pager->crypto_end == crypto_end) {
			/* full match: use that pager! */
			assert(old_crypt_info == pager->crypt_info);
			assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "pager match with %p crypt_info %p\n",
			    __FUNCTION__,
			    pager,
			    pager->crypt_info);
			printf("CRYPT_INFO %s: deallocate %p ref %d "
			    "(pager match)\n",
			    __FUNCTION__,
			    old_crypt_info,
			    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			/* release the extra ref on crypt_info we got above */
			crypt_info_deallocate(old_crypt_info);
			assert(old_crypt_info->crypt_refcnt > 0);
			/* give an extra reference on the pager to the caller */
			os_ref_retain_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager)) {
		lck_mtx_unlock(&apple_protect_pager_lock);
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
		if (old_crypt_info) {
			/* use this old crypt_info for the new pager */
			new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "will use old_crypt_info %p for new pager\n",
			    __FUNCTION__,
			    old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		} else {
			/* allocate a new crypt_info for the new pager */
			new_crypt_info = kalloc(sizeof(*new_crypt_info));
			*new_crypt_info = *crypt_info;
			new_crypt_info->crypt_refcnt = 1;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			    "will use new_crypt_info %p for new pager\n",
			    __FUNCTION__,
			    new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		}
		if (new_crypt_info == NULL) {
			/* can't create a new pager without a crypt_info */
		} else {
			/* create a new pager */
			pager = apple_protect_pager_create(
				backing_object,
				backing_offset,
				crypto_backing_offset,
				new_crypt_info,
				crypto_start,
				crypto_end,
				cache_pager);
		}
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			/* could not create a new pager */
			if (new_crypt_info == old_crypt_info) {
				/* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				    "(create fail old_crypt_info)\n",
				    __FUNCTION__,
				    old_crypt_info,
				    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(old_crypt_info);
				old_crypt_info = NULL;
			} else {
				/* release unused new_crypt_info */
				assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				    "(create fail new_crypt_info)\n",
				    __FUNCTION__,
				    new_crypt_info,
				    new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(new_crypt_info);
				new_crypt_info = NULL;
			}
			return MEMORY_OBJECT_NULL;
		}
		lck_mtx_lock(&apple_protect_pager_lock);
	} else {
		assert(old_crypt_info == pager->crypt_info);
	}

	while (!pager->is_ready) {
		lck_mtx_sleep(&apple_protect_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}

void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t pager, prev_pager;
	queue_head_t trim_queue;
	unsigned int num_trim;
	unsigned int count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
	    queue_last(&apple_protect_pager_queue);
	    !queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
		    queue_prev(&pager->pager_queue);

		if (pager->is_cached &&
		    os_ref_get_count_raw(&pager->ap_pgr_hdr_ref) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    apple_protect_pager_t,
			    pager_queue);

			count_unmapped = (apple_protect_pager_count -
			    apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have trimmed enough pagers */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    apple_protect_pager_t,
		    pager_queue);
		assert(pager->is_cached);
		pager->is_cached = false;
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		os_ref_count_t __assert_only count;
		count = os_ref_release_locked_raw(&pager->ap_pgr_hdr_ref, NULL);
		assert(count == 1);
		apple_protect_pager_terminate_internal(pager);
	}
}


void
crypt_info_reference(
	struct pager_crypt_info *crypt_info)
{
	assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->crypt_refcnt,
	    crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}

void
crypt_info_deallocate(
	struct pager_crypt_info *crypt_info)
{
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->crypt_refcnt,
	    crypt_info->crypt_refcnt - 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
	if (crypt_info->crypt_refcnt == 0) {
		/* deallocate any crypt module data */
		if (crypt_info->crypt_end) {
			crypt_info->crypt_end(crypt_info->crypt_ops);
			crypt_info->crypt_end = NULL;
		}
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: freeing %p\n",
		    __FUNCTION__,
		    crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		kfree(crypt_info, sizeof(*crypt_info));
		crypt_info = NULL;
	}
}