/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (a simple linked list)
 * for now.
 */
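
/*
 * For illustration only (not compiled): a minimal sketch of how a client,
 * such as vm_map_apple_protected(), would obtain one of these pagers for
 * an encrypted range and release its reference once the range is mapped.
 * The function name, the offsets passed below and the error handling are
 * hypothetical placeholders, not part of this file's interface.
 */
#if 0
static kern_return_t
example_use_apple_protect_pager(
    vm_object_t backing_object,          /* encrypted file's VM object */
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end,
    struct pager_crypt_info *crypt_info) /* set up by the DSMOS kext */
{
    memory_object_t pager;

    /* get a new pager, or re-use an existing one for the same range */
    pager = apple_protect_pager_setup(backing_object,
        backing_offset,
        backing_offset, /* crypto_backing_offset: placeholder value */
        crypt_info,
        crypto_start,
        crypto_end);
    if (pager == MEMORY_OBJECT_NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    /*
     * The caller would now map "pager" copy-on-write in the target map
     * (e.g. via vm_map_enter_mem_object()), which takes its own
     * reference, and then drop the reference returned by _setup().
     */
    memory_object_deallocate(pager);
    return KERN_SUCCESS;
}
#endif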

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);

#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
    apple_protect_pager_reference,
    apple_protect_pager_deallocate,
    apple_protect_pager_init,
    apple_protect_pager_terminate,
    apple_protect_pager_data_request,
    apple_protect_pager_data_return,
    apple_protect_pager_data_initialize,
    apple_protect_pager_data_unlock,
    apple_protect_pager_synchronize,
    apple_protect_pager_map,
    apple_protect_pager_last_unmap,
    NULL, /* data_reclaim */
    "apple_protect"
};
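
/*
 * For reference: VM's memory_object_*() wrappers dispatch through this
 * ops vector.  A sketch of the dispatch, simplified from
 * osfmk/vm/memory_object.c (not compiled here):
 */
#if 0
kern_return_t
memory_object_data_request(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t desired_access,
    memory_object_fault_info_t fault_info)
{
    /*
     * For an object backed by this EMM, mo_pager_ops points at
     * apple_protect_pager_ops, so this call reaches
     * apple_protect_pager_data_request() below.
     */
    return (memory_object->mo_pager_ops->memory_object_data_request)(
        memory_object, offset, length, desired_access, fault_info);
}
#endif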

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
    /* mandatory generic header */
    struct memory_object ap_pgr_hdr;

    /* pager-specific data */
    queue_chain_t pager_queue;      /* next & prev pagers */
    unsigned int ref_count;         /* reference count */
    boolean_t is_ready;             /* is this pager ready ? */
    boolean_t is_mapped;            /* is this mem_obj mapped ? */
    vm_object_t backing_object;     /* VM obj w/ encrypted data */
    vm_object_offset_t backing_offset;
    vm_object_offset_t crypto_backing_offset; /* for key... */
    vm_object_offset_t crypto_start;
    vm_object_offset_t crypto_end;
    struct pager_crypt_info *crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;        /* number of pagers */
int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_lck_mtx_data(, apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;

lck_grp_t apple_protect_pager_lck_grp;
lck_grp_attr_t apple_protect_pager_lck_grp_attr;
lck_attr_t apple_protect_pager_lck_attr;


/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
    boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A) \
    MACRO_BEGIN \
    if ((apple_protect_pagerdebug & (LEVEL)) == (LEVEL)) { \
        printf A; \
    } \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


void
apple_protect_pager_bootstrap(void)
{
    lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
    lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
    lck_attr_setdefault(&apple_protect_pager_lck_attr);
    lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
    queue_init(&apple_protect_pager_queue);
}

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    apple_protect_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    pager = apple_protect_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->ap_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS)
        panic("apple_protect_pager_init: "
            "memory_object_change_attributes() failed");

#if CONFIG_SECLUDED_MEMORY
    if (secluded_for_filecache) {
        memory_object_mark_eligible_for_secluded(control, TRUE);
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("apple_protect_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("apple_protect_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
#if !DEBUG
    __unused
#endif
    vm_prot_t protection_required,
    memory_object_fault_info_t mo_fault_info)
{
    apple_protect_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl;
    unsigned int pl_count;
    vm_object_t src_top_object, src_page_object, dst_object;
    kern_return_t kr, retval;
    vm_map_offset_t kernel_mapping;
    vm_offset_t src_vaddr, dst_vaddr;
    vm_offset_t cur_offset;
    vm_offset_t offset_in_page;
    kern_return_t error_code;
    vm_prot_t prot;
    vm_page_t src_page, top_page;
    int interruptible;
    struct vm_object_fault_info fault_info;
    int ret;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    retval = KERN_SUCCESS;
    src_top_object = VM_OBJECT_NULL;
    src_page_object = VM_OBJECT_NULL;
    kernel_mapping = 0;
    upl = NULL;
    upl_pl = NULL;
    fault_info = *((struct vm_object_fault_info *) mo_fault_info);
    fault_info.stealth = TRUE;
    fault_info.io_sync = FALSE;
    fault_info.mark_zf_absent = FALSE;
    fault_info.batch_pmap_op = FALSE;
    interruptible = fault_info.interruptible;

    pager = apple_protect_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(pager->ref_count > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->ap_pgr_hdr.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);

#if __x86_64__ || __arm__ || __arm64__
    /* we'll use the 1-to-1 mapping of physical memory */
    src_vaddr = 0;
    dst_vaddr = 0;
#else /* __x86_64__ || __arm__ || __arm64__ */
    /*
     * Reserve 2 virtual pages in the kernel address space to map the
     * source and destination physical pages when it's their turn to
     * be processed.
     */
    vm_map_entry_t map_entry;

    vm_object_reference(kernel_object); /* ref. for mapping */
    kr = vm_map_find_space(kernel_map,
        &kernel_mapping,
        2 * PAGE_SIZE_64,
        0,
        0,
        VM_MAP_KERNEL_FLAGS_NONE,
        &map_entry);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(kernel_object);
        retval = kr;
        goto done;
    }
    map_entry->object.vm_object = kernel_object;
    map_entry->offset = kernel_mapping;
    vm_map_unlock(kernel_map);
    src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
    dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
#endif /* __x86_64__ || __arm__ || __arm64__ */

    /*
     * We'll map the encrypted data in the kernel address space from the
     * backing VM object (itself backed by the encrypted file via
     * the vnode pager).
     */
    src_top_object = pager->backing_object;
    assert(src_top_object != VM_OBJECT_NULL);
    vm_object_reference(src_top_object); /* keep the source object alive */

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0;
         retval == KERN_SUCCESS && cur_offset < length;
         cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (encrypted) page in the kernel's
         * virtual address space.
         * We already hold a reference on the src_top_object.
         */
    retry_src_fault:
        vm_object_lock(src_top_object);
        vm_object_paging_begin(src_top_object);
        error_code = 0;
        prot = VM_PROT_READ;
        src_page = VM_PAGE_NULL;
        kr = vm_fault_page(src_top_object,
            pager->backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE, /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            FALSE,
            &fault_info);
        switch (kr) {
        case VM_FAULT_SUCCESS:
            break;
        case VM_FAULT_RETRY:
            goto retry_src_fault;
        case VM_FAULT_MEMORY_SHORTAGE:
            if (vm_page_wait(interruptible)) {
                goto retry_src_fault;
            }
            /* fall thru */
        case VM_FAULT_INTERRUPTED:
            retval = MACH_SEND_INTERRUPTED;
            goto done;
        case VM_FAULT_SUCCESS_NO_VM_PAGE:
            /* success but no VM page: fail */
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
            /*FALLTHROUGH*/
        case VM_FAULT_MEMORY_ERROR:
            /* the page is not there! */
            if (error_code) {
                retval = error_code;
            } else {
                retval = KERN_MEMORY_ERROR;
            }
            goto done;
        default:
            panic("apple_protect_pager_data_request: "
                "vm_fault_page() unexpected error 0x%x\n",
                kr);
        }
        assert(src_page != VM_PAGE_NULL);
        assert(src_page->busy);

        if (!VM_PAGE_NON_SPECULATIVE_PAGEABLE(src_page)) {
            vm_page_lockspin_queues();

            if (!VM_PAGE_NON_SPECULATIVE_PAGEABLE(src_page)) {
                vm_page_deactivate(src_page);
            }
            vm_page_unlock_queues();
        }

        /*
         * Establish an explicit mapping of the source
         * physical page.
         */
#if __x86_64__
        src_vaddr = (vm_map_offset_t)
            PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
                << PAGE_SHIFT);
#elif __arm__ || __arm64__
        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
                << PAGE_SHIFT);
#else
        kr = pmap_enter(kernel_pmap,
            src_vaddr,
            VM_PAGE_GET_PHYS_PAGE(src_page),
            VM_PROT_READ,
            VM_PROT_NONE,
            0,
            TRUE);

        assert(kr == KERN_SUCCESS);
#endif
        /*
         * Establish an explicit pmap mapping of the destination
         * physical page.
         * We can't do a regular VM mapping because the VM page
         * is "busy".
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);
#if __x86_64__
        dst_vaddr = (vm_map_offset_t)
            PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
#elif __arm__ || __arm64__
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
#else
        kr = pmap_enter(kernel_pmap,
            dst_vaddr,
            dst_pnum,
            VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_NONE,
            0,
            TRUE);

        assert(kr == KERN_SUCCESS);
#endif
        src_page_object = VM_PAGE_OBJECT(src_page);

        /*
         * Validate the original page...
         */
        if (src_page_object->code_signed) {
            vm_page_validate_cs_mapped(
                src_page,
                (const void *) src_vaddr);
        }
        /*
         * ... and transfer the results to the destination page.
         */
        UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->cs_validated);
        UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->cs_tainted);
        UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
            src_page->cs_nx);

        /*
         * page_decrypt() might access a mapped file, so let's release
         * the object lock for the source page to avoid a potential
         * deadlock.  The source page is kept busy and we have a
         * "paging_in_progress" reference on its object, so it's safe
         * to unlock the object here.
         */
        assert(src_page->busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_unlock(src_page_object);

        /*
         * Decrypt the encrypted contents of the source page into the
         * destination page, 4KB at a time (see the illustrative
         * page_decrypt() sketch after this function).
         */
        for (offset_in_page = 0;
             offset_in_page < PAGE_SIZE;
             offset_in_page += 4096) {
            if (offset + cur_offset + offset_in_page <
                pager->crypto_start ||
                offset + cur_offset + offset_in_page >=
                pager->crypto_end) {
                /* not encrypted: just copy */
                bcopy((const char *)(src_vaddr +
                        offset_in_page),
                    (char *)(dst_vaddr + offset_in_page),
                    4096);
                if (apple_protect_pager_data_request_debug) {
                    printf("apple_protect_data_request"
                        "(%p,0x%llx+0x%llx+0x%04llx): "
                        "out of crypto range "
                        "[0x%llx:0x%llx]: "
                        "COPY [0x%016llx 0x%016llx] "
                        "code_signed=%d "
                        "cs_validated=%d "
                        "cs_tainted=%d "
                        "cs_nx=%d\n",
                        pager,
                        offset,
                        (uint64_t) cur_offset,
                        (uint64_t) offset_in_page,
                        pager->crypto_start,
                        pager->crypto_end,
                        *(uint64_t *)(dst_vaddr +
                            offset_in_page),
                        *(uint64_t *)(dst_vaddr +
                            offset_in_page + 8),
                        src_page_object->code_signed,
                        src_page->cs_validated,
                        src_page->cs_tainted,
                        src_page->cs_nx);
                }
                ret = 0;
                continue;
            }
            ret = pager->crypt_info->page_decrypt(
                (const void *)(src_vaddr + offset_in_page),
                (void *)(dst_vaddr + offset_in_page),
                ((pager->crypto_backing_offset -
                    pager->crypto_start) + /* XXX ? */
                    offset +
                    cur_offset +
                    offset_in_page),
                pager->crypt_info->crypt_ops);
            if (apple_protect_pager_data_request_debug) {
                printf("apple_protect_data_request"
                    "(%p,0x%llx+0x%llx+0x%04llx): "
                    "in crypto range [0x%llx:0x%llx]: "
                    "DECRYPT offset 0x%llx="
                    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
                    "[0x%016llx 0x%016llx] "
                    "code_signed=%d "
                    "cs_validated=%d "
                    "cs_tainted=%d "
                    "cs_nx=%d "
                    "ret=0x%x\n",
                    pager,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    pager->crypto_start, pager->crypto_end,
                    ((pager->crypto_backing_offset -
                        pager->crypto_start) +
                        offset +
                        cur_offset +
                        offset_in_page),
                    pager->crypto_backing_offset,
                    pager->crypto_start,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    *(uint64_t *)(dst_vaddr + offset_in_page),
                    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
                    src_page_object->code_signed,
                    src_page->cs_validated,
                    src_page->cs_tainted,
                    src_page->cs_nx,
                    ret);
            }
            if (ret) {
                break;
            }
        }
        if (ret) {
            /*
             * Decryption failed.  Abort the fault.
             */
            retval = KERN_ABORTED;
        }

        assert(VM_PAGE_OBJECT(src_page) == src_page_object);
        assert(src_page->busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_lock(src_page_object);

#if __x86_64__ || __arm__ || __arm64__
        /* we used the 1-to-1 mapping of physical memory */
        src_vaddr = 0;
        dst_vaddr = 0;
#else /* __x86_64__ || __arm__ || __arm64__ */
        /*
         * Remove the pmap mapping of the source and destination pages
         * in the kernel.
         */
        pmap_remove(kernel_pmap,
            (addr64_t) kernel_mapping,
            (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));
#endif /* __x86_64__ || __arm__ || __arm64__ */

        /*
         * Cleanup the result of vm_fault_page() of the source page.
         */
        if (retval == KERN_SUCCESS &&
            src_page->busy &&
            !VM_PAGE_WIRED(src_page) &&
            !src_page->dirty &&
            !src_page->precious &&
            !src_page->laundry &&
            !src_page->cleaning) {
            int refmod_state;

            refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(src_page));

            if (refmod_state & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(src_page, FALSE);
            }
            if (!src_page->dirty) {
                vm_page_free_unlocked(src_page, TRUE);
                src_page = VM_PAGE_NULL;
            } else {
                PAGE_WAKEUP_DONE(src_page);
            }
        } else {
            PAGE_WAKEUP_DONE(src_page);
        }
        src_page = VM_PAGE_NULL;
        vm_object_paging_end(src_page_object);
        vm_object_unlock(src_page_object);
        if (top_page != VM_PAGE_NULL) {
            assert(VM_PAGE_OBJECT(top_page) == src_top_object);
            vm_object_lock(src_top_object);
            VM_PAGE_FREE(top_page);
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        }
    }

done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
            if (retval == KERN_ABORTED) {
                wait_result_t wait_result;

                /*
                 * We aborted the fault and did not provide
                 * any contents for the requested pages but
                 * the pages themselves are not invalid, so
                 * let's return success and let the caller
                 * retry the fault, in case it might succeed
                 * later (when the decryption code is up and
                 * running in the kernel, for example).
                 */
                retval = KERN_SUCCESS;
                /*
                 * Wait a little bit first to avoid using
                 * too much CPU time retrying and failing
                 * the same fault over and over again.
                 */
                wait_result = assert_wait_timeout(
                    (event_t) apple_protect_pager_data_request,
                    THREAD_UNINT,
                    10000, /* 10ms */
                    NSEC_PER_USEC);
                assert(wait_result == THREAD_WAITING);
                wait_result = thread_block(THREAD_CONTINUE_NULL);
                assert(wait_result == THREAD_TIMED_OUT);
            }
        } else {
            boolean_t empty;
            upl_commit_range(upl, 0, upl->size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (kernel_mapping != 0) {
        /* clean up the mapping of the source and destination pages */
        kr = vm_map_remove(kernel_map,
            kernel_mapping,
            kernel_mapping + (2 * PAGE_SIZE_64),
            VM_MAP_NO_FLAGS);
        assert(kr == KERN_SUCCESS);
        kernel_mapping = 0;
        src_vaddr = 0;
        dst_vaddr = 0;
    }
    if (src_top_object != VM_OBJECT_NULL) {
        vm_object_deallocate(src_top_object);
    }

    return retval;
}
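
/*
 * For illustration only (not compiled): a trivial "decryptor" showing the
 * contract that pager->crypt_info->page_decrypt() is expected to honor in
 * the loop above: read 4096 bytes at src_vaddr, write the decrypted bytes
 * to dst_vaddr, using src_offset (the offset within the encrypted range)
 * to derive the key material, and return 0 on success.  The XOR "cipher"
 * below is a placeholder, not the real DSMOS transform.
 */
#if 0
static kern_return_t
example_page_decrypt(
    const void *src_vaddr,
    void *dst_vaddr,
    unsigned long long src_offset,
    void *crypt_ops)
{
    const uint8_t *src = (const uint8_t *) src_vaddr;
    uint8_t *dst = (uint8_t *) dst_vaddr;
    /* placeholder key derivation: a real module would use crypt_ops */
    uint8_t key = (uint8_t) (src_offset >> PAGE_SHIFT);
    unsigned int i;

    (void) crypt_ops; /* would hold the crypt module's key schedule */
    for (i = 0; i < 4096; i++) {
        dst[i] = src[i] ^ key;
    }
    return KERN_SUCCESS;
}
#endif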

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is
 * not 0, i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    assert(pager->ref_count > 0);
    pager->ref_count++;
    lck_mtx_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
    apple_protect_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
    apple_protect_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
    }

    /* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: deallocate %p ref %d\n",
        __FUNCTION__,
        pager->crypt_info,
        pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
    crypt_info_deallocate(pager->crypt_info);
    pager->crypt_info = NULL;

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not, but always
 * returns with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
    apple_protect_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    int count_unmapped;

    if (!locked) {
        lck_mtx_lock(&apple_protect_pager_lock);
    }

    count_unmapped = (apple_protect_pager_count -
        apple_protect_pager_count_mapped);
    if (count_unmapped > apple_protect_pager_cache_limit) {
        /* we have too many unmapped pagers: trim some */
        needs_trimming = TRUE;
    } else {
        needs_trimming = FALSE;
    }

    /* drop a reference on this pager */
    pager->ref_count--;

    if (pager->ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        apple_protect_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&apple_protect_pager_lock);
        apple_protect_pager_terminate_internal(pager);
    } else if (pager->ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated.  Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&apple_protect_pager_lock);
        if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
            pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof (*pager));
        pager = APPLE_PROTECT_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&apple_protect_pager_lock);
    }

    if (needs_trimming) {
        apple_protect_pager_trim();
    }
    /* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
    pager = apple_protect_pager_lookup(mem_obj);
    apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 * apple_protect_pager_terminate()
 *
 * Nothing to do here: the real cleanup happens through the reference
 * counting in apple_protect_pager_deallocate_internal().
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_synchronize()
 *
 * memory_object_synchronize() is obsolete, so this should never be called.
 */
kern_return_t
apple_protect_pager_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    assert(pager->is_ready);
    assert(pager->ref_count > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        pager->ref_count++;
        apple_protect_pager_count_mapped++;
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;
    int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("apple_protect_pager_last_unmap: %p\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    lck_mtx_lock(&apple_protect_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        apple_protect_pager_count_mapped--;
        count_unmapped = (apple_protect_pager_count -
            apple_protect_pager_count_mapped);
        if (count_unmapped > apple_protect_pager_count_unmapped_max) {
            apple_protect_pager_count_unmapped_max = count_unmapped;
        }
        pager->is_mapped = FALSE;
        apple_protect_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock! */
    } else {
        lck_mtx_unlock(&apple_protect_pager_lock);
    }

    return KERN_SUCCESS;
}
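
/*
 * For illustration only (not compiled): the mapping-reference protocol
 * that _map() and _last_unmap() implement, reduced to its invariant.
 * The function and its direct calls are hypothetical; in reality VM
 * makes these calls on the pager's behalf.
 */
#if 0
static void
example_mapping_reference_protocol(apple_protect_pager_t pager)
{
    /* first mapping: takes the single extra "mapping" reference */
    apple_protect_pager_map((memory_object_t) pager, VM_PROT_READ);
    assert(pager->is_mapped);

    /* subsequent mappings do not add references; only the first does */
    apple_protect_pager_map((memory_object_t) pager, VM_PROT_READ);

    /* when the last mapping goes away, the mapping reference is dropped */
    apple_protect_pager_last_unmap((memory_object_t) pager);
    assert(!pager->is_mapped);
}
#endif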


/*
 * apple_protect_pager_lookup()
 *
 * Converts a memory object into its "apple protect" pager, after checking
 * that the memory object is indeed managed by this EMM.
 */
apple_protect_pager_t
apple_protect_pager_lookup(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
    pager = (apple_protect_pager_t) mem_obj;
    assert(pager->ref_count > 0);
    return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end)
{
    apple_protect_pager_t pager, pager2;
    memory_object_control_t control;
    kern_return_t kr;
    struct pager_crypt_info *old_crypt_info;

    pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
    if (pager == APPLE_PROTECT_PAGER_NULL) {
        return APPLE_PROTECT_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the first word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
    pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->is_ready = FALSE; /* not ready until it has a "name" */
    pager->ref_count = 1;    /* existence reference (for the cache) */
    pager->ref_count++;      /* for the caller */
    pager->is_mapped = FALSE;
    pager->backing_object = backing_object;
    pager->backing_offset = backing_offset;
    pager->crypto_backing_offset = crypto_backing_offset;
    pager->crypto_start = crypto_start;
    pager->crypto_end = crypto_end;
    pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->page_decrypt,
        crypt_info->crypt_end,
        crypt_info->crypt_ops,
        crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

    vm_object_reference(backing_object);

    old_crypt_info = NULL;

    lck_mtx_lock(&apple_protect_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&apple_protect_pager_queue,
        pager2,
        apple_protect_pager_t,
        pager_queue) {
        if ((pager2->crypt_info->page_decrypt !=
                crypt_info->page_decrypt) ||
            (pager2->crypt_info->crypt_end !=
                crypt_info->crypt_end) ||
            (pager2->crypt_info->crypt_ops !=
                crypt_info->crypt_ops)) {
            /* crypt_info contents do not match: next pager */
            continue;
        }

        /* found a match for crypt_info ... */
        if (old_crypt_info) {
            /* ... already switched to that crypt_info */
            assert(old_crypt_info == pager2->crypt_info);
        } else if (pager2->crypt_info != crypt_info) {
            /* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: reference %p ref %d "
                "(create match)\n",
                __FUNCTION__,
                pager2->crypt_info,
                pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            old_crypt_info = pager2->crypt_info;
            crypt_info_reference(old_crypt_info);
            pager->crypt_info = old_crypt_info;
        }

        if (pager2->backing_object == backing_object &&
            pager2->backing_offset == backing_offset &&
            pager2->crypto_backing_offset == crypto_backing_offset &&
            pager2->crypto_start == crypto_start &&
            pager2->crypto_end == crypto_end) {
            /* full match: use that pager */
            break;
        }
    }
    if (!queue_end(&apple_protect_pager_queue,
            (queue_entry_t) pager2)) {
        /* we lost the race, down with the loser... */
        lck_mtx_unlock(&apple_protect_pager_lock);
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
            __FUNCTION__,
            pager->crypt_info,
            pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
        crypt_info_deallocate(pager->crypt_info);
        pager->crypt_info = NULL;
        kfree(pager, sizeof (*pager));
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }

    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue);
    apple_protect_pager_count++;
    if (apple_protect_pager_count > apple_protect_pager_count_max) {
        apple_protect_pager_count_max = apple_protect_pager_count;
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    lck_mtx_lock(&apple_protect_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    lck_mtx_unlock(&apple_protect_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    if (old_crypt_info != NULL &&
        old_crypt_info != crypt_info) {
        /* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: deallocate %p ref %d "
            "(create used old)\n",
            __FUNCTION__,
            crypt_info,
            crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
        crypt_info_deallocate(crypt_info);
        crypt_info = NULL;
    }

    return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end)
{
    apple_protect_pager_t pager;
    struct pager_crypt_info *old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->page_decrypt,
        crypt_info->crypt_end,
        crypt_info->crypt_ops,
        crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

    old_crypt_info = NULL;

    lck_mtx_lock(&apple_protect_pager_lock);

    queue_iterate(&apple_protect_pager_queue,
        pager,
        apple_protect_pager_t,
        pager_queue) {
        if ((pager->crypt_info->page_decrypt !=
                crypt_info->page_decrypt) ||
            (pager->crypt_info->crypt_end !=
                crypt_info->crypt_end) ||
            (pager->crypt_info->crypt_ops !=
                crypt_info->crypt_ops)) {
            /* no match for "crypt_info": next pager */
            continue;
        }
        /* found a match for crypt_info ... */
        if (old_crypt_info) {
            /* ... already switched to that crypt_info */
            assert(old_crypt_info == pager->crypt_info);
        } else {
            /* ... switch to that pager's crypt_info */
            old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "switching crypt_info from %p [%p,%p,%p,%d] "
                "to %p [%p,%p,%p,%d] from pager %p\n",
                __FUNCTION__,
                crypt_info,
                crypt_info->page_decrypt,
                crypt_info->crypt_end,
                crypt_info->crypt_ops,
                crypt_info->crypt_refcnt,
                old_crypt_info,
                old_crypt_info->page_decrypt,
                old_crypt_info->crypt_end,
                old_crypt_info->crypt_ops,
                old_crypt_info->crypt_refcnt,
                pager);
            printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
                __FUNCTION__,
                pager->crypt_info,
                pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            crypt_info_reference(pager->crypt_info);
        }

        if (pager->backing_object == backing_object &&
            pager->backing_offset == backing_offset &&
            pager->crypto_backing_offset == crypto_backing_offset &&
            pager->crypto_start == crypto_start &&
            pager->crypto_end == crypto_end) {
            /* full match: use that pager! */
            assert(old_crypt_info == pager->crypt_info);
            assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "pager match with %p crypt_info %p\n",
                __FUNCTION__,
                pager,
                pager->crypt_info);
            printf("CRYPT_INFO %s: deallocate %p ref %d "
                "(pager match)\n",
                __FUNCTION__,
                old_crypt_info,
                old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
            /* release the extra ref on crypt_info we got above */
            crypt_info_deallocate(old_crypt_info);
            assert(old_crypt_info->crypt_refcnt > 0);
            /* give an extra reference on the pager to the caller */
            assert(pager->ref_count > 0);
            pager->ref_count++;
            break;
        }
    }
    if (queue_end(&apple_protect_pager_queue,
            (queue_entry_t) pager)) {
        lck_mtx_unlock(&apple_protect_pager_lock);
        /* no existing pager for this backing object */
        pager = APPLE_PROTECT_PAGER_NULL;
        if (old_crypt_info) {
            /* use this old crypt_info for the new pager */
            new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "will use old_crypt_info %p for new pager\n",
                __FUNCTION__,
                old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        } else {
            /* allocate a new crypt_info for the new pager */
            new_crypt_info = kalloc(sizeof (*new_crypt_info));
            if (new_crypt_info != NULL) {
                /* check for NULL first: kalloc() can fail */
                *new_crypt_info = *crypt_info;
                new_crypt_info->crypt_refcnt = 1;
            }
#if CRYPT_INFO_DEBUG
            printf("CRYPT_INFO %s: "
                "will use new_crypt_info %p for new pager\n",
                __FUNCTION__,
                new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        }
        if (new_crypt_info == NULL) {
            /* can't create a new pager without a crypt_info */
        } else {
            /* create a new pager */
            pager = apple_protect_pager_create(
                backing_object,
                backing_offset,
                crypto_backing_offset,
                new_crypt_info,
                crypto_start,
                crypto_end);
        }
        if (pager == APPLE_PROTECT_PAGER_NULL) {
            /* could not create a new pager */
            if (new_crypt_info == old_crypt_info) {
                /* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
                printf("CRYPT_INFO %s: deallocate %p ref %d "
                    "(create fail old_crypt_info)\n",
                    __FUNCTION__,
                    old_crypt_info,
                    old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
                crypt_info_deallocate(old_crypt_info);
                old_crypt_info = NULL;
            } else if (new_crypt_info != NULL) {
                /* release the unused new_crypt_info */
                assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
                printf("CRYPT_INFO %s: deallocate %p ref %d "
                    "(create fail new_crypt_info)\n",
                    __FUNCTION__,
                    new_crypt_info,
                    new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
                crypt_info_deallocate(new_crypt_info);
                new_crypt_info = NULL;
            }
            return MEMORY_OBJECT_NULL;
        }
        lck_mtx_lock(&apple_protect_pager_lock);
    } else {
        assert(old_crypt_info == pager->crypt_info);
    }

    while (!pager->is_ready) {
        lck_mtx_sleep(&apple_protect_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&apple_protect_pager_lock);

    return (memory_object_t) pager;
}

void
apple_protect_pager_trim(void)
{
    apple_protect_pager_t pager, prev_pager;
    queue_head_t trim_queue;
    int num_trim;
    int count_unmapped;

    lck_mtx_lock(&apple_protect_pager_lock);

    /*
     * We have too many pagers, so try and trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (apple_protect_pager_t)
             queue_last(&apple_protect_pager_queue);
         !queue_end(&apple_protect_pager_queue,
             (queue_entry_t) pager);
         pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (apple_protect_pager_t)
            queue_prev(&pager->pager_queue);

        if (pager->ref_count == 2 &&
            pager->is_ready &&
            !pager->is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            apple_protect_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                pager,
                apple_protect_pager_t,
                pager_queue);

            count_unmapped = (apple_protect_pager_count -
                apple_protect_pager_count_mapped);
            if (count_unmapped <= apple_protect_pager_cache_limit) {
                /* we have trimmed enough pagers */
                break;
            }
        }
    }
    if (num_trim > apple_protect_pager_num_trim_max) {
        apple_protect_pager_num_trim_max = num_trim;
    }
    apple_protect_pager_num_trim_total += num_trim;

    lck_mtx_unlock(&apple_protect_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
            pager,
            apple_protect_pager_t,
            pager_queue);
        pager->pager_queue.next = NULL;
        pager->pager_queue.prev = NULL;
        assert(pager->ref_count == 2);
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to drop
         * a reference.
         */
        pager->ref_count--;
        apple_protect_pager_terminate_internal(pager);
    }
}


void
crypt_info_reference(
    struct pager_crypt_info *crypt_info)
{
    assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: %p ref %d -> %d\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->crypt_refcnt,
        crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
    OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}

void
crypt_info_deallocate(
    struct pager_crypt_info *crypt_info)
{
#if CRYPT_INFO_DEBUG
    printf("CRYPT_INFO %s: %p ref %d -> %d\n",
        __FUNCTION__,
        crypt_info,
        crypt_info->crypt_refcnt,
        crypt_info->crypt_refcnt - 1);
#endif /* CRYPT_INFO_DEBUG */
    /*
     * Use the atomic op's return value (the pre-decrement count) to
     * decide whether we dropped the last reference: re-reading
     * crypt_refcnt after the decrement would race with a concurrent
     * deallocation and could lead to a double free.
     */
    if (OSAddAtomic(-1, &crypt_info->crypt_refcnt) == 1) {
        /* deallocate any crypt module data */
        if (crypt_info->crypt_end) {
            crypt_info->crypt_end(crypt_info->crypt_ops);
            crypt_info->crypt_end = NULL;
        }
#if CRYPT_INFO_DEBUG
        printf("CRYPT_INFO %s: freeing %p\n",
            __FUNCTION__,
            crypt_info);
#endif /* CRYPT_INFO_DEBUG */
        kfree(crypt_info, sizeof (*crypt_info));
        crypt_info = NULL;
    }
}