/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>


/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (a simple linked list)
 * for now.
 */
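
/*
 * Illustrative sketch (added; not part of the original file): the expected
 * client-side flow, modeled on what a caller such as vm_map_apple_protected()
 * in vm_map.c is assumed to do -- obtain a pager for the encrypted range,
 * map the resulting memory object copy-on-write over the target range, then
 * drop the caller's reference on the memory object.
 */
#if 0 /* example only, not compiled */
static kern_return_t
example_map_protected_range(
	vm_map_t		map,
	vm_map_offset_t		start,		/* page-aligned */
	vm_map_size_t		size,		/* page-aligned */
	vm_object_t		backing_object,	/* holds the encrypted data */
	struct pager_crypt_info	*crypt_info)
{
	memory_object_t	unprotected_mem_obj;
	vm_map_offset_t	map_addr;
	kern_return_t	kr;

	/* find a matching pager or create a new one */
	unprotected_mem_obj = apple_protect_pager_setup(
		backing_object,
		0,				/* backing_offset */
		0,				/* crypto_backing_offset */
		crypt_info,
		0,				/* crypto_start */
		(vm_object_offset_t) size);	/* crypto_end */
	if (unprotected_mem_obj == MEMORY_OBJECT_NULL) {
		return KERN_FAILURE;
	}

	/*
	 * Map the decrypting memory object over the range, copy-on-write
	 * ("copy" == TRUE) so that the decrypted originals stay clean.
	 */
	map_addr = start;
	kr = vm_map_enter_mem_object(map,
				     &map_addr,
				     size,
				     0,		/* mask */
				     VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
				     (ipc_port_t) unprotected_mem_obj,
				     0,		/* offset */
				     TRUE,	/* copy */
				     VM_PROT_READ | VM_PROT_EXECUTE,
				     VM_PROT_READ | VM_PROT_EXECUTE,
				     VM_INHERIT_COPY);
	/* release the reference obtained from apple_protect_pager_setup() */
	memory_object_deallocate(unprotected_mem_obj);
	return kr;
}
#endif /* example */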

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
				       memory_object_control_t control,
				       memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
					       memory_object_offset_t offset,
					       memory_object_cluster_size_t length,
					       vm_prot_t protection_required,
					       memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      memory_object_cluster_size_t data_cnt,
					      memory_object_offset_t *resid_offset,
					      int *io_error,
					      boolean_t dirty,
					      boolean_t kernel_copy,
					      int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
						  memory_object_offset_t offset,
						  memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      memory_object_size_t size,
					      vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      memory_object_size_t length,
					      vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
				      vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);

#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_map,
	apple_protect_pager_last_unmap,
	NULL, /* data_reclaim */
	"apple_protect"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	struct ipc_object_header pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready ? */
	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
	memory_object_control_t	pager_control;	/* mem object control handle */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
	vm_object_offset_t	backing_offset;
	vm_object_offset_t	crypto_backing_offset; /* for key... */
	vm_object_offset_t	crypto_start;
	vm_object_offset_t	crypto_end;
	struct pager_crypt_info	*crypt_info;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
#define pager_ikot pager_header.io_bits

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_lck_mtx_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;


lck_grp_t	apple_protect_pager_lck_grp;
lck_grp_attr_t	apple_protect_pager_lck_grp_attr;
lck_attr_t	apple_protect_pager_lck_attr;


/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
					     boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & (LEVEL)) == (LEVEL)) {		\
		printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
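
/*
 * Note (added for clarity): "apple_protect_pagerdebug" is a bit mask
 * tested by PAGER_DEBUG() on DEBUG kernels; setting it to PAGER_INIT,
 * PAGER_PAGEIN or PAGER_ALL (e.g. from the kernel debugger) enables
 * the corresponding traces in this file.
 */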

void
apple_protect_pager_bootstrap(void)
{
	lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
	lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
	lck_attr_setdefault(&apple_protect_pager_lck_attr);
	lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
	queue_init(&apple_protect_pager_queue);
}

/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	unsigned int		pl_count;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		kernel_mapping;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	vm_offset_t		offset_in_page;
	kern_return_t		error_code;
	vm_prot_t		prot;
	vm_page_t		src_page, top_page;
	int			interruptible;
	struct vm_object_fault_info fault_info;
	int			ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_object = VM_OBJECT_NULL;
	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *) mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
				       offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);

#if __x86_64__ || __arm__ || __arm64__
	/* we'll use the 1-to-1 mapping of physical memory */
	src_vaddr = 0;
	dst_vaddr = 0;
#else /* __x86_64__ || __arm__ || __arm64__ */
	/*
	 * Reserve 2 virtual pages in the kernel address space to map the
	 * source and destination physical pages when it's their turn to
	 * be processed.
	 */
	vm_map_entry_t	map_entry;

	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &kernel_mapping,
			       2 * PAGE_SIZE_64,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = kernel_mapping;
	vm_map_unlock(kernel_map);
	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
#endif /* __x86_64__ || __arm__ || __arm64__ */

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object);	/* to keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	     retval == KERN_SUCCESS && cur_offset < length;
	     cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_object.
		 */
	retry_src_fault:
		vm_object_lock(src_object);
		vm_object_paging_begin(src_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_object,
				   pager->backing_offset + offset + cur_offset,
				   VM_PROT_READ,
				   FALSE,
				   FALSE, /* src_page not looked up */
				   &prot,
				   &src_page,
				   &top_page,
				   NULL,
				   &error_code,
				   FALSE,
				   FALSE,
				   &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_object);
			vm_object_unlock(src_object);
			/*FALLTHROUGH*/
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			      "vm_fault_page() unexpected error 0x%x\n",
			      kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->busy);

		if (!src_page->active &&
		    !src_page->inactive &&
		    !src_page->throttled) {
			vm_page_lockspin_queues();
			if (!src_page->active &&
			    !src_page->inactive &&
			    !src_page->throttled) {
				vm_page_deactivate(src_page);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish an explicit mapping of the source
		 * physical page.
		 */
#if __x86_64__
		src_vaddr = (vm_map_offset_t)
			PHYSMAP_PTOV((pmap_paddr_t)src_page->phys_page
				     << PAGE_SHIFT);
#else
		pmap_enter(kernel_pmap,
			   src_vaddr,
			   src_page->phys_page,
			   VM_PROT_READ,
			   VM_PROT_NONE,
			   0,
			   TRUE);
#endif
		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (ppnum_t)
			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);
#if __x86_64__
		dst_vaddr = (vm_map_offset_t)
			PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
#else
		pmap_enter(kernel_pmap,
			   dst_vaddr,
			   dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   VM_PROT_NONE,
			   0,
			   TRUE);
#endif

		/*
		 * Validate the original page...
		 */
		if (src_page->object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
				     src_page->cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
				   src_page->cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
			      src_page->cs_nx);

		/*
		 * page_decrypt() might access a mapped file, so let's release
		 * the object lock for the source page to avoid a potential
		 * deadlock.  The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->busy);
		assert(src_page->object->paging_in_progress > 0);
		vm_object_unlock(src_page->object);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
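		/*
		 * Note (added for clarity): the loop below works in 4096-byte
		 * units so that, on configurations where PAGE_SIZE is larger
		 * than 4 KB, each 4 KB sub-page is checked against the crypto
		 * range and decrypted (or copied) separately.
		 */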
		for (offset_in_page = 0;
		     offset_in_page < PAGE_SIZE;
		     offset_in_page += 4096) {
			if (offset + cur_offset + offset_in_page <
			    pager->crypto_start ||
			    offset + cur_offset + offset_in_page >=
			    pager->crypto_end) {
				/* not encrypted: just copy */
				bcopy((const char *)(src_vaddr +
						     offset_in_page),
				      (char *)(dst_vaddr + offset_in_page),
				      4096);
				if (apple_protect_pager_data_request_debug) {
					printf("apple_protect_data_request"
					       "(%p,0x%llx+0x%llx+0x%04llx): "
					       "out of crypto range "
					       "[0x%llx:0x%llx]: "
					       "COPY [0x%016llx 0x%016llx] "
					       "code_signed=%d "
					       "cs_validated=%d "
					       "cs_tainted=%d "
					       "cs_nx=%d\n",
					       pager,
					       offset,
					       (uint64_t) cur_offset,
					       (uint64_t) offset_in_page,
					       pager->crypto_start,
					       pager->crypto_end,
					       *(uint64_t *)(dst_vaddr+
							     offset_in_page),
					       *(uint64_t *)(dst_vaddr+
							     offset_in_page+8),
					       src_page->object->code_signed,
					       src_page->cs_validated,
					       src_page->cs_tainted,
					       src_page->cs_nx);
				}
				ret = 0;
				continue;
			}
			ret = pager->crypt_info->page_decrypt(
				(const void *)(src_vaddr + offset_in_page),
				(void *)(dst_vaddr + offset_in_page),
				((pager->crypto_backing_offset -
				  pager->crypto_start) +	/* XXX ? */
				 offset +
				 cur_offset +
				 offset_in_page),
				pager->crypt_info->crypt_ops);
			if (apple_protect_pager_data_request_debug) {
				printf("apple_protect_data_request"
				       "(%p,0x%llx+0x%llx+0x%04llx): "
				       "in crypto range [0x%llx:0x%llx]: "
				       "DECRYPT offset 0x%llx="
				       "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
				       "[0x%016llx 0x%016llx] "
				       "code_signed=%d "
				       "cs_validated=%d "
				       "cs_tainted=%d "
				       "cs_nx=%d "
				       "ret=0x%x\n",
				       pager,
				       offset,
				       (uint64_t) cur_offset,
				       (uint64_t) offset_in_page,
				       pager->crypto_start, pager->crypto_end,
				       ((pager->crypto_backing_offset -
					 pager->crypto_start) +
					offset +
					cur_offset +
					offset_in_page),
				       pager->crypto_backing_offset,
				       pager->crypto_start,
				       offset,
				       (uint64_t) cur_offset,
				       (uint64_t) offset_in_page,
				       *(uint64_t *)(dst_vaddr+offset_in_page),
				       *(uint64_t *)(dst_vaddr+offset_in_page+8),
				       src_page->object->code_signed,
				       src_page->cs_validated,
				       src_page->cs_tainted,
				       src_page->cs_nx,
				       ret);
			}
			if (ret) {
				break;
			}
		}
		if (ret) {
			/*
			 * Decryption failed.  Abort the fault.
			 */
			retval = KERN_ABORTED;
		}

		assert(src_page->busy);
		assert(src_page->object->paging_in_progress > 0);
		vm_object_lock(src_page->object);

#if __x86_64__ || __arm__ || __arm64__
		/* we used the 1-to-1 mapping of physical memory */
		src_vaddr = 0;
		dst_vaddr = 0;
#else /* __x86_64__ || __arm__ || __arm64__ */
		/*
		 * Remove the pmap mapping of the source and destination pages
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) kernel_mapping,
			    (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));
#endif /* __x86_64__ || __arm__ || __arm64__ */

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		vm_object_paging_end(src_page->object);
		vm_object_unlock(src_page->object);
		if (top_page != VM_PAGE_NULL) {
			vm_object_t top_object;

			top_object = top_page->object;
			vm_object_lock(top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(top_object);
			vm_object_unlock(top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t	wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,	/* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			upl_commit_range(upl, 0, upl->size,
					 UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
					 upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
				   kernel_mapping,
				   kernel_mapping + (2 * PAGE_SIZE_64),
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		src_vaddr = 0;
		dst_vaddr = 0;
	}
	if (src_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_object);
	}

	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not
 * 0, i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	lck_mtx_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		     pager,
		     apple_protect_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	       __FUNCTION__,
	       pager->crypt_info,
	       pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not, but always
 * returns with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped; on the first mapping we take one extra
 * reference on the memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}
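
/*
 * Note (added for clarity): apple_protect_pager_map() and
 * apple_protect_pager_last_unmap() together maintain a single "mapping"
 * reference on the pager: the first mapping takes it and the last unmap
 * drops it.  Once only the "named" existence reference is left
 * (ref_count == 1), apple_protect_pager_deallocate_internal() terminates
 * the pager.
 */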

/*
 *
 */
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t		backing_object,
	vm_object_offset_t	backing_offset,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info	*crypt_info,
	vm_object_offset_t	crypto_start,
	vm_object_offset_t	crypto_end)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;
	struct pager_crypt_info	*old_crypt_info;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE; /* not ready until it has a "name" */
	pager->ref_count = 1;	/* existence reference (for the cache) */
	pager->ref_count++;	/* for the caller */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	       __FUNCTION__,
	       crypt_info,
	       crypt_info->page_decrypt,
	       crypt_info->crypt_end,
	       crypt_info->crypt_ops,
	       crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		     crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		     crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		     crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}

		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			       "(create match)\n",
			       __FUNCTION__,
			       pager2->crypt_info,
			       pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	if (!queue_end(&apple_protect_pager_queue,
		       (queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		       __FUNCTION__,
		       pager->crypt_info,
		       pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		       "(create used old)\n",
		       __FUNCTION__,
		       crypt_info,
		       crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t		backing_object,
	vm_object_offset_t	backing_offset,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info	*crypt_info,
	vm_object_offset_t	crypto_start,
	vm_object_offset_t	crypto_end)
{
	apple_protect_pager_t	pager;
	struct pager_crypt_info	*old_crypt_info, *new_crypt_info;

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
	       __FUNCTION__,
	       crypt_info,
	       crypt_info->page_decrypt,
	       crypt_info->crypt_end,
	       crypt_info->crypt_ops,
	       crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		      pager,
		      apple_protect_pager_t,
		      pager_queue) {
		if ((pager->crypt_info->page_decrypt !=
		     crypt_info->page_decrypt) ||
		    (pager->crypt_info->crypt_end !=
		     crypt_info->crypt_end) ||
		    (pager->crypt_info->crypt_ops !=
		     crypt_info->crypt_ops)) {
			/* no match for "crypt_info": next pager */
			continue;
		}
		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager->crypt_info);
		} else {
			/* ... switch to that pager's crypt_info */
			old_crypt_info = pager->crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			       "switching crypt_info from %p [%p,%p,%p,%d] "
			       "to %p [%p,%p,%p,%d] from pager %p\n",
			       __FUNCTION__,
			       crypt_info,
			       crypt_info->page_decrypt,
			       crypt_info->crypt_end,
			       crypt_info->crypt_ops,
			       crypt_info->crypt_refcnt,
			       old_crypt_info,
			       old_crypt_info->page_decrypt,
			       old_crypt_info->crypt_end,
			       old_crypt_info->crypt_ops,
			       old_crypt_info->crypt_refcnt,
			       pager);
			printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
			       __FUNCTION__,
			       pager->crypt_info,
			       pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			crypt_info_reference(pager->crypt_info);
		}

		if (pager->backing_object == backing_object &&
		    pager->backing_offset == backing_offset &&
		    pager->crypto_backing_offset == crypto_backing_offset &&
		    pager->crypto_start == crypto_start &&
		    pager->crypto_end == crypto_end) {
			/* full match: use that pager! */
			assert(old_crypt_info == pager->crypt_info);
			assert(old_crypt_info->crypt_refcnt > 1);
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			       "pager match with %p crypt_info %p\n",
			       __FUNCTION__,
			       pager,
			       pager->crypt_info);
			printf("CRYPT_INFO %s: deallocate %p ref %d "
			       "(pager match)\n",
			       __FUNCTION__,
			       old_crypt_info,
			       old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			/* release the extra ref on crypt_info we got above */
			crypt_info_deallocate(old_crypt_info);
			assert(old_crypt_info->crypt_refcnt > 0);
			/* give an extra reference on the pager to the caller */
			assert(pager->ref_count > 0);
			pager->ref_count++;
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		lck_mtx_unlock(&apple_protect_pager_lock);
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
		if (old_crypt_info) {
			/* use this old crypt_info for the new pager */
			new_crypt_info = old_crypt_info;
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			       "will use old_crypt_info %p for new pager\n",
			       __FUNCTION__,
			       old_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		} else {
			/* allocate a new crypt_info for the new pager */
			new_crypt_info = kalloc(sizeof (*new_crypt_info));
			if (new_crypt_info != NULL) {
				/* check for NULL before dereferencing */
				*new_crypt_info = *crypt_info;
				new_crypt_info->crypt_refcnt = 1;
			}
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: "
			       "will use new_crypt_info %p for new pager\n",
			       __FUNCTION__,
			       new_crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		}
		if (new_crypt_info == NULL) {
			/* can't create a new pager without a crypt_info */
		} else {
			/* create a new pager */
			pager = apple_protect_pager_create(
				backing_object,
				backing_offset,
				crypto_backing_offset,
				new_crypt_info,
				crypto_start,
				crypto_end);
		}
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			/* could not create a new pager */
			if (new_crypt_info == NULL) {
				/* no crypt_info to release */
			} else if (new_crypt_info == old_crypt_info) {
				/* release extra reference on old_crypt_info */
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				       "(create fail old_crypt_info)\n",
				       __FUNCTION__,
				       old_crypt_info,
				       old_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(old_crypt_info);
				old_crypt_info = NULL;
			} else {
				/* release unused new_crypt_info */
				assert(new_crypt_info->crypt_refcnt == 1);
#if CRYPT_INFO_DEBUG
				printf("CRYPT_INFO %s: deallocate %p ref %d "
				       "(create fail new_crypt_info)\n",
				       __FUNCTION__,
				       new_crypt_info,
				       new_crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
				crypt_info_deallocate(new_crypt_info);
				new_crypt_info = NULL;
			}
			return MEMORY_OBJECT_NULL;
		}
		lck_mtx_lock(&apple_protect_pager_lock);
	} else {
		assert(old_crypt_info == pager->crypt_info);
	}

	while (!pager->is_ready) {
		lck_mtx_sleep(&apple_protect_pager_lock,
			      LCK_SLEEP_DEFAULT,
			      &pager->is_ready,
			      THREAD_UNINT);
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}

void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have trimmed enough pagers */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}


void
crypt_info_reference(
	struct pager_crypt_info	*crypt_info)
{
	assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	       __FUNCTION__,
	       crypt_info,
	       crypt_info->crypt_refcnt,
	       crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}

void
crypt_info_deallocate(
	struct pager_crypt_info	*crypt_info)
{
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	       __FUNCTION__,
	       crypt_info,
	       crypt_info->crypt_refcnt,
	       crypt_info->crypt_refcnt - 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(-1, &crypt_info->crypt_refcnt);
	if (crypt_info->crypt_refcnt == 0) {
		/* deallocate any crypt module data */
		if (crypt_info->crypt_end) {
			crypt_info->crypt_end(crypt_info->crypt_ops);
			crypt_info->crypt_end = NULL;
		}
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: freeing %p\n",
		       __FUNCTION__,
		       crypt_info);
#endif /* CRYPT_INFO_DEBUG */
		kfree(crypt_info, sizeof (*crypt_info));
		crypt_info = NULL;
	}
}
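
/*
 * Illustrative sketch (added; not part of the original file): how a crypt
 * module might fill in a "pager_crypt_info" for apple_protect_pager_setup(),
 * assuming the field layout declared in <kern/page_decrypt.h>.  The names
 * "example_page_decrypt", "example_crypt_end" and "example_state" are
 * made up for this sketch.
 */
#if 0 /* example only, not compiled */
static int
example_page_decrypt(
	const void		*src_vaddr,
	void			*dst_vaddr,
	unsigned long long	src_offset,
	void			*crypt_ops)
{
	/*
	 * Decrypt one 4 KB unit from src_vaddr into dst_vaddr;
	 * "src_offset" locates the unit within the crypto range.
	 * Return 0 on success; non-zero aborts the page-in.
	 */
	(void) src_vaddr; (void) dst_vaddr;
	(void) src_offset; (void) crypt_ops;
	return 0;
}

static void
example_crypt_end(
	void	*crypt_ops)
{
	/* called by crypt_info_deallocate() when the refcount hits 0 */
	(void) crypt_ops;
}

static memory_object_t
example_setup(
	vm_object_t		backing_object,
	vm_object_offset_t	size,
	void			*example_state)
{
	struct pager_crypt_info	crypt_info;

	crypt_info.page_decrypt = example_page_decrypt;
	crypt_info.crypt_end = example_crypt_end;
	crypt_info.crypt_ops = example_state;	/* crypt module private data */
	crypt_info.crypt_refcnt = 0;	/* the pager layer manages the count */

	/*
	 * apple_protect_pager_setup() either re-uses a matching pager's
	 * crypt_info or copies this one into a freshly allocated,
	 * reference-counted "pager_crypt_info".
	 */
	return apple_protect_pager_setup(backing_object,
					 0,	/* backing_offset */
					 0,	/* crypto_backing_offset */
					 &crypt_info,
					 0,	/* crypto_start */
					 size);	/* crypto_end */
}
#endif /* example */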