]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_apple_protect.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / osfmk / vm / vm_apple_protect.c
CommitLineData
0c530ab8 1/*
f427ee49 2 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
0c530ab8 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
0c530ab8
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
0c530ab8
A
27 */
28
29#include <sys/errno.h>
30
31#include <mach/mach_types.h>
32#include <mach/mach_traps.h>
33#include <mach/host_priv.h>
34#include <mach/kern_return.h>
35#include <mach/memory_object_control.h>
36#include <mach/memory_object_types.h>
37#include <mach/port.h>
38#include <mach/policy.h>
39#include <mach/upl.h>
40#include <mach/thread_act.h>
41#include <mach/mach_vm.h>
42
43#include <kern/host.h>
44#include <kern/kalloc.h>
45#include <kern/page_decrypt.h>
46#include <kern/queue.h>
47#include <kern/thread.h>
39037602 48#include <kern/ipc_kobject.h>
0a7de745 49#include <os/refcnt.h>
0c530ab8
A
50
51#include <ipc/ipc_port.h>
52#include <ipc/ipc_space.h>
53
2d21ac55 54#include <vm/vm_fault.h>
0c530ab8
A
55#include <vm/vm_map.h>
56#include <vm/vm_pageout.h>
57#include <vm/memory_object.h>
58#include <vm/vm_pageout.h>
59#include <vm/vm_protos.h>
39037602 60#include <vm/vm_kern.h>
0c530ab8
A
61
62
0a7de745
A
63/*
64 * APPLE PROTECT MEMORY PAGER
0c530ab8
A
65 *
66 * This external memory manager (EMM) handles memory from the encrypted
67 * sections of some executables protected by the DSMOS kernel extension.
0a7de745 68 *
0c530ab8
A
69 * It mostly handles page-in requests (from memory_object_data_request()) by
70 * getting the encrypted data from its backing VM object, itself backed by
71 * the encrypted file, decrypting it and providing it to VM.
72 *
73 * The decrypted pages will never be dirtied, so the memory manager doesn't
74 * need to handle page-out requests (from memory_object_data_return()). The
75 * pages need to be mapped copy-on-write, so that the originals stay clean.
76 *
77 * We don't expect to have to handle a large number of apple-protected
78 * binaries, so the data structures are very simple (simple linked list)
79 * for now.
80 */
81
/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
boolean_t apple_protect_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);

/* set to 1 to log crypt_info reference-count activity (see crypt_info_*) */
#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
void crypt_info_deallocate(struct pager_crypt_info *crypt_info);
125
0c530ab8
A
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 * Entries left NULL (data_reclaim) are operations this pager does not
 * implement.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	.memory_object_reference = apple_protect_pager_reference,
	.memory_object_deallocate = apple_protect_pager_deallocate,
	.memory_object_init = apple_protect_pager_init,
	.memory_object_terminate = apple_protect_pager_terminate,
	.memory_object_data_request = apple_protect_pager_data_request,
	.memory_object_data_return = apple_protect_pager_data_return,
	.memory_object_data_initialize = apple_protect_pager_data_initialize,
	.memory_object_data_unlock = apple_protect_pager_data_unlock,
	.memory_object_synchronize = apple_protect_pager_synchronize,
	.memory_object_map = apple_protect_pager_map,
	.memory_object_last_unmap = apple_protect_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = apple_protect_pager_backing_object,
	.memory_object_pager_name = "apple_protect"
};
146
147/*
148 * The "apple_protect_pager" describes a memory object backed by
149 * the "apple protect" EMM.
150 */
151typedef struct apple_protect_pager {
5ba3f43e
A
152 /* mandatory generic header */
153 struct memory_object ap_pgr_hdr;
154
155 /* pager-specific data */
0a7de745
A
156 queue_chain_t pager_queue; /* next & prev pagers */
157 struct os_refcnt ref_count; /* reference count */
158 boolean_t is_ready; /* is this pager ready ? */
159 boolean_t is_mapped; /* is this mem_obj mapped ? */
160 vm_object_t backing_object; /* VM obj w/ encrypted data */
161 vm_object_offset_t backing_offset;
162 vm_object_offset_t crypto_backing_offset; /* for key... */
163 vm_object_offset_t crypto_start;
164 vm_object_offset_t crypto_end;
3e170ce0 165 struct pager_crypt_info *crypt_info;
0c530ab8 166} *apple_protect_pager_t;
0a7de745 167#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
0c530ab8
A
168
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;        /* number of pagers */
int apple_protect_pager_count_mapped = 0; /* number of mapped pagers
                                           * (see apple_protect_pager_map) */
queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue);
LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect");
LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 20;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;
191
b0d623f7 192
b0d623f7 193
0c530ab8 194/* internal prototypes */
3e170ce0
A
195apple_protect_pager_t apple_protect_pager_create(
196 vm_object_t backing_object,
197 vm_object_offset_t backing_offset,
198 vm_object_offset_t crypto_backing_offset,
199 struct pager_crypt_info *crypt_info,
200 vm_object_offset_t crypto_start,
201 vm_object_offset_t crypto_end);
0c530ab8
A
202apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
203void apple_protect_pager_dequeue(apple_protect_pager_t pager);
204void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
0a7de745 205 boolean_t locked);
0c530ab8
A
206void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
207void apple_protect_pager_trim(void);
208
209
210#if DEBUG
211int apple_protect_pagerdebug = 0;
0a7de745
A
212#define PAGER_ALL 0xffffffff
213#define PAGER_INIT 0x00000001
214#define PAGER_PAGEIN 0x00000002
215
216#define PAGER_DEBUG(LEVEL, A) \
217 MACRO_BEGIN \
218 if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \
219 printf A; \
220 } \
0c530ab8
A
221 MACRO_END
222#else
223#define PAGER_DEBUG(LEVEL, A)
224#endif
225
0c530ab8
A
/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and makes it ready to be used and mapped.
 *
 * Takes a reference on "control" and stores it in the pager's generic
 * header, then tells VM about the caching attributes of this object.
 * Returns KERN_INVALID_ARGUMENT if "control" is NULL; panics if the
 * attributes cannot be set (should not happen).
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t pager;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("apple_protect_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = apple_protect_pager_lookup(mem_obj);

	/* keep the control port alive for the pager's lifetime */
	memory_object_control_reference(control);

	pager->ap_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("apple_protect_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
		memory_object_mark_eligible_for_secluded(control, TRUE);
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}
282
/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}
305
/*
 * apple_protect_pager_data_initialize()
 *
 * This pager never supplies initial data to VM, so this entry point
 * must never be reached; panic if it is.
 */
kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}
315
/*
 * apple_protect_pager_data_unlock()
 *
 * This pager never restricts access to its pages, so there is nothing
 * to unlock: always fail.
 */
kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	return KERN_FAILURE;
}
325
/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 *
 * For each page requested (gathered in a UPL), fault in the corresponding
 * encrypted source page from the backing object, then either decrypt it
 * (if it falls inside [crypto_start, crypto_end)) or copy it verbatim into
 * the destination page.  Code-signing validation results are propagated
 * from the source page to the UPL.  On success the UPL is committed
 * "clean"; on failure it is aborted.
 */
int apple_protect_pager_data_request_debug = 0;
kern_return_t
apple_protect_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t pager;
	memory_object_control_t mo_control;
	upl_t upl;
	int upl_flags;
	upl_size_t upl_size;
	upl_page_info_t *upl_pl;
	unsigned int pl_count;
	vm_object_t src_top_object, src_page_object, dst_object;
	kern_return_t kr, retval;
	vm_offset_t src_vaddr, dst_vaddr;
	vm_offset_t cur_offset;
	vm_offset_t offset_in_page;
	kern_return_t error_code;
	vm_prot_t prot;
	vm_page_t src_page, top_page;
	int interruptible;
	struct vm_object_fault_info fault_info;
	int ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	upl = NULL;
	upl_pl = NULL;
	/* take a private copy of the caller's fault info so we can adjust it */
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/* translate the cluster bounds into backing-object offsets */
	fault_info.lo_offset += pager->backing_offset;
	fault_info.hi_offset += pager->backing_offset;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->ap_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_top_object = pager->backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    pager->backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,              /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			    "vm_fault_page() unexpected error 0x%x\n",
			    kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		/* keep the source page on the speculative queue */
		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();

			/* re-check under the queues lock */
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		    << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);

		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * page_decrypt() might access a mapped file, so let's release
		 * the object lock for the source page to avoid a potential
		 * deadlock. The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 * Work in 4K sub-pages in case PAGE_SIZE is larger.
		 */
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += 4096) {
			if (offset + cur_offset + offset_in_page <
			    pager->crypto_start ||
			    offset + cur_offset + offset_in_page >=
			    pager->crypto_end) {
				/* not encrypted: just copy */
				bcopy((const char *)(src_vaddr +
				    offset_in_page),
				    (char *)(dst_vaddr + offset_in_page),
				    4096);

				if (apple_protect_pager_data_request_debug) {
					printf("apple_protect_data_request"
					    "(%p,0x%llx+0x%llx+0x%04llx): "
					    "out of crypto range "
					    "[0x%llx:0x%llx]: "
					    "COPY [0x%016llx 0x%016llx] "
					    "code_signed=%d "
					    "cs_validated=%d "
					    "cs_tainted=%d "
					    "cs_nx=%d\n",
					    pager,
					    offset,
					    (uint64_t) cur_offset,
					    (uint64_t) offset_in_page,
					    pager->crypto_start,
					    pager->crypto_end,
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page),
					    *(uint64_t *)(dst_vaddr +
					    offset_in_page + 8),
					    src_page_object->code_signed,
					    src_page->vmp_cs_validated,
					    src_page->vmp_cs_tainted,
					    src_page->vmp_cs_nx);
				}
				ret = 0;
				continue;
			}
			ret = pager->crypt_info->page_decrypt(
				(const void *)(src_vaddr + offset_in_page),
				(void *)(dst_vaddr + offset_in_page),
				((pager->crypto_backing_offset -
				pager->crypto_start) +  /* XXX ? */
				offset +
				cur_offset +
				offset_in_page),
				pager->crypt_info->crypt_ops);

			if (apple_protect_pager_data_request_debug) {
				printf("apple_protect_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): "
				    "in crypto range [0x%llx:0x%llx]: "
				    "DECRYPT offset 0x%llx="
				    "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "ret=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    pager->crypto_start, pager->crypto_end,
				    ((pager->crypto_backing_offset -
				    pager->crypto_start) +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->crypto_backing_offset,
				    pager->crypto_start,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    ret);
			}
			if (ret) {
				break;
			}
		}
		if (ret) {
			/*
			 * Decryption failed. Abort the fault.
			 */
			retval = KERN_ABORTED;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,  /* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}
729
730/*
731 * apple_protect_pager_reference()
732 *
733 * Get a reference on this memory object.
734 * For external usage only. Assumes that the initial reference count is not 0,
735 * i.e one should not "revive" a dead pager this way.
736 */
737void
738apple_protect_pager_reference(
0a7de745
A
739 memory_object_t mem_obj)
740{
741 apple_protect_pager_t pager;
0c530ab8
A
742
743 pager = apple_protect_pager_lookup(mem_obj);
744
b0d623f7 745 lck_mtx_lock(&apple_protect_pager_lock);
0a7de745 746 os_ref_retain_locked(&pager->ref_count);
b0d623f7 747 lck_mtx_unlock(&apple_protect_pager_lock);
0c530ab8
A
748}
749
750
751/*
752 * apple_protect_pager_dequeue:
753 *
754 * Removes a pager from the list of pagers.
755 *
756 * The caller must hold "apple_protect_pager_lock".
757 */
758void
759apple_protect_pager_dequeue(
760 apple_protect_pager_t pager)
761{
762 assert(!pager->is_mapped);
763
764 queue_remove(&apple_protect_pager_queue,
0a7de745
A
765 pager,
766 apple_protect_pager_t,
767 pager_queue);
0c530ab8
A
768 pager->pager_queue.next = NULL;
769 pager->pager_queue.prev = NULL;
0a7de745 770
0c530ab8
A
771 apple_protect_pager_count--;
772}
773
/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	/* release our reference on the backing (encrypted) VM object */
	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	    __FUNCTION__,
	    pager->crypt_info,
	    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}
812
/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t pager,
	boolean_t locked)
{
	boolean_t needs_trimming;
	int count_unmapped;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	/* decide (while still holding the lock) if a trim pass is needed */
	count_unmapped = (apple_protect_pager_count -
	    apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	os_ref_count_t ref_count = os_ref_release_locked(&pager->ref_count);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof(*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}
878
879/*
880 * apple_protect_pager_deallocate()
881 *
882 * Release a reference on this pager and free it when the last
883 * reference goes away.
884 */
885void
886apple_protect_pager_deallocate(
0a7de745 887 memory_object_t mem_obj)
0c530ab8 888{
0a7de745 889 apple_protect_pager_t pager;
0c530ab8 890
2d21ac55 891 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
0c530ab8
A
892 pager = apple_protect_pager_lookup(mem_obj);
893 apple_protect_pager_deallocate_internal(pager, FALSE);
894}
895
/*
 * apple_protect_pager_terminate()
 *
 * Nothing to do at termination time: the real cleanup happens in
 * apple_protect_pager_deallocate_internal() when the last reference
 * is dropped.
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}
910
/*
 * apple_protect_pager_synchronize()
 *
 * memory_object_synchronize is no longer supported by VM;
 * this entry point must never be reached.
 */
kern_return_t
apple_protect_pager_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t sync_flags)
{
	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}
924
925/*
926 * apple_protect_pager_map()
927 *
928 * This allows VM to let us, the EMM, know that this memory object
b0d623f7
A
929 * is currently mapped one or more times. This is called by VM each time
930 * the memory object gets mapped and we take one extra reference on the
0c530ab8
A
931 * memory object to account for all its mappings.
932 */
593a1d5f 933kern_return_t
0c530ab8 934apple_protect_pager_map(
0a7de745
A
935 memory_object_t mem_obj,
936 __unused vm_prot_t prot)
0c530ab8 937{
0a7de745 938 apple_protect_pager_t pager;
0c530ab8 939
2d21ac55 940 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
0c530ab8
A
941
942 pager = apple_protect_pager_lookup(mem_obj);
943
b0d623f7 944 lck_mtx_lock(&apple_protect_pager_lock);
0c530ab8 945 assert(pager->is_ready);
0a7de745 946 assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */
0c530ab8
A
947 if (pager->is_mapped == FALSE) {
948 /*
949 * First mapping of this pager: take an extra reference
950 * that will remain until all the mappings of this pager
951 * are removed.
952 */
953 pager->is_mapped = TRUE;
0a7de745 954 os_ref_retain_locked(&pager->ref_count);
0c530ab8
A
955 apple_protect_pager_count_mapped++;
956 }
b0d623f7 957 lck_mtx_unlock(&apple_protect_pager_lock);
593a1d5f
A
958
959 return KERN_SUCCESS;
0c530ab8
A
960}
961
962/*
593a1d5f 963 * apple_protect_pager_last_unmap()
0c530ab8
A
964 *
965 * This is called by VM when this memory object is no longer mapped anywhere.
966 */
967kern_return_t
593a1d5f 968apple_protect_pager_last_unmap(
0a7de745 969 memory_object_t mem_obj)
0c530ab8 970{
0a7de745
A
971 apple_protect_pager_t pager;
972 int count_unmapped;
0c530ab8 973
593a1d5f 974 PAGER_DEBUG(PAGER_ALL,
0a7de745 975 ("apple_protect_pager_last_unmap: %p\n", mem_obj));
0c530ab8
A
976
977 pager = apple_protect_pager_lookup(mem_obj);
978
b0d623f7 979 lck_mtx_lock(&apple_protect_pager_lock);
0c530ab8
A
980 if (pager->is_mapped) {
981 /*
982 * All the mappings are gone, so let go of the one extra
983 * reference that represents all the mappings of this pager.
984 */
985 apple_protect_pager_count_mapped--;
986 count_unmapped = (apple_protect_pager_count -
0a7de745 987 apple_protect_pager_count_mapped);
0c530ab8
A
988 if (count_unmapped > apple_protect_pager_count_unmapped_max) {
989 apple_protect_pager_count_unmapped_max = count_unmapped;
990 }
991 pager->is_mapped = FALSE;
992 apple_protect_pager_deallocate_internal(pager, TRUE);
993 /* caution: deallocate_internal() released the lock ! */
994 } else {
b0d623f7 995 lck_mtx_unlock(&apple_protect_pager_lock);
0c530ab8 996 }
0a7de745 997
0c530ab8
A
998 return KERN_SUCCESS;
999}
1000
a991bd8d
A
1001boolean_t
1002apple_protect_pager_backing_object(
1003 memory_object_t mem_obj,
1004 memory_object_offset_t offset,
1005 vm_object_t *backing_object,
1006 vm_object_offset_t *backing_offset)
1007{
1008 apple_protect_pager_t pager;
1009
1010 PAGER_DEBUG(PAGER_ALL,
1011 ("apple_protect_pager_backing_object: %p\n", mem_obj));
1012
1013 pager = apple_protect_pager_lookup(mem_obj);
1014
1015 *backing_object = pager->backing_object;
1016 *backing_offset = pager->backing_offset + offset;
1017
1018 return TRUE;
1019}
0c530ab8
A
1020
1021/*
1022 *
1023 */
1024apple_protect_pager_t
1025apple_protect_pager_lookup(
0a7de745 1026 memory_object_t mem_obj)
0c530ab8 1027{
0a7de745 1028 apple_protect_pager_t pager;
0c530ab8 1029
5ba3f43e 1030 assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops);
d9a64523 1031 pager = (apple_protect_pager_t)(uintptr_t) mem_obj;
0a7de745 1032 assert(os_ref_get_count(&pager->ref_count) > 0);
0c530ab8
A
1033 return pager;
1034}
1035
/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new "apple protect" pager for the given
 * backing VM object / crypto window.  If another thread races us and
 * creates an equivalent pager first, discard ours and return the
 * winner's.  On success the returned pager holds 2 references: the
 * cache's existence reference and one for the caller.
 * Ownership of "crypt_info" passes to the pager (or is released here
 * if an equivalent crypt_info is re-used instead).
 */
apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	vm_object_offset_t      crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t      crypto_start,
	vm_object_offset_t      crypto_end)
{
	apple_protect_pager_t   pager, pager2;
	memory_object_control_t control;
	kern_return_t           kr;
	struct pager_crypt_info *old_crypt_info;

	pager = (apple_protect_pager_t) kalloc(sizeof(*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;/* not ready until it has a "name" */
	os_ref_init_count(&pager->ref_count, NULL, 2); /* existence reference (for the cache) and another for the caller */
	pager->is_mapped = FALSE;
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	    __FUNCTION__,
	    crypt_info,
	    crypt_info->page_decrypt,
	    crypt_info->crypt_end,
	    crypt_info->crypt_ops,
	    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	/* the pager keeps its own reference on the backing object */
	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
	    pager2,
	    apple_protect_pager_t,
	    pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		    crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		    crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		    crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}

		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/*
			 * ... switch to that pager's crypt_info so that
			 * equivalent crypt_info structures get shared
			 * instead of duplicated.
			 */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			    "(create match)\n",
			    __FUNCTION__,
			    pager2->crypt_info,
			    pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	/* pager2 is a real pager iff the loop broke before queue end */
	if (!queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		    __FUNCTION__,
		    pager->crypt_info,
		    pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree(pager, sizeof(*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
	    pager,
	    apple_protect_pager_t,
	    pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* give the pager a "name" (its memory object control port) */
	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		    "(create used old)\n",
		    __FUNCTION__,
		    crypt_info,
		    crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}
1196
1197/*
1198 * apple_protect_pager_setup()
1199 *
1200 * Provide the caller with a memory object backed by the provided
1201 * "backing_object" VM object. If such a memory object already exists,
1202 * re-use it, otherwise create a new memory object.
1203 */
1204memory_object_t
1205apple_protect_pager_setup(
0a7de745
A
1206 vm_object_t backing_object,
1207 vm_object_offset_t backing_offset,
1208 vm_object_offset_t crypto_backing_offset,
3e170ce0 1209 struct pager_crypt_info *crypt_info,
0a7de745
A
1210 vm_object_offset_t crypto_start,
1211 vm_object_offset_t crypto_end)
0c530ab8 1212{
0a7de745
A
1213 apple_protect_pager_t pager;
1214 struct pager_crypt_info *old_crypt_info, *new_crypt_info;
3e170ce0
A
1215
1216#if CRYPT_INFO_DEBUG
1217 printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n",
0a7de745
A
1218 __FUNCTION__,
1219 crypt_info,
1220 crypt_info->page_decrypt,
1221 crypt_info->crypt_end,
1222 crypt_info->crypt_ops,
1223 crypt_info->crypt_refcnt);
3e170ce0
A
1224#endif /* CRYPT_INFO_DEBUG */
1225
1226 old_crypt_info = NULL;
0c530ab8 1227
b0d623f7 1228 lck_mtx_lock(&apple_protect_pager_lock);
0c530ab8
A
1229
1230 queue_iterate(&apple_protect_pager_queue,
0a7de745
A
1231 pager,
1232 apple_protect_pager_t,
1233 pager_queue) {
3e170ce0 1234 if ((pager->crypt_info->page_decrypt !=
0a7de745 1235 crypt_info->page_decrypt) ||
3e170ce0 1236 (pager->crypt_info->crypt_end !=
0a7de745 1237 crypt_info->crypt_end) ||
3e170ce0 1238 (pager->crypt_info->crypt_ops !=
0a7de745 1239 crypt_info->crypt_ops)) {
3e170ce0
A
1240 /* no match for "crypt_info": next pager */
1241 continue;
1242 }
1243 /* found a match for crypt_info ... */
1244 if (old_crypt_info) {
1245 /* ... already switched to that crypt_info */
1246 assert(old_crypt_info == pager->crypt_info);
1247 } else {
1248 /* ... switch to that pager's crypt_info */
1249 old_crypt_info = pager->crypt_info;
1250#if CRYPT_INFO_DEBUG
1251 printf("CRYPT_INFO %s: "
0a7de745
A
1252 "switching crypt_info from %p [%p,%p,%p,%d] "
1253 "to %p [%p,%p,%p,%d] from pager %p\n",
1254 __FUNCTION__,
1255 crypt_info,
1256 crypt_info->page_decrypt,
1257 crypt_info->crypt_end,
1258 crypt_info->crypt_ops,
1259 crypt_info->crypt_refcnt,
1260 old_crypt_info,
1261 old_crypt_info->page_decrypt,
1262 old_crypt_info->crypt_end,
1263 old_crypt_info->crypt_ops,
1264 old_crypt_info->crypt_refcnt,
1265 pager);
3e170ce0 1266 printf("CRYPT_INFO %s: %p ref %d (setup match)\n",
0a7de745
A
1267 __FUNCTION__,
1268 pager->crypt_info,
1269 pager->crypt_info->crypt_refcnt);
3e170ce0
A
1270#endif /* CRYPT_INFO_DEBUG */
1271 crypt_info_reference(pager->crypt_info);
1272 }
0a7de745 1273
3e170ce0
A
1274 if (pager->backing_object == backing_object &&
1275 pager->backing_offset == backing_offset &&
1276 pager->crypto_backing_offset == crypto_backing_offset &&
1277 pager->crypto_start == crypto_start &&
1278 pager->crypto_end == crypto_end) {
1279 /* full match: use that pager! */
1280 assert(old_crypt_info == pager->crypt_info);
1281 assert(old_crypt_info->crypt_refcnt > 1);
1282#if CRYPT_INFO_DEBUG
1283 printf("CRYPT_INFO %s: "
0a7de745
A
1284 "pager match with %p crypt_info %p\n",
1285 __FUNCTION__,
1286 pager,
1287 pager->crypt_info);
3e170ce0 1288 printf("CRYPT_INFO %s: deallocate %p ref %d "
0a7de745
A
1289 "(pager match)\n",
1290 __FUNCTION__,
1291 old_crypt_info,
1292 old_crypt_info->crypt_refcnt);
3e170ce0
A
1293#endif /* CRYPT_INFO_DEBUG */
1294 /* release the extra ref on crypt_info we got above */
1295 crypt_info_deallocate(old_crypt_info);
1296 assert(old_crypt_info->crypt_refcnt > 0);
1297 /* give extra reference on pager to the caller */
0a7de745 1298 os_ref_retain_locked(&pager->ref_count);
0c530ab8
A
1299 break;
1300 }
1301 }
1302 if (queue_end(&apple_protect_pager_queue,
0a7de745 1303 (queue_entry_t) pager)) {
3e170ce0 1304 lck_mtx_unlock(&apple_protect_pager_lock);
0c530ab8
A
1305 /* no existing pager for this backing object */
1306 pager = APPLE_PROTECT_PAGER_NULL;
3e170ce0
A
1307 if (old_crypt_info) {
1308 /* use this old crypt_info for new pager */
1309 new_crypt_info = old_crypt_info;
1310#if CRYPT_INFO_DEBUG
1311 printf("CRYPT_INFO %s: "
0a7de745
A
1312 "will use old_crypt_info %p for new pager\n",
1313 __FUNCTION__,
1314 old_crypt_info);
3e170ce0
A
1315#endif /* CRYPT_INFO_DEBUG */
1316 } else {
1317 /* allocate a new crypt_info for new pager */
0a7de745 1318 new_crypt_info = kalloc(sizeof(*new_crypt_info));
3e170ce0
A
1319 *new_crypt_info = *crypt_info;
1320 new_crypt_info->crypt_refcnt = 1;
1321#if CRYPT_INFO_DEBUG
1322 printf("CRYPT_INFO %s: "
0a7de745
A
1323 "will use new_crypt_info %p for new pager\n",
1324 __FUNCTION__,
1325 new_crypt_info);
3e170ce0
A
1326#endif /* CRYPT_INFO_DEBUG */
1327 }
1328 if (new_crypt_info == NULL) {
1329 /* can't create new pager without a crypt_info */
1330 } else {
1331 /* create new pager */
1332 pager = apple_protect_pager_create(
1333 backing_object,
1334 backing_offset,
1335 crypto_backing_offset,
1336 new_crypt_info,
1337 crypto_start,
1338 crypto_end);
1339 }
0c530ab8 1340 if (pager == APPLE_PROTECT_PAGER_NULL) {
3e170ce0
A
1341 /* could not create a new pager */
1342 if (new_crypt_info == old_crypt_info) {
1343 /* release extra reference on old_crypt_info */
1344#if CRYPT_INFO_DEBUG
1345 printf("CRYPT_INFO %s: deallocate %p ref %d "
0a7de745
A
1346 "(create fail old_crypt_info)\n",
1347 __FUNCTION__,
1348 old_crypt_info,
1349 old_crypt_info->crypt_refcnt);
3e170ce0
A
1350#endif /* CRYPT_INFO_DEBUG */
1351 crypt_info_deallocate(old_crypt_info);
1352 old_crypt_info = NULL;
1353 } else {
1354 /* release unused new_crypt_info */
1355 assert(new_crypt_info->crypt_refcnt == 1);
1356#if CRYPT_INFO_DEBUG
1357 printf("CRYPT_INFO %s: deallocate %p ref %d "
0a7de745
A
1358 "(create fail new_crypt_info)\n",
1359 __FUNCTION__,
1360 new_crypt_info,
1361 new_crypt_info->crypt_refcnt);
3e170ce0
A
1362#endif /* CRYPT_INFO_DEBUG */
1363 crypt_info_deallocate(new_crypt_info);
1364 new_crypt_info = NULL;
1365 }
0c530ab8
A
1366 return MEMORY_OBJECT_NULL;
1367 }
3e170ce0
A
1368 lck_mtx_lock(&apple_protect_pager_lock);
1369 } else {
1370 assert(old_crypt_info == pager->crypt_info);
0c530ab8
A
1371 }
1372
0c530ab8 1373 while (!pager->is_ready) {
b0d623f7 1374 lck_mtx_sleep(&apple_protect_pager_lock,
0a7de745
A
1375 LCK_SLEEP_DEFAULT,
1376 &pager->is_ready,
1377 THREAD_UNINT);
0c530ab8 1378 }
b0d623f7 1379 lck_mtx_unlock(&apple_protect_pager_lock);
0c530ab8
A
1380
1381 return (memory_object_t) pager;
0a7de745 1382}
0c530ab8
A
1383
/*
 * apple_protect_pager_trim()
 *
 * There are too many cached (unmapped) pagers: walk the pager queue
 * from the oldest end and tear down idle ones (ref_count == 2, ready,
 * not mapped) until the number of unmapped pagers is back under
 * apple_protect_pager_cache_limit.  Termination is done outside the
 * lock, on a private trim queue.
 */
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t   pager, prev_pager;
	queue_head_t            trim_queue;
	int                     num_trim;
	int                     count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
	    queue_last(&apple_protect_pager_queue);
	    !queue_end(&apple_protect_pager_queue,
	    (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
		    queue_prev(&pager->pager_queue);

		/*
		 * ref_count == 2 means only the cache's existence ref
		 * and the "is on the queue" state remain: nobody is
		 * actively using this pager.
		 */
		if (os_ref_get_count(&pager->ref_count) == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    apple_protect_pager_t,
			    pager_queue);

			count_unmapped = (apple_protect_pager_count -
			    apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	/* update trim statistics */
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    apple_protect_pager_t,
		    pager_queue);
		/* poison the queue linkage: this pager is off all lists */
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		os_ref_count_t __assert_only count = os_ref_release_locked(&pager->ref_count);
		assert(count == 1);
		apple_protect_pager_terminate_internal(pager);
	}
}
3e170ce0
A
1456
1457
1458void
1459crypt_info_reference(
1460 struct pager_crypt_info *crypt_info)
1461{
1462 assert(crypt_info->crypt_refcnt != 0);
1463#if CRYPT_INFO_DEBUG
1464 printf("CRYPT_INFO %s: %p ref %d -> %d\n",
0a7de745
A
1465 __FUNCTION__,
1466 crypt_info,
1467 crypt_info->crypt_refcnt,
1468 crypt_info->crypt_refcnt + 1);
3e170ce0
A
1469#endif /* CRYPT_INFO_DEBUG */
1470 OSAddAtomic(+1, &crypt_info->crypt_refcnt);
1471}
1472
1473void
1474crypt_info_deallocate(
0a7de745 1475 struct pager_crypt_info *crypt_info)
3e170ce0
A
1476{
1477#if CRYPT_INFO_DEBUG
1478 printf("CRYPT_INFO %s: %p ref %d -> %d\n",
0a7de745
A
1479 __FUNCTION__,
1480 crypt_info,
1481 crypt_info->crypt_refcnt,
1482 crypt_info->crypt_refcnt - 1);
3e170ce0
A
1483#endif /* CRYPT_INFO_DEBUG */
1484 OSAddAtomic(-1, &crypt_info->crypt_refcnt);
1485 if (crypt_info->crypt_refcnt == 0) {
1486 /* deallocate any crypt module data */
1487 if (crypt_info->crypt_end) {
1488 crypt_info->crypt_end(crypt_info->crypt_ops);
1489 crypt_info->crypt_end = NULL;
1490 }
1491#if CRYPT_INFO_DEBUG
1492 printf("CRYPT_INFO %s: freeing %p\n",
0a7de745
A
1493 __FUNCTION__,
1494 crypt_info);
3e170ce0 1495#endif /* CRYPT_INFO_DEBUG */
0a7de745 1496 kfree(crypt_info, sizeof(*crypt_info));
3e170ce0
A
1497 crypt_info = NULL;
1498 }
1499}