/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>


/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list)
 * for now.
 */
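
/*
 * Rough life cycle, as a sketch pieced together from the routines
 * below (the caller that creates these pagers lives elsewhere in the
 * VM code, e.g. vm_map_apple_protected() in osfmk/vm/vm_map.c):
 *
 *	struct pager_crypt_info crypt_info;	// decryption hooks,
 *						// provided by DSMOS
 *	memory_object_t mem_obj;
 *
 *	mem_obj = apple_protect_pager_setup(backing_object, &crypt_info);
 *
 * VM then maps "mem_obj" copy-on-write over the encrypted range: each
 * mapping goes through apple_protect_pager_map(), each page fault
 * through apple_protect_pager_data_request(), and the last unmapping
 * through apple_protect_pager_last_unmap(), after which the pager may
 * be trimmed and terminated.
 */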

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
				       memory_object_control_t control,
				       memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
					       memory_object_offset_t offset,
					       memory_object_cluster_size_t length,
					       vm_prot_t protection_required,
					       memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      memory_object_cluster_size_t data_cnt,
					      memory_object_offset_t *resid_offset,
					      int *io_error,
					      boolean_t dirty,
					      boolean_t kernel_copy,
					      int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
						  memory_object_offset_t offset,
						  memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      memory_object_size_t size,
					      vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      memory_object_size_t length,
					      vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
				      vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_map,
	apple_protect_pager_last_unmap,
	"apple protect pager"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	struct ipc_object_header pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready? */
	boolean_t		is_mapped;	/* is this mem_obj mapped? */
	memory_object_control_t	pager_control;	/* mem object control handle */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
	struct pager_crypt_info	crypt;		/* decryption hooks & state */
} *apple_protect_pager_t;
#define	APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
#define	pager_ikot	pager_header.io_bits
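
/*
 * Reference counting, as implemented by the routines below: a pager
 * starts life in apple_protect_pager_create() with 2 references
 * ("existence" + "setup"), gains one extra reference for as long as it
 * is mapped (apple_protect_pager_map() / ..._last_unmap()), gets
 * terminated when it drops to its last reference, and is freed once
 * the count reaches 0 (see apple_protect_pager_deallocate_internal()).
 */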

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_lck_mtx_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;


lck_grp_t		apple_protect_pager_lck_grp;
lck_grp_attr_t		apple_protect_pager_lck_grp_attr;
lck_attr_t		apple_protect_pager_lck_attr;


/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object,
						 struct pager_crypt_info *crypt_info);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
					     boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);

#if DEBUG
int apple_protect_pagerdebug = 0;
#define	PAGER_ALL		0xffffffff
#define	PAGER_INIT		0x00000001
#define	PAGER_PAGEIN		0x00000002

#define	PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {		\
		printf A;						\
	}								\
	MACRO_END
#else
#define	PAGER_DEBUG(LEVEL, A)
#endif
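
/*
 * Note the extra parentheses at PAGER_DEBUG() call sites, e.g.:
 *
 *	PAGER_DEBUG(PAGER_INIT, ("init: %p\n", mem_obj));
 *
 * "A" expands to a complete printf() argument list, which is how this
 * macro takes a variable number of arguments without variadic macros.
 */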


void
apple_protect_pager_bootstrap(void)
{
	lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
	lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect",
		     &apple_protect_pager_lck_grp_attr);
	lck_attr_setdefault(&apple_protect_pager_lck_attr);
	lck_mtx_init(&apple_protect_pager_lock,
		     &apple_protect_pager_lck_grp,
		     &apple_protect_pager_lck_attr);
	queue_init(&apple_protect_pager_queue);
}
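
/*
 * Note: apple_protect_pager_bootstrap() must run before any pager is
 * created, since the routines below rely on "apple_protect_pager_lock"
 * and "apple_protect_pager_queue" being initialized.
 */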

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	/* delayed (i.e. copy-on-write) copies, so the originals stay clean */
	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	/* don't keep this object cached once it's no longer in use */
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	/* this pager never restricts access, so there's nothing to unlock */
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	unsigned int		pl_count;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		kernel_mapping;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	vm_map_entry_t		map_entry;
	kern_return_t		error_code;
	vm_prot_t		prot;
	vm_page_t		src_page, top_page;
	int			interruptible;
	struct vm_object_fault_info fault_info;
	int			ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_object = VM_OBJECT_NULL;
	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *) mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.mark_zf_absent = FALSE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
				       offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);


	/*
	 * Reserve 2 virtual pages in the kernel address space to map the
	 * source and destination physical pages when it's their turn to
	 * be processed.
	 */
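	/*
	 * Layout of that transient window (sketch of the code below):
	 *
	 *	kernel_mapping + 0         --> src_vaddr: encrypted page,
	 *	                               mapped read-only
	 *	kernel_mapping + PAGE_SIZE --> dst_vaddr: destination UPL
	 *	                               page, mapped read/write
	 */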
	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &kernel_mapping,
			       2 * PAGE_SIZE_64,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = kernel_mapping;
	vm_map_unlock(kernel_map);
	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object); /* to keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	     retval == KERN_SUCCESS && cur_offset < length;
	     cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_object.
		 */
	retry_src_fault:
		vm_object_lock(src_object);
		vm_object_paging_begin(src_object);
		error_code = 0;
		prot = VM_PROT_READ;
		kr = vm_fault_page(src_object,
				   offset + cur_offset,
				   VM_PROT_READ,
				   FALSE,
				   &prot,
				   &src_page,
				   &top_page,
				   NULL,
				   &error_code,
				   FALSE,
				   FALSE,
				   &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_object);
			vm_object_unlock(src_object);
			/*FALLTHROUGH*/
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			      "vm_fault_page() unexpected error 0x%x\n",
			      kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->busy);

		if (!src_page->active &&
		    !src_page->inactive &&
		    !src_page->throttled) {
			vm_page_lockspin_queues();
			if (!src_page->active &&
			    !src_page->inactive &&
			    !src_page->throttled) {
				vm_page_deactivate(src_page);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish an explicit mapping of the source
		 * physical page.
		 */
		pmap_enter(kernel_pmap,
			   kernel_mapping,
			   src_page->phys_page,
			   VM_PROT_READ,
			   src_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);
		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (ppnum_t)
			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap,
			   kernel_mapping + PAGE_SIZE_64,
			   dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   dst_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		ret = pager->crypt.page_decrypt((const void *) src_vaddr,
						(void *) dst_vaddr,
						offset + cur_offset,
						pager->crypt.crypt_ops);
		if (ret) {
			/*
			 * Decryption failed.  Abort the fault.
			 */
			retval = KERN_ABORTED;
		} else {
			/*
			 * Validate the original page...
			 */
			if (src_page->object->code_signed) {
				vm_page_validate_cs_mapped(
					src_page,
					(const void *) src_vaddr);
			}
			/*
			 * ... and transfer the results to the destination page.
			 */
			UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
					     src_page->cs_validated);
			UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
					   src_page->cs_tainted);
		}

		/*
		 * Remove the pmap mapping of the source and destination pages
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) kernel_mapping,
			    (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		vm_object_paging_end(src_page->object);
		vm_object_unlock(src_page->object);
		if (top_page != VM_PAGE_NULL) {
			vm_object_t top_object;

			top_object = top_page->object;
			vm_object_lock(top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(top_object);
			vm_object_unlock(top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t	wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,	/* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			upl_commit_range(upl, 0, upl->size,
					 UPL_COMMIT_CS_VALIDATED,
					 upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
				   kernel_mapping,
				   kernel_mapping + (2 * PAGE_SIZE_64),
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		src_vaddr = 0;
		dst_vaddr = 0;
	}
	if (src_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_object);
	}

	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not
 * 0, i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	lck_mtx_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		     pager,
		     apple_protect_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);

	/* deallocate any crypt module data */
	if (pager->crypt.crypt_end)
		pager->crypt.crypt_end(pager->crypt.crypt_ops);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not, but always
 * returns with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (!locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 * apple_protect_pager_terminate()
 *
 * Nothing to do here; the real cleanup happens in
 * apple_protect_pager_terminate_internal() and ..._deallocate().
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_synchronize()
 *
 * The pages are never dirty, so there's nothing to flush: just report
 * the synchronization as completed.
 */
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}


/*
 * apple_protect_pager_lookup()
 *
 * Convert from a memory object to the corresponding pager structure,
 * with some sanity checks.
 */
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t		backing_object,
	struct pager_crypt_info	*crypt_info)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE;	/* not ready until it has a "name" */
	pager->ref_count = 2;		/* existence + setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	pager->crypt = *crypt_info;

	vm_object_reference(backing_object);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager2->backing_object == backing_object) {
			break;
		}
	}
	if (!queue_end(&apple_protect_pager_queue,
		       (queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t		backing_object,
	struct pager_crypt_info	*crypt_info)
{
	apple_protect_pager_t	pager;

	lck_mtx_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		      pager,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager->backing_object == backing_object) {
			/*
			 * For the same object we must always use the same
			 * protection options.
			 */
			if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) &&
			      (pager->crypt.crypt_ops == crypt_info->crypt_ops))) {
				lck_mtx_unlock(&apple_protect_pager_lock);
				return MEMORY_OBJECT_NULL;
			}
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	lck_mtx_unlock(&apple_protect_pager_lock);

	if (pager == APPLE_PROTECT_PAGER_NULL) {
		pager = apple_protect_pager_create(backing_object, crypt_info);
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	lck_mtx_lock(&apple_protect_pager_lock);
	while (!pager->is_ready) {
		lck_mtx_sleep(&apple_protect_pager_lock,
			      LCK_SLEEP_DEFAULT,
			      &pager->is_ready,
			      THREAD_UNINT);
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}
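
/*
 * Usage sketch for a hypothetical caller (in this source tree the
 * caller is expected to live in the VM map code, e.g.
 * vm_map_apple_protected(), which would map the returned memory object
 * copy-on-write over the protected range):
 *
 *	memory_object_t unprotected_mem_obj;
 *
 *	unprotected_mem_obj = apple_protect_pager_setup(protected_object,
 *							&crypt_info);
 *	if (unprotected_mem_obj == MEMORY_OBJECT_NULL) {
 *		return KERN_FAILURE;	// incompatible crypt_info,
 *					// or pager allocation failed
 *	}
 *
 * Note that the pager copies *crypt_info, so crypt_info->crypt_ops
 * presumably has to stay valid until the pager calls crypt_end() at
 * termination time.
 */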

void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, so try to trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we've trimmed enough pagers */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}