/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>


/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (a simple linked list)
 * for now.
 */
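
/*
 * Illustrative sketch: a typical client (such as vm_map_apple_protected()
 * in osfmk/vm/vm_map.c) obtains a decrypting memory object for the
 * encrypted range and maps it copy-on-write over the original mapping.
 * The helper below is an example only; its name and parameters are
 * placeholders, not part of this pager's interface.
 */
#if 0	/* example only, not compiled */
static kern_return_t
example_map_protected_range(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_size_t		size,
	vm_object_t		backing_object,
	struct pager_crypt_info	*crypt_info)
{
	memory_object_t	protected_mem_obj;
	vm_map_offset_t	map_addr;
	kern_return_t	kr;

	/* find or create the "apple protect" pager for this backing object */
	protected_mem_obj = apple_protect_pager_setup(backing_object,
						      crypt_info);
	if (protected_mem_obj == MEMORY_OBJECT_NULL)
		return KERN_FAILURE;

	/* map the decrypting memory object copy-on-write over the range */
	map_addr = start;
	kr = vm_map_enter_mem_object(map,
				     &map_addr,
				     size,
				     0,
				     VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
				     (ipc_port_t) protected_mem_obj,
				     0,
				     TRUE,	/* copy-on-write */
				     VM_PROT_READ | VM_PROT_EXECUTE,
				     VM_PROT_ALL,
				     VM_INHERIT_COPY);

	/*
	 * Release the reference obtained from apple_protect_pager_setup();
	 * the new mapping (if any) holds its own reference on the object.
	 */
	memory_object_deallocate(protected_mem_obj);

	return kr;
}
#endif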

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
	memory_object_control_t control,
	memory_object_cluster_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	vm_prot_t protection_required,
	memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t data_cnt,
	memory_object_offset_t *resid_offset,
	int *io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
	vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_map,
	apple_protect_pager_last_unmap,
	"apple protect pager"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	struct ipc_object_header pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready ? */
	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
	memory_object_control_t	pager_control;	/* mem object control handle */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
	struct pager_crypt_info	crypt;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
#define pager_ikot	pager_header.io_bits

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_lck_mtx_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;


lck_grp_t	apple_protect_pager_lck_grp;
lck_grp_attr_t	apple_protect_pager_lck_grp_attr;
lck_attr_t	apple_protect_pager_lck_attr;


/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object,
	struct pager_crypt_info *crypt_info);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
	boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A)					\
	MACRO_BEGIN						\
	if ((apple_protect_pagerdebug & LEVEL)==LEVEL) {	\
		printf A;					\
	}							\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
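
/*
 * Example (illustrative only): on a DEBUG kernel, page-in tracing can be
 * enabled at run time by setting the debug mask defined above, e.g.:
 *
 *	apple_protect_pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *
 * after which the PAGER_DEBUG(PAGER_PAGEIN, (...)) calls below will
 * printf their arguments.
 */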


void
apple_protect_pager_bootstrap(void)
{
	lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr);
	lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr);
	lck_attr_setdefault(&apple_protect_pager_lck_attr);
	lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr);
	queue_init(&apple_protect_pager_queue);
}

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are never supposed to be dirtied;
 * VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	unsigned int		pl_count;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		kernel_mapping;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	vm_map_entry_t		map_entry;
	kern_return_t		error_code;
	vm_prot_t		prot;
	vm_page_t		src_page, top_page;
	int			interruptible;
	struct vm_object_fault_info fault_info;
	int			ret;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_object = VM_OBJECT_NULL;
	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *) mo_fault_info);
	fault_info.stealth = TRUE;
	interruptible = fault_info.interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
				       offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);


	/*
	 * Reserve 2 virtual pages in the kernel address space to map the
	 * source and destination physical pages when it's their turn to
	 * be processed.
	 */
	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &kernel_mapping,
			       2 * PAGE_SIZE_64,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = kernel_mapping;
	vm_map_unlock(kernel_map);
	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object); /* to keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	     retval == KERN_SUCCESS && cur_offset < length;
	     cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_object.
		 */
	retry_src_fault:
		vm_object_lock(src_object);
		vm_object_paging_begin(src_object);
		error_code = 0;
		prot = VM_PROT_READ;
		kr = vm_fault_page(src_object,
				   offset + cur_offset,
				   VM_PROT_READ,
				   FALSE,
				   &prot,
				   &src_page,
				   &top_page,
				   NULL,
				   &error_code,
				   FALSE,
				   FALSE,
				   &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_object);
			vm_object_unlock(src_object);
			/*FALLTHROUGH*/
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("apple_protect_pager_data_request: "
			      "vm_fault_page() unexpected error 0x%x\n",
			      kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->busy);

		if (!src_page->active &&
		    !src_page->inactive &&
		    !src_page->throttled) {
			vm_page_lockspin_queues();
			if (!src_page->active &&
			    !src_page->inactive &&
			    !src_page->throttled) {
				vm_page_deactivate(src_page);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish an explicit mapping of the source
		 * physical page.
		 */
		pmap_enter(kernel_pmap,
			   kernel_mapping,
			   src_page->phys_page,
			   VM_PROT_READ,
			   src_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);
		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (ppnum_t)
			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap,
			   kernel_mapping + PAGE_SIZE_64,
			   dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   dst_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		ret = pager->crypt.page_decrypt((const void *) src_vaddr,
						(void *) dst_vaddr,
						offset+cur_offset,
						pager->crypt.crypt_ops);
		if (ret) {
			/*
			 * Decryption failed. Abort the fault.
			 */
			retval = KERN_ABORTED;
		} else {
			/*
			 * Validate the original page...
			 */
			if (src_page->object->code_signed) {
				vm_page_validate_cs_mapped(
					src_page,
					(const void *) src_vaddr);
			}
			/*
			 * ... and transfer the results to the destination page.
			 */
			UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
					     src_page->cs_validated);
			UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
					   src_page->cs_tainted);
		}

		/*
		 * Remove the pmap mapping of the source and destination pages
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) kernel_mapping,
			    (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		vm_object_paging_end(src_page->object);
		vm_object_unlock(src_page->object);
		if (top_page != VM_PAGE_NULL) {
			vm_object_t top_object;

			top_object = top_page->object;
			vm_object_lock(top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(top_object);
			vm_object_unlock(top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
			if (retval == KERN_ABORTED) {
				wait_result_t wait_result;

				/*
				 * We aborted the fault and did not provide
				 * any contents for the requested pages but
				 * the pages themselves are not invalid, so
				 * let's return success and let the caller
				 * retry the fault, in case it might succeed
				 * later (when the decryption code is up and
				 * running in the kernel, for example).
				 */
				retval = KERN_SUCCESS;
				/*
				 * Wait a little bit first to avoid using
				 * too much CPU time retrying and failing
				 * the same fault over and over again.
				 */
				wait_result = assert_wait_timeout(
					(event_t) apple_protect_pager_data_request,
					THREAD_UNINT,
					10000,	/* 10ms */
					NSEC_PER_USEC);
				assert(wait_result == THREAD_WAITING);
				wait_result = thread_block(THREAD_CONTINUE_NULL);
				assert(wait_result == THREAD_TIMED_OUT);
			}
		} else {
			boolean_t empty;
			upl_commit_range(upl, 0, upl->size,
					 UPL_COMMIT_CS_VALIDATED,
					 upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
				   kernel_mapping,
				   kernel_mapping + (2 * PAGE_SIZE_64),
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		src_vaddr = 0;
		dst_vaddr = 0;
	}
	if (src_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_object);
	}

	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	lck_mtx_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		     pager,
		     apple_protect_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);

	/* deallocate any crypt module data */
	if (pager->crypt.crypt_end)
		pager->crypt.crypt_end(pager->crypt.crypt_ops);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (! locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}


/*
 *
 */
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	 mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t	backing_object,
	struct pager_crypt_info *crypt_info)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE; /* not ready until it has a "name" */
	pager->ref_count = 2;	/* existence + setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	pager->crypt = *crypt_info;

	vm_object_reference(backing_object);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager2->backing_object == backing_object) {
			break;
		}
	}
	if (! queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object. If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t	backing_object,
	struct pager_crypt_info *crypt_info)
{
	apple_protect_pager_t	pager;

	lck_mtx_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		      pager,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager->backing_object == backing_object) {
			/* For the same object we must always use the same protection options */
			if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) &&
			      (pager->crypt.crypt_ops == crypt_info->crypt_ops) )) {
				lck_mtx_unlock(&apple_protect_pager_lock);
				return MEMORY_OBJECT_NULL;
			}
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	lck_mtx_unlock(&apple_protect_pager_lock);

	if (pager == APPLE_PROTECT_PAGER_NULL) {
		pager = apple_protect_pager_create(backing_object, crypt_info);
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	lck_mtx_lock(&apple_protect_pager_lock);
	while (!pager->is_ready) {
		lck_mtx_sleep(&apple_protect_pager_lock,
			      LCK_SLEEP_DEFAULT,
			      &pager->is_ready,
			      THREAD_UNINT);
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}

void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, so try to trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have trimmed enough pagers */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}