apple/xnu.git: osfmk/vm/vm_apple_protect.c (blob 54e618e40004537aae4c35af51a38fc0987238b5)
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48
49 #include <ipc/ipc_port.h>
50 #include <ipc/ipc_space.h>
51
52 #include <default_pager/default_pager_types.h>
53 #include <default_pager/default_pager_object_server.h>
54
55 #include <vm/vm_fault.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_pageout.h>
58 #include <vm/memory_object.h>
59 #include <vm/vm_pageout.h>
60 #include <vm/vm_protos.h>
61
62
63 /*
64 * APPLE PROTECT MEMORY PAGER
65 *
66 * This external memory manager (EMM) handles memory from the encrypted
67 * sections of some executables protected by the DSMOS kernel extension.
68 *
69 * It mostly handles page-in requests (from memory_object_data_request()) by
70 * getting the encrypted data from its backing VM object, itself backed by
71 * the encrypted file, decrypting it and providing it to VM.
72 *
73 * The decrypted pages will never be dirtied, so the memory manager doesn't
74 * need to handle page-out requests (from memory_object_data_return()). The
75 * pages need to be mapped copy-on-write, so that the originals stay clean.
76 *
77 * We don't expect to have to handle a large number of apple-protected
78 * binaries, so the data structures are kept very simple (a plain linked list)
79 * for now.
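 *
 * The entry point for users of this pager is apple_protect_pager_setup(),
 * below, which returns a memory object backed by this EMM for a given
 * backing VM object and set of decryption callbacks.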
80 */
81
82 /* forward declarations */
83 void apple_protect_pager_reference(memory_object_t mem_obj);
84 void apple_protect_pager_deallocate(memory_object_t mem_obj);
85 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
86 memory_object_control_t control,
87 vm_size_t pg_size);
88 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
89 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
90 memory_object_offset_t offset,
91 vm_size_t length,
92 vm_prot_t protection_required,
93 memory_object_fault_info_t fault_info);
94 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
95 memory_object_offset_t offset,
96 vm_size_t data_cnt,
97 memory_object_offset_t *resid_offset,
98 int *io_error,
99 boolean_t dirty,
100 boolean_t kernel_copy,
101 int upl_flags);
102 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
103 memory_object_offset_t offset,
104 vm_size_t data_cnt);
105 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
106 memory_object_offset_t offset,
107 vm_size_t size,
108 vm_prot_t desired_access);
109 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
110 memory_object_offset_t offset,
111 vm_size_t length,
112 vm_sync_t sync_flags);
113 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
114 vm_prot_t prot);
115 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
116
117 /*
118 * Vector of VM operations for this EMM.
119 * These routines are invoked by VM via the memory_object_*() interfaces.
120 */
121 const struct memory_object_pager_ops apple_protect_pager_ops = {
122 apple_protect_pager_reference,
123 apple_protect_pager_deallocate,
124 apple_protect_pager_init,
125 apple_protect_pager_terminate,
126 apple_protect_pager_data_request,
127 apple_protect_pager_data_return,
128 apple_protect_pager_data_initialize,
129 apple_protect_pager_data_unlock,
130 apple_protect_pager_synchronize,
131 apple_protect_pager_map,
132 apple_protect_pager_last_unmap,
133 "apple protect pager"
134 };
135
136 /*
137 * The "apple_protect_pager" describes a memory object backed by
138 * the "apple protect" EMM.
139 */
140 typedef struct apple_protect_pager {
141 memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */
142 unsigned int pager_ikot; /* JMM: fake ip_kotype() */
143 queue_chain_t pager_queue; /* next & prev pagers */
144 unsigned int ref_count; /* reference count */
145 boolean_t is_ready; /* is this pager ready ? */
146 boolean_t is_mapped; /* is this mem_obj mapped ? */
147 memory_object_control_t pager_control; /* mem object control handle */
148 vm_object_t backing_object; /* VM obj w/ encrypted data */
149 struct pager_crypt_info crypt;
150 } *apple_protect_pager_t;
151 #define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
152
153 /*
154 * List of memory objects managed by this EMM.
155 * The list is protected by the "apple_protect_pager_lock" lock.
156 */
157 int apple_protect_pager_count = 0; /* number of pagers */
158 int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
159 queue_head_t apple_protect_pager_queue;
160 decl_mutex_data(,apple_protect_pager_lock)
161
162 /*
163 * Maximum number of unmapped pagers we're willing to keep around.
164 */
165 int apple_protect_pager_cache_limit = 10;
166
167 /*
168 * Statistics & counters.
169 */
170 int apple_protect_pager_count_max = 0;
171 int apple_protect_pager_count_unmapped_max = 0;
172 int apple_protect_pager_num_trim_max = 0;
173 int apple_protect_pager_num_trim_total = 0;
174
175 /* internal prototypes */
176 apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object,
177 struct pager_crypt_info *crypt_info);
178 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
179 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
180 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
181 boolean_t locked);
182 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
183 void apple_protect_pager_trim(void);
184
185
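/*
 * Debug tracing: setting bits in "apple_protect_pagerdebug" (see the
 * PAGER_* masks below) enables the corresponding PAGER_DEBUG() printf()s
 * in DEBUG kernels.
 */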
186 #if DEBUG
187 int apple_protect_pagerdebug = 0;
188 #define PAGER_ALL 0xffffffff
189 #define PAGER_INIT 0x00000001
190 #define PAGER_PAGEIN 0x00000002
191
192 #define PAGER_DEBUG(LEVEL, A) \
193 MACRO_BEGIN \
194 if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \
195 printf A; \
196 } \
197 MACRO_END
198 #else
199 #define PAGER_DEBUG(LEVEL, A)
200 #endif
201
202
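/*
 * apple_protect_pager_bootstrap()
 *
 * Initialize this EMM's global state: the lock protecting the list of
 * pagers and the (initially empty) list itself.
 */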
203 void
204 apple_protect_pager_bootstrap(void)
205 {
206 mutex_init(&apple_protect_pager_lock, 0);
207 queue_init(&apple_protect_pager_queue);
208 }
209
210 /*
211 * apple_protect_pager_init()
212 *
213 * Initializes the memory object and makes it ready to be used and mapped.
214 */
215 kern_return_t
216 apple_protect_pager_init(
217 memory_object_t mem_obj,
218 memory_object_control_t control,
219 #if !DEBUG
220 __unused
221 #endif
222 vm_size_t pg_size)
223 {
224 apple_protect_pager_t pager;
225 kern_return_t kr;
226 memory_object_attr_info_data_t attributes;
227
228 PAGER_DEBUG(PAGER_ALL,
229 ("apple_protect_pager_init: %p, %p, %x\n",
230 mem_obj, control, pg_size));
231
232 if (control == MEMORY_OBJECT_CONTROL_NULL)
233 return KERN_INVALID_ARGUMENT;
234
235 pager = apple_protect_pager_lookup(mem_obj);
236
237 memory_object_control_reference(control);
238
239 pager->pager_control = control;
240
241 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
242 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
243 attributes.cluster_size = (1 << (PAGE_SHIFT));
244 attributes.may_cache_object = FALSE;
245 attributes.temporary = TRUE;
246
247 kr = memory_object_change_attributes(
248 control,
249 MEMORY_OBJECT_ATTRIBUTE_INFO,
250 (memory_object_info_t) &attributes,
251 MEMORY_OBJECT_ATTR_INFO_COUNT);
252 if (kr != KERN_SUCCESS)
253 panic("apple_protect_pager_init: "
254 "memory_object_change_attributes() failed");
255
256 return KERN_SUCCESS;
257 }
258
259 /*
260 * apple_protect_pager_data_return()
261 *
262 * Handles page-out requests from VM. This should never happen since
263 * the pages provided by this EMM are not supposed to be dirty or dirtied
264 * and VM should simply discard the contents and reclaim the pages if it
265 * needs to.
266 */
267 kern_return_t
268 apple_protect_pager_data_return(
269 __unused memory_object_t mem_obj,
270 __unused memory_object_offset_t offset,
271 __unused vm_size_t data_cnt,
272 __unused memory_object_offset_t *resid_offset,
273 __unused int *io_error,
274 __unused boolean_t dirty,
275 __unused boolean_t kernel_copy,
276 __unused int upl_flags)
277 {
278 panic("apple_protect_pager_data_return: should never get called");
279 return KERN_FAILURE;
280 }
281
282 kern_return_t
283 apple_protect_pager_data_initialize(
284 __unused memory_object_t mem_obj,
285 __unused memory_object_offset_t offset,
286 __unused vm_size_t data_cnt)
287 {
288 panic("apple_protect_pager_data_initialize: should never get called");
289 return KERN_FAILURE;
290 }
291
292 kern_return_t
293 apple_protect_pager_data_unlock(
294 __unused memory_object_t mem_obj,
295 __unused memory_object_offset_t offset,
296 __unused vm_size_t size,
297 __unused vm_prot_t desired_access)
298 {
299 return KERN_FAILURE;
300 }
301
302 /*
303 * apple_protect_pager_data_request()
304 *
305 * Handles page-in requests from VM.
306 */
307 kern_return_t
308 apple_protect_pager_data_request(
309 memory_object_t mem_obj,
310 memory_object_offset_t offset,
311 vm_size_t length,
312 #if !DEBUG
313 __unused
314 #endif
315 vm_prot_t protection_required,
316 memory_object_fault_info_t mo_fault_info)
317 {
318 apple_protect_pager_t pager;
319 memory_object_control_t mo_control;
320 upl_t upl;
321 int upl_flags;
322 upl_size_t upl_size;
323 upl_page_info_t *upl_pl = NULL;
324 unsigned int pl_count;
325 vm_object_t src_object, dst_object;
326 kern_return_t kr, retval;
327 vm_map_offset_t kernel_mapping;
328 vm_offset_t src_vaddr, dst_vaddr;
329 vm_offset_t cur_offset;
330 vm_map_entry_t map_entry;
331 kern_return_t error_code;
332 vm_prot_t prot;
333 vm_page_t src_page, top_page;
334 int interruptible;
335 vm_object_fault_info_t fault_info;
336
337 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
338
339 src_object = VM_OBJECT_NULL;
340 kernel_mapping = 0;
341 upl = NULL;
342 upl_pl = NULL;
343 fault_info = (vm_object_fault_info_t) mo_fault_info;
344 interruptible = fault_info->interruptible;
345
346 pager = apple_protect_pager_lookup(mem_obj);
347 assert(pager->is_ready);
348 assert(pager->ref_count > 1); /* pager is alive and mapped */
349
350 PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
351
352 /*
353 * Gather in a UPL all the VM pages requested by VM.
354 */
355 mo_control = pager->pager_control;
356
357 upl_size = length;
358 upl_flags =
359 UPL_RET_ONLY_ABSENT |
360 UPL_SET_LITE |
361 UPL_NO_SYNC |
362 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
363 UPL_SET_INTERNAL;
364 pl_count = 0;
365 kr = memory_object_upl_request(mo_control,
366 offset, upl_size,
367 &upl, NULL, NULL, upl_flags);
368 if (kr != KERN_SUCCESS) {
369 retval = kr;
370 goto done;
371 }
372 dst_object = mo_control->moc_object;
373 assert(dst_object != VM_OBJECT_NULL);
374
375
376 /*
377 * Reserve 2 virtual pages in the kernel address space to map the
378 * source and destination physical pages when it's their turn to
379 * be processed.
380 */
381 vm_object_reference(kernel_object); /* ref. for mapping */
382 kr = vm_map_find_space(kernel_map,
383 &kernel_mapping,
384 2 * PAGE_SIZE_64,
385 0,
386 0,
387 &map_entry);
388 if (kr != KERN_SUCCESS) {
389 vm_object_deallocate(kernel_object);
390 retval = kr;
391 goto done;
392 }
393 map_entry->object.vm_object = kernel_object;
394 map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
395 vm_map_unlock(kernel_map);
396 src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
397 dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
398
399 /*
400 * We'll map the encrypted data in the kernel address space from the
401 * backing VM object (itself backed by the encrypted file via
402 * the vnode pager).
403 */
404 src_object = pager->backing_object;
405 assert(src_object != VM_OBJECT_NULL);
406 vm_object_reference(src_object); /* to keep the source object alive */
407
408 /*
409 * Fill in the contents of the pages requested by VM.
410 */
411 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
412 pl_count = length / PAGE_SIZE;
413 for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
414 ppnum_t dst_pnum;
415
416 if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
417 /* this page is not in the UPL: skip it */
418 continue;
419 }
420
421 /*
422 * Map the source (encrypted) page in the kernel's
423 * virtual address space.
424 * We already hold a reference on the src_object.
425 */
426 retry_src_fault:
427 vm_object_lock(src_object);
428 vm_object_paging_begin(src_object);
429 error_code = 0;
430 prot = VM_PROT_READ;
431 kr = vm_fault_page(src_object,
432 offset + cur_offset,
433 VM_PROT_READ,
434 FALSE,
435 &prot,
436 &src_page,
437 &top_page,
438 NULL,
439 &error_code,
440 FALSE,
441 FALSE,
442 fault_info);
443 switch (kr) {
444 case VM_FAULT_SUCCESS:
445 break;
446 case VM_FAULT_RETRY:
447 goto retry_src_fault;
448 case VM_FAULT_MEMORY_SHORTAGE:
449 if (vm_page_wait(interruptible)) {
450 goto retry_src_fault;
451 }
452 /* fall thru */
453 case VM_FAULT_INTERRUPTED:
454 retval = MACH_SEND_INTERRUPTED;
455 goto done;
456 case VM_FAULT_MEMORY_ERROR:
457 /* the page is not there! */
458 if (error_code) {
459 retval = error_code;
460 } else {
461 retval = KERN_MEMORY_ERROR;
462 }
463 goto done;
464 default:
465 retval = KERN_FAILURE;
466 goto done;
467 }
468 assert(src_page != VM_PAGE_NULL);
469 assert(src_page->busy);
470
471 /*
472 * Establish an explicit mapping of the source
473 * physical page.
474 */
475 pmap_enter(kernel_pmap,
476 kernel_mapping,
477 src_page->phys_page,
478 VM_PROT_READ,
479 src_object->wimg_bits & VM_WIMG_MASK,
480 TRUE);
481 /*
482 * Establish an explicit pmap mapping of the destination
483 * physical page.
484 * We can't do a regular VM mapping because the VM page
485 * is "busy".
486 */
487 dst_pnum = (ppnum_t)
488 upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
489 assert(dst_pnum != 0);
490 pmap_enter(kernel_pmap,
491 kernel_mapping + PAGE_SIZE_64,
492 dst_pnum,
493 VM_PROT_READ | VM_PROT_WRITE,
494 dst_object->wimg_bits & VM_WIMG_MASK,
495 TRUE);
496
497 /*
498 * Validate the original page...
499 */
500 if (src_page->object->code_signed) {
501 vm_page_validate_cs_mapped(src_page,
502 (const void *) src_vaddr);
503 }
504 /*
505 * ... and transfer the results to the destination page.
506 */
507 UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
508 src_page->cs_validated);
509 UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
510 src_page->cs_tainted);
511
512 /*
513 * Decrypt the encrypted contents of the source page
514 * into the destination page.
515 */
516 pager->crypt.page_decrypt((const void *) src_vaddr,
517 (void *) dst_vaddr, offset+cur_offset,
518 pager->crypt.crypt_ops);
519
520 /*
521 * Remove the pmap mapping of the source and destination pages
522 * in the kernel.
523 */
524 pmap_remove(kernel_pmap,
525 (addr64_t) kernel_mapping,
526 (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));
527
528 /*
529 * Cleanup the result of vm_fault_page() of the source page.
530 */
531 PAGE_WAKEUP_DONE(src_page);
532 vm_object_paging_end(src_page->object);
533 vm_object_unlock(src_page->object);
534 if (top_page != VM_PAGE_NULL) {
535 vm_object_t top_object;
536
537 top_object = top_page->object;
538 vm_object_lock(top_object);
539 VM_PAGE_FREE(top_page);
540 vm_object_paging_end(top_object);
541 vm_object_unlock(top_object);
542 }
543 }
544
545 retval = KERN_SUCCESS;
546 done:
547 if (upl != NULL) {
548 /* clean up the UPL */
549
550 /*
551 * The pages are currently dirty because we've just been
552 * writing on them, but as far as we're concerned, they're
553 * clean since they contain their "original" contents as
554 * provided by us, the pager.
555 * Tell the UPL to mark them "clean".
556 */
557 upl_clear_dirty(upl, TRUE);
558
559 /* abort or commit the UPL */
560 if (retval != KERN_SUCCESS) {
561 upl_abort(upl, 0);
562 } else {
563 boolean_t empty;
564 upl_commit_range(upl, 0, upl->size,
565 UPL_COMMIT_CS_VALIDATED,
566 upl_pl, pl_count, &empty);
567 }
568
569 /* and deallocate the UPL */
570 upl_deallocate(upl);
571 upl = NULL;
572 }
573 if (kernel_mapping != 0) {
574 /* clean up the mapping of the source and destination pages */
575 kr = vm_map_remove(kernel_map,
576 kernel_mapping,
577 kernel_mapping + (2 * PAGE_SIZE_64),
578 VM_MAP_NO_FLAGS);
579 assert(kr == KERN_SUCCESS);
580 kernel_mapping = 0;
581 src_vaddr = 0;
582 dst_vaddr = 0;
583 }
584 if (src_object != VM_OBJECT_NULL) {
585 vm_object_deallocate(src_object);
586 }
587
588 return retval;
589 }
590
591 /*
592 * apple_protect_pager_reference()
593 *
594 * Get a reference on this memory object.
595 * For external usage only. Assumes that the initial reference count is not 0,
597 * i.e. one should not "revive" a dead pager this way.
597 */
598 void
599 apple_protect_pager_reference(
600 memory_object_t mem_obj)
601 {
602 apple_protect_pager_t pager;
603
604 pager = apple_protect_pager_lookup(mem_obj);
605
606 mutex_lock(&apple_protect_pager_lock);
607 assert(pager->ref_count > 0);
608 pager->ref_count++;
609 mutex_unlock(&apple_protect_pager_lock);
610 }
611
612
613 /*
614 * apple_protect_pager_dequeue:
615 *
616 * Removes a pager from the list of pagers.
617 *
618 * The caller must hold "apple_protect_pager_lock".
619 */
620 void
621 apple_protect_pager_dequeue(
622 apple_protect_pager_t pager)
623 {
624 assert(!pager->is_mapped);
625
626 queue_remove(&apple_protect_pager_queue,
627 pager,
628 apple_protect_pager_t,
629 pager_queue);
630 pager->pager_queue.next = NULL;
631 pager->pager_queue.prev = NULL;
632
633 apple_protect_pager_count--;
634 }
635
636 /*
637 * apple_protect_pager_terminate_internal:
638 *
639 * Trigger the asynchronous termination of the memory object associated
640 * with this pager.
641 * When the memory object is terminated, there will be one more call
642 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
643 * to finish the clean up.
644 *
645 * "apple_protect_pager_lock" should not be held by the caller.
646 * We don't need the lock because the pager has already been removed from
647 * the pagers' list and is now ours exclusively.
648 */
649 void
650 apple_protect_pager_terminate_internal(
651 apple_protect_pager_t pager)
652 {
653 assert(pager->is_ready);
654 assert(!pager->is_mapped);
655
656 if (pager->backing_object != VM_OBJECT_NULL) {
657 vm_object_deallocate(pager->backing_object);
658 pager->backing_object = VM_OBJECT_NULL;
659 }
660
661 /* trigger the destruction of the memory object */
662 memory_object_destroy(pager->pager_control, 0);
663
664 /* deallocate any crypt module data */
665 if(pager->crypt.crypt_end)
666 pager->crypt.crypt_end(pager->crypt.crypt_ops);
667 }
668
669 /*
670 * apple_protect_pager_deallocate_internal()
671 *
672 * Release a reference on this pager and free it when the last
673 * reference goes away.
674 * Can be called with apple_protect_pager_lock held or not but always returns
675 * with it unlocked.
676 */
677 void
678 apple_protect_pager_deallocate_internal(
679 apple_protect_pager_t pager,
680 boolean_t locked)
681 {
682 boolean_t needs_trimming;
683 int count_unmapped;
684
685 if (! locked) {
686 mutex_lock(&apple_protect_pager_lock);
687 }
688
689 count_unmapped = (apple_protect_pager_count -
690 apple_protect_pager_count_mapped);
691 if (count_unmapped > apple_protect_pager_cache_limit) {
692 /* we have too many unmapped pagers: trim some */
693 needs_trimming = TRUE;
694 } else {
695 needs_trimming = FALSE;
696 }
697
698 /* drop a reference on this pager */
699 pager->ref_count--;
700
701 if (pager->ref_count == 1) {
702 /*
703 * Only the "named" reference is left, which means that
704 * no one is really holding on to this pager anymore.
705 * Terminate it.
706 */
707 apple_protect_pager_dequeue(pager);
708 /* the pager is all ours: no need for the lock now */
709 mutex_unlock(&apple_protect_pager_lock);
710 apple_protect_pager_terminate_internal(pager);
711 } else if (pager->ref_count == 0) {
712 /*
713 * Dropped the existence reference; the memory object has
714 * been terminated. Do some final cleanup and release the
715 * pager structure.
716 */
717 mutex_unlock(&apple_protect_pager_lock);
718 if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
719 memory_object_control_deallocate(pager->pager_control);
720 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
721 }
722 kfree(pager, sizeof (*pager));
723 pager = APPLE_PROTECT_PAGER_NULL;
724 } else {
725 /* there are still plenty of references: keep going... */
726 mutex_unlock(&apple_protect_pager_lock);
727 }
728
729 if (needs_trimming) {
730 apple_protect_pager_trim();
731 }
732 /* caution: lock is not held on return... */
733 }
734
735 /*
736 * apple_protect_pager_deallocate()
737 *
738 * Release a reference on this pager and free it when the last
739 * reference goes away.
740 */
741 void
742 apple_protect_pager_deallocate(
743 memory_object_t mem_obj)
744 {
745 apple_protect_pager_t pager;
746
747 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
748 pager = apple_protect_pager_lookup(mem_obj);
749 apple_protect_pager_deallocate_internal(pager, FALSE);
750 }
751
752 /*
753 *
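 * apple_protect_pager_terminate()
 *
 * Nothing to do here: the actual cleanup is done by
 * apple_protect_pager_terminate_internal(), driven by the reference
 * counting in apple_protect_pager_deallocate_internal().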
754 */
755 kern_return_t
756 apple_protect_pager_terminate(
757 #if !DEBUG
758 __unused
759 #endif
760 memory_object_t mem_obj)
761 {
762 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
763
764 return KERN_SUCCESS;
765 }
766
767 /*
768 *
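 * apple_protect_pager_synchronize()
 *
 * Handle a memory_object_synchronize() request (e.g. from msync()).
 * Our pages are never dirty, so there is nothing to flush; just report
 * the range as synchronized.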
769 */
770 kern_return_t
771 apple_protect_pager_synchronize(
772 memory_object_t mem_obj,
773 memory_object_offset_t offset,
774 vm_size_t length,
775 __unused vm_sync_t sync_flags)
776 {
777 apple_protect_pager_t pager;
778
779 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));
780
781 pager = apple_protect_pager_lookup(mem_obj);
782
783 memory_object_synchronize_completed(pager->pager_control,
784 offset, length);
785
786 return KERN_SUCCESS;
787 }
788
789 /*
790 * apple_protect_pager_map()
791 *
792 * This allows VM to let us, the EMM, know that this memory object
793 * is currently mapped one or more times. This is called by VM only for the
794 * first mapping of the memory object, and we take one extra reference on the
795 * memory object to account for all its mappings.
796 */
797 kern_return_t
798 apple_protect_pager_map(
799 memory_object_t mem_obj,
800 __unused vm_prot_t prot)
801 {
802 apple_protect_pager_t pager;
803
804 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
805
806 pager = apple_protect_pager_lookup(mem_obj);
807
808 mutex_lock(&apple_protect_pager_lock);
809 assert(pager->is_ready);
810 assert(pager->ref_count > 0); /* pager is alive */
811 if (pager->is_mapped == FALSE) {
812 /*
813 * First mapping of this pager: take an extra reference
814 * that will remain until all the mappings of this pager
815 * are removed.
816 */
817 pager->is_mapped = TRUE;
818 pager->ref_count++;
819 apple_protect_pager_count_mapped++;
820 }
821 mutex_unlock(&apple_protect_pager_lock);
822
823 return KERN_SUCCESS;
824 }
825
826 /*
827 * apple_protect_pager_last_unmap()
828 *
829 * This is called by VM when this memory object is no longer mapped anywhere.
830 */
831 kern_return_t
832 apple_protect_pager_last_unmap(
833 memory_object_t mem_obj)
834 {
835 apple_protect_pager_t pager;
836 int count_unmapped;
837
838 PAGER_DEBUG(PAGER_ALL,
839 ("apple_protect_pager_last_unmap: %p\n", mem_obj));
840
841 pager = apple_protect_pager_lookup(mem_obj);
842
843 mutex_lock(&apple_protect_pager_lock);
844 if (pager->is_mapped) {
845 /*
846 * All the mappings are gone, so let go of the one extra
847 * reference that represents all the mappings of this pager.
848 */
849 apple_protect_pager_count_mapped--;
850 count_unmapped = (apple_protect_pager_count -
851 apple_protect_pager_count_mapped);
852 if (count_unmapped > apple_protect_pager_count_unmapped_max) {
853 apple_protect_pager_count_unmapped_max = count_unmapped;
854 }
855 pager->is_mapped = FALSE;
856 apple_protect_pager_deallocate_internal(pager, TRUE);
857 /* caution: deallocate_internal() released the lock ! */
858 } else {
859 mutex_unlock(&apple_protect_pager_lock);
860 }
861
862 return KERN_SUCCESS;
863 }
864
865
866 /*
867 *
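 * apple_protect_pager_lookup()
 *
 * Convert from a memory object to the corresponding apple_protect_pager.
 * The pager structure is the memory object itself (its first field is the
 * pager_ops vector), so this is just a sanity-checked cast.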
868 */
869 apple_protect_pager_t
870 apple_protect_pager_lookup(
871 memory_object_t mem_obj)
872 {
873 apple_protect_pager_t pager;
874
875 pager = (apple_protect_pager_t) mem_obj;
876 assert(pager->pager_ops == &apple_protect_pager_ops);
877 assert(pager->ref_count > 0);
878 return pager;
879 }
880
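/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new pager for "backing_object", handling the
 * case where another thread raced us and created a pager for the same
 * object first.  The returned pager holds a reference for the caller.
 */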
881 apple_protect_pager_t
882 apple_protect_pager_create(
883 vm_object_t backing_object,
884 struct pager_crypt_info *crypt_info)
885 {
886 apple_protect_pager_t pager, pager2;
887 memory_object_control_t control;
888 kern_return_t kr;
889
890 pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
891 if (pager == APPLE_PROTECT_PAGER_NULL) {
892 return APPLE_PROTECT_PAGER_NULL;
893 }
894
895 /*
896 * The vm_map call takes both named entry ports and raw memory
897 * objects in the same parameter. We need to make sure that
898 * vm_map does not see this object as a named entry port. So,
899 * we reserve the second word in the object for a fake ip_kotype
900 * setting - that will tell vm_map to use it as a memory object.
901 */
902 pager->pager_ops = &apple_protect_pager_ops;
903 pager->pager_ikot = IKOT_MEMORY_OBJECT;
904 pager->is_ready = FALSE;/* not ready until it has a "name" */
905 pager->ref_count = 2; /* existence + setup reference */
906 pager->is_mapped = FALSE;
907 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
908 pager->backing_object = backing_object;
909 pager->crypt = *crypt_info;
910
911 vm_object_reference(backing_object);
912
913 mutex_lock(&apple_protect_pager_lock);
914 /* see if anyone raced us to create a pager for the same object */
915 queue_iterate(&apple_protect_pager_queue,
916 pager2,
917 apple_protect_pager_t,
918 pager_queue) {
919 if (pager2->backing_object == backing_object) {
920 break;
921 }
922 }
923 if (! queue_end(&apple_protect_pager_queue,
924 (queue_entry_t) pager2)) {
925 /* while we hold the lock, transfer our setup ref to winner */
926 pager2->ref_count++;
927 /* we lost the race, down with the loser... */
928 mutex_unlock(&apple_protect_pager_lock);
929 vm_object_deallocate(pager->backing_object);
930 pager->backing_object = VM_OBJECT_NULL;
931 kfree(pager, sizeof (*pager));
932 /* ... and go with the winner */
933 pager = pager2;
934 /* let the winner make sure the pager gets ready */
935 return pager;
936 }
937
938 /* enter new pager at the head of our list of pagers */
939 queue_enter_first(&apple_protect_pager_queue,
940 pager,
941 apple_protect_pager_t,
942 pager_queue);
943 apple_protect_pager_count++;
944 if (apple_protect_pager_count > apple_protect_pager_count_max) {
945 apple_protect_pager_count_max = apple_protect_pager_count;
946 }
947 mutex_unlock(&apple_protect_pager_lock);
948
949 kr = memory_object_create_named((memory_object_t) pager,
950 0,
951 &control);
952 assert(kr == KERN_SUCCESS);
953
954 mutex_lock(&apple_protect_pager_lock);
955 /* the new pager is now ready to be used */
956 pager->is_ready = TRUE;
957 mutex_unlock(&apple_protect_pager_lock);
958
959 /* wakeup anyone waiting for this pager to be ready */
960 thread_wakeup(&pager->is_ready);
961
962 return pager;
963 }
964
965 /*
966 * apple_protect_pager_setup()
967 *
968 * Provide the caller with a memory object backed by the provided
969 * "backing_object" VM object. If such a memory object already exists,
970 * re-use it, otherwise create a new memory object.
971 */
972 memory_object_t
973 apple_protect_pager_setup(
974 vm_object_t backing_object,
975 struct pager_crypt_info *crypt_info)
976 {
977 apple_protect_pager_t pager;
978
979 mutex_lock(&apple_protect_pager_lock);
980
981 queue_iterate(&apple_protect_pager_queue,
982 pager,
983 apple_protect_pager_t,
984 pager_queue) {
985 if (pager->backing_object == backing_object) {
986 /* For the same object we must always use the same crypt options */
987 if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) &&
988 (pager->crypt.crypt_ops == crypt_info->crypt_ops) )) {
989 mutex_unlock(&apple_protect_pager_lock);
990 return MEMORY_OBJECT_NULL;
991 }
992 break;
993 }
994 }
995 if (queue_end(&apple_protect_pager_queue,
996 (queue_entry_t) pager)) {
997 /* no existing pager for this backing object */
998 pager = APPLE_PROTECT_PAGER_NULL;
999 } else {
1000 /* make sure pager doesn't disappear */
1001 pager->ref_count++;
1002 }
1003
1004 mutex_unlock(&apple_protect_pager_lock);
1005
1006 if (pager == APPLE_PROTECT_PAGER_NULL) {
1007 pager = apple_protect_pager_create(backing_object, crypt_info);
1008 if (pager == APPLE_PROTECT_PAGER_NULL) {
1009 return MEMORY_OBJECT_NULL;
1010 }
1011 }
1012
1013 mutex_lock(&apple_protect_pager_lock);
1014 while (!pager->is_ready) {
1015 thread_sleep_mutex(&pager->is_ready,
1016 &apple_protect_pager_lock,
1017 THREAD_UNINT);
1018 }
1019 mutex_unlock(&apple_protect_pager_lock);
1020
1021 return (memory_object_t) pager;
1022 }
1023
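/*
 * apple_protect_pager_trim()
 *
 * Bring the number of cached (unmapped but not yet terminated) pagers
 * back under "apple_protect_pager_cache_limit" by terminating the oldest
 * unused ones.
 */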
1024 void
1025 apple_protect_pager_trim(void)
1026 {
1027 apple_protect_pager_t pager, prev_pager;
1028 queue_head_t trim_queue;
1029 int num_trim;
1030 int count_unmapped;
1031
1032 mutex_lock(&apple_protect_pager_lock);
1033
1034 /*
1035 * We have too many pagers: try to trim some unused ones,
1036 * starting with the oldest pager at the end of the queue.
1037 */
1038 queue_init(&trim_queue);
1039 num_trim = 0;
1040
1041 for (pager = (apple_protect_pager_t)
1042 queue_last(&apple_protect_pager_queue);
1043 !queue_end(&apple_protect_pager_queue,
1044 (queue_entry_t) pager);
1045 pager = prev_pager) {
1046 /* get prev elt before we dequeue */
1047 prev_pager = (apple_protect_pager_t)
1048 queue_prev(&pager->pager_queue);
1049
1050 if (pager->ref_count == 2 &&
1051 pager->is_ready &&
1052 !pager->is_mapped) {
1053 /* this pager can be trimmed */
1054 num_trim++;
1055 /* remove this pager from the main list ... */
1056 apple_protect_pager_dequeue(pager);
1057 /* ... and add it to our trim queue */
1058 queue_enter_first(&trim_queue,
1059 pager,
1060 apple_protect_pager_t,
1061 pager_queue);
1062
1063 count_unmapped = (apple_protect_pager_count -
1064 apple_protect_pager_count_mapped);
1065 if (count_unmapped <= apple_protect_pager_cache_limit) {
1066 /* we have trimmed enough pagers */
1067 break;
1068 }
1069 }
1070 }
1071 if (num_trim > apple_protect_pager_num_trim_max) {
1072 apple_protect_pager_num_trim_max = num_trim;
1073 }
1074 apple_protect_pager_num_trim_total += num_trim;
1075
1076 mutex_unlock(&apple_protect_pager_lock);
1077
1078 /* terminate the trimmed pagers */
1079 while (!queue_empty(&trim_queue)) {
1080 queue_remove_first(&trim_queue,
1081 pager,
1082 apple_protect_pager_t,
1083 pager_queue);
1084 pager->pager_queue.next = NULL;
1085 pager->pager_queue.prev = NULL;
1086 assert(pager->ref_count == 2);
1087 /*
1088 * We can't call deallocate_internal() because the pager
1089 * has already been dequeued, but we still need to remove
1090 * a reference.
1091 */
1092 pager->ref_count--;
1093 apple_protect_pager_terminate_internal(pager);
1094 }
1095 }