/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>


/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list)
 * for now.
 */
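/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): a hypothetical caller that wants on-the-fly decryption of an
 * encrypted range would obtain a pager for the backing VM object and then
 * map the resulting memory object copy-on-write.  The names
 * "backing_object" and "crypt_info" below are assumed to have been set up
 * by the caller (the crypt callbacks normally come from the DSMOS kernel
 * extension).
 *
 *	memory_object_t	protected_mem_obj;
 *
 *	protected_mem_obj = apple_protect_pager_setup(backing_object,
 *						      &crypt_info);
 *	if (protected_mem_obj == MEMORY_OBJECT_NULL)
 *		return KERN_FAILURE;
 *	// ... then map "protected_mem_obj" copy-on-write into the target
 *	// map (e.g. via vm_map_enter_mem_object()), so the decrypted
 *	// pages never dirty the encrypted originals ...
 */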

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
	memory_object_control_t control,
	vm_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_prot_t protection_required,
	memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t data_cnt,
	memory_object_offset_t *resid_offset,
	int *io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_sync_t sync_flags);
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
	vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_map,
	apple_protect_pager_last_unmap,
	"apple protect pager"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready ? */
	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
	memory_object_control_t pager_control;	/* mem object control handle */
	vm_object_t		backing_object; /* VM obj w/ encrypted data */
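	/* decryption callbacks and state, supplied by the caller of
	 * apple_protect_pager_setup() (see the note about the DSMOS
	 * kernel extension at the top of this file) */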
	struct pager_crypt_info crypt;
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_mutex_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;

/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object,
	struct pager_crypt_info *crypt_info);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
	boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A) \
	MACRO_BEGIN \
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) { \
		printf A; \
	} \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


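/*
 * apple_protect_pager_bootstrap()
 *
 * One-time initialization of this EMM's global state: the list of
 * pagers and the lock that protects it.
 */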
void
apple_protect_pager_bootstrap(void)
{
	mutex_init(&apple_protect_pager_lock, 0);
	queue_init(&apple_protect_pager_queue);
}

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	vm_size_t		pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t	attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied,
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

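/*
 * apple_protect_pager_data_unlock()
 *
 * Unlock requests are not supported by this pager: it never provides
 * pages with restricted access, so VM is not expected to call this.
 */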
kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
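/*
 * Outline of the page-in path implemented below:
 *   1. gather the requested pages in a UPL;
 *   2. reserve two pages worth of kernel virtual address space;
 *   3. for each requested page: fault in the encrypted source page from
 *      the backing object, map the source and destination pages into the
 *      kernel, run the code-signing validation, decrypt into the
 *      destination page, then tear down the temporary mappings;
 *   4. mark the UPL pages clean and commit (or abort) the UPL.
 */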
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl = NULL;
	unsigned int		pl_count;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		kernel_mapping;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	vm_map_entry_t		map_entry;
	kern_return_t		error_code;
	vm_prot_t		prot;
	vm_page_t		src_page, top_page;
	int			interruptible;
	vm_object_fault_info_t	fault_info;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	src_object = VM_OBJECT_NULL;
	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;
	fault_info = (vm_object_fault_info_t) mo_fault_info;
	interruptible = fault_info->interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
				       offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);


	/*
	 * Reserve 2 virtual pages in the kernel address space to map the
	 * source and destination physical pages when it's their turn to
	 * be processed.
	 */
	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &kernel_mapping,
			       2 * PAGE_SIZE_64,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
	vm_map_unlock(kernel_map);
	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);

	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object); /* to keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;
		int	type_of_fault;

		if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_object.
		 */
	retry_src_fault:
		vm_object_lock(src_object);
		vm_object_paging_begin(src_object);
		error_code = 0;
		prot = VM_PROT_READ;
		kr = vm_fault_page(src_object,
				   offset + cur_offset,
				   VM_PROT_READ,
				   FALSE,
				   &prot,
				   &src_page,
				   &top_page,
				   &type_of_fault,
				   &error_code,
				   FALSE,
				   FALSE,
				   fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			retval = KERN_FAILURE;
			goto done;
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->busy);

		/*
		 * Establish an explicit mapping of the source
		 * physical page.
		 */
		pmap_enter(kernel_pmap,
			   kernel_mapping,
			   src_page->phys_page,
			   VM_PROT_READ,
			   src_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);
		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (addr64_t)
			upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap,
			   kernel_mapping + PAGE_SIZE_64,
			   dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   dst_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);

		/*
		 * Validate the original page...
		 */
		if (src_page->object->code_signed) {
			vm_page_validate_cs_mapped(src_page,
						   (const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
				     src_page->cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
				   src_page->cs_tainted);

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		pager->crypt.page_decrypt((const void *) src_vaddr,
					  (void *) dst_vaddr,
					  offset + cur_offset,
					  pager->crypt.crypt_ops);

		/*
		 * Remove the pmap mapping of the source and destination pages
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) kernel_mapping,
			    (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		vm_object_paging_end(src_page->object);
		vm_object_unlock(src_page->object);
		if (top_page != VM_PAGE_NULL) {
			vm_object_t top_object;

			top_object = top_page->object;
			vm_object_lock(top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(top_object);
			vm_object_unlock(top_object);
		}
	}

	retval = KERN_SUCCESS;
done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			upl_commit_range(upl, 0, upl->size,
					 UPL_COMMIT_CS_VALIDATED,
					 upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
				   kernel_mapping,
				   kernel_mapping + (2 * PAGE_SIZE_64),
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		src_vaddr = 0;
		dst_vaddr = 0;
	}
	if (src_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_object);
	}

	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	mutex_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		     pager,
		     apple_protect_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);

	/* deallocate any crypt module data */
	if (pager->crypt.crypt_end)
		pager->crypt.crypt_end(pager->crypt.crypt_ops);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (!locked) {
		mutex_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		mutex_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		mutex_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		mutex_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 * apple_protect_pager_terminate()
 *
 * Nothing to do here: the real cleanup is done by
 * apple_protect_pager_terminate_internal() when the pager's references
 * are dropped.
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_synchronize()
 *
 * Acknowledge a synchronize request; there is nothing to flush since the
 * pages provided by this pager are never dirty.
 */
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM only the
 * first time the memory object gets mapped, and we take one extra
 * reference on the memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	mutex_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		mutex_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}


/*
 * apple_protect_pager_lookup()
 *
 * Convert from a memory object to its "apple_protect_pager": the two are
 * one and the same structure (see the fake ip_kotype setup in
 * apple_protect_pager_create()).
 */
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	 mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t	backing_object,
	struct pager_crypt_info *crypt_info)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE;	/* not ready until it has a "name" */
	pager->ref_count = 2;		/* existence + setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	pager->crypt = *crypt_info;

	vm_object_reference(backing_object);

	mutex_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager2->backing_object == backing_object) {
			break;
		}
	}
	if (!queue_end(&apple_protect_pager_queue,
		       (queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		mutex_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	mutex_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	mutex_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	mutex_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t	backing_object,
	struct pager_crypt_info *crypt_info)
{
	apple_protect_pager_t	pager;

	mutex_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		      pager,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager->backing_object == backing_object) {
			/* For the same object we must always use the
			   same protection options */
			if (!((pager->crypt.page_decrypt == crypt_info->page_decrypt) &&
			      (pager->crypt.crypt_ops == crypt_info->crypt_ops))) {
				mutex_unlock(&apple_protect_pager_lock);
				return MEMORY_OBJECT_NULL;
			}
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	mutex_unlock(&apple_protect_pager_lock);

	if (pager == APPLE_PROTECT_PAGER_NULL) {
		pager = apple_protect_pager_create(backing_object, crypt_info);
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	mutex_lock(&apple_protect_pager_lock);
	while (!pager->is_ready) {
		thread_sleep_mutex(&pager->is_ready,
				   &apple_protect_pager_lock,
				   THREAD_UNINT);
	}
	mutex_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}

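/*
 * apple_protect_pager_trim()
 *
 * Dispose of enough unused, unmapped pagers to bring the number of
 * unmapped pagers back under "apple_protect_pager_cache_limit".
 */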
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	mutex_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have queued up enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	mutex_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}