/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>


/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list)
 * for now.
 */
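
/*
 * Illustrative sketch (not part of this pager's implementation): the
 * consumer of this EMM is expected to live in the VM map code (presumably
 * vm_map_apple_protected()), which maps the encrypted text segment of a
 * protected binary. Under that assumption, the flow would look roughly
 * like this, with "backing_object" being the VM object backed by the
 * encrypted file:
 *
 *	memory_object_t protect_mem_obj;
 *
 *	protect_mem_obj = apple_protect_pager_setup(backing_object);
 *	if (protect_mem_obj == MEMORY_OBJECT_NULL)
 *		return KERN_FAILURE;
 *	-- map "protect_mem_obj" copy-on-write in the target vm_map --
 *	memory_object_deallocate(protect_mem_obj);	-- drop setup ref --
 *
 * Only apple_protect_pager_setup() and memory_object_deallocate() are real
 * entry points here; the surrounding sequence is an assumption meant to
 * show how the routines in this file fit together.
 */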

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
	memory_object_control_t control,
	vm_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_prot_t protection_required);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t data_cnt,
	memory_object_offset_t *resid_offset,
	int *io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_sync_t sync_flags);
kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_unmap,
	"apple protect pager"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready ? */
	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
	memory_object_control_t	pager_control;	/* mem object control handle */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
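
/*
 * Reference counting summary (derived from the code below): a pager is
 * created with ref_count == 2, one "existence" reference that lasts until
 * the memory object is terminated and one "setup" reference that is handed
 * to the caller of apple_protect_pager_setup(). The first mapping of the
 * memory object adds one more reference (apple_protect_pager_map()), which
 * is dropped when the last mapping goes away (apple_protect_pager_unmap()).
 * When only the existence reference is left,
 * apple_protect_pager_deallocate_internal() triggers the termination of the
 * memory object; when the count reaches 0, the pager structure is freed.
 */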

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_mutex_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;
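
/*
 * Note: this limit is enforced by apple_protect_pager_deallocate_internal(),
 * which calls apple_protect_pager_trim() whenever the number of unmapped
 * pagers exceeds it; apple_protect_pager_trim() then terminates the oldest
 * unused pagers until the count is back under the limit.
 */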

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;

/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
	boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {		\
		printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


void
apple_protect_pager_bootstrap(void)
{
	mutex_init(&apple_protect_pager_lock, 0);
	queue_init(&apple_protect_pager_queue);
}

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	vm_size_t		pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t	attributes;

	PAGER_DEBUG(PAGER_ALL,
		("apple_protect_pager_init: %p, %p, %x\n",
		 mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl = NULL;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		src_mapping = 0, dst_mapping = 0;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	boolean_t		src_map_page_by_page;
	vm_map_entry_t		map_entry;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %x, %llx, %llx, %x\n", mem_obj, offset, length, protection_required));

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %x, %llx, %llx, %x, pager %x\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object);	/* ref. for the mapping */
	src_mapping = 0;
	kr = vm_map_enter(kernel_map,
		&src_mapping,
		length,
		0,
		VM_FLAGS_ANYWHERE,
		src_object,
		offset,
		FALSE,
		VM_PROT_READ,
		VM_PROT_READ,
		VM_INHERIT_NONE);
	switch (kr) {
	case KERN_SUCCESS:
		src_map_page_by_page = FALSE;
		src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
		break;
	case KERN_NO_SPACE:
		/* we can't map the entire section, so map it page by page */
		src_map_page_by_page = TRUE;
		vm_object_deallocate(src_object);
		break;
	default:
		vm_object_deallocate(src_object);
		retval = kr;
		goto done;
	}


	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	kr = memory_object_upl_request(mo_control,
		offset, upl_size,
		&upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}

	/*
	 * Reserve a virtual page in the kernel address space to map each
	 * destination physical page when it's its turn to be filled.
	 */
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);
	dst_mapping = 0;
	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
		&dst_mapping,
		PAGE_SIZE_64,
		0,
		0,
		&map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = dst_mapping - VM_MIN_KERNEL_ADDRESS;
	vm_map_unlock(kernel_map);
	dst_vaddr = CAST_DOWN(vm_offset_t, dst_mapping);

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
			/* this page is not in the UPL: skip it */
			continue;
		}
		dst_pnum = (ppnum_t)
			upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap, dst_mapping, dst_pnum,
			VM_PROT_READ | VM_PROT_WRITE,
			dst_object->wimg_bits & VM_WIMG_MASK,
			FALSE);

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 */
		if (src_map_page_by_page) {
			vm_object_reference(src_object); /* ref. for mapping */
			kr = vm_map_enter(kernel_map,
				&src_mapping,
				PAGE_SIZE_64,
				0,
				VM_FLAGS_ANYWHERE,
				src_object,
				offset + cur_offset,
				FALSE,
				VM_PROT_READ,
				VM_PROT_READ,
				VM_INHERIT_NONE);
			if (kr != KERN_SUCCESS) {
				vm_object_deallocate(src_object);
				retval = kr;
				goto done;
			}
			src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
		} else {
			src_vaddr = src_mapping + cur_offset;
		}

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		dsmos_page_transform((const void *) src_vaddr,
			(void *) dst_vaddr);

		/*
		 * Remove the pmap mapping of the destination page
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			(addr64_t) dst_mapping,
			(addr64_t) (dst_mapping + PAGE_SIZE_64));
		if (src_map_page_by_page) {
			/*
			 * Remove the kernel mapping of the source page.
			 * This releases the extra reference we took on
			 * src_object.
			 */
			kr = vm_map_remove(kernel_map,
				src_mapping,
				src_mapping + PAGE_SIZE_64,
				VM_MAP_NO_FLAGS);
			assert(kr == KERN_SUCCESS);
			src_mapping = 0;
		}
	}

	retval = KERN_SUCCESS;
done:
	if (src_mapping != 0) {
		/* clean up the mapping of the source pages */
		kr = vm_map_remove(kernel_map,
			src_mapping,
			src_mapping + length,
			VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		src_mapping = 0;
		src_vaddr = 0;
	}
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			upl_commit(upl, NULL, 0);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (dst_mapping != 0) {
		/* clean up the mapping of the destination pages */
		kr = vm_map_remove(kernel_map,
			dst_mapping,
			dst_mapping + PAGE_SIZE_64,
			VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		dst_mapping = 0;
		dst_vaddr = 0;
	}

	return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	mutex_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t	pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		pager,
		apple_protect_pager_t,
		pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t	pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (! locked) {
		mutex_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		mutex_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		mutex_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		mutex_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %x\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %x\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %x\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
		offset, length);

	return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM when the
 * memory object is first mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
void
apple_protect_pager_map(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %x\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	mutex_unlock(&apple_protect_pager_lock);
}

/*
 * apple_protect_pager_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %x\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		mutex_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}


/*
 *
 */
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t	backing_object)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE; /* not ready until it has a "name" */
	pager->ref_count = 2;	/* existence + setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	vm_object_reference(backing_object);

	mutex_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		pager2,
		apple_protect_pager_t,
		pager_queue) {
		if (pager2->backing_object == backing_object) {
			break;
		}
	}
	if (! queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		mutex_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
		pager,
		apple_protect_pager_t,
		pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	mutex_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
		0,
		&control);
	assert(kr == KERN_SUCCESS);

	mutex_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	mutex_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object. If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t	backing_object)
{
	apple_protect_pager_t	pager;

	mutex_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		pager,
		apple_protect_pager_t,
		pager_queue) {
		if (pager->backing_object == backing_object) {
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	mutex_unlock(&apple_protect_pager_lock);

	if (pager == APPLE_PROTECT_PAGER_NULL) {
		pager = apple_protect_pager_create(backing_object);
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	mutex_lock(&apple_protect_pager_lock);
	while (!pager->is_ready) {
		thread_sleep_mutex(&pager->is_ready,
			&apple_protect_pager_lock,
			THREAD_UNINT);
	}
	mutex_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}

void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	mutex_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
				pager,
				apple_protect_pager_t,
				pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have trimmed enough pagers */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	mutex_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
			pager,
			apple_protect_pager_t,
			pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}