[apple/xnu.git] / osfmk / vm / vm_apple_protect.c
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <sys/errno.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/mach_traps.h>
35 #include <mach/host_priv.h>
36 #include <mach/kern_return.h>
37 #include <mach/memory_object_control.h>
38 #include <mach/memory_object_types.h>
39 #include <mach/port.h>
40 #include <mach/policy.h>
41 #include <mach/upl.h>
42 #include <mach/thread_act.h>
43 #include <mach/mach_vm.h>
44
45 #include <kern/host.h>
46 #include <kern/kalloc.h>
47 #include <kern/page_decrypt.h>
48 #include <kern/queue.h>
49 #include <kern/thread.h>
50
51 #include <ipc/ipc_port.h>
52 #include <ipc/ipc_space.h>
53
54 #include <default_pager/default_pager_types.h>
55 #include <default_pager/default_pager_object_server.h>
56
57 #include <vm/vm_map.h>
58 #include <vm/vm_pageout.h>
59 #include <vm/memory_object.h>
60 #include <vm/vm_pageout.h>
61 #include <vm/vm_protos.h>
62
63
64 /*
65 * APPLE PROTECT MEMORY PAGER
66 *
67 * This external memory manager (EMM) handles memory from the encrypted
68 * sections of some executables protected by the DSMOS kernel extension.
69 *
70 * It mostly handles page-in requests (from memory_object_data_request()) by
71 * getting the encrypted data from its backing VM object, itself backed by
72 * the encrypted file, decrypting it and providing it to VM.
73 *
74 * The decrypted pages will never be dirtied, so the memory manager doesn't
75 * need to handle page-out requests (from memory_object_data_return()). The
76 * pages need to be mapped copy-on-write, so that the originals stay clean.
77 *
78 * We don't expect to have to handle a large number of apple-protected
79 * binaries, so the data structures are very simple (simple linked list)
80 * for now.
81 */
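
/*
 * Rough usage sketch (illustrative only; the actual call site lives
 * elsewhere in the VM / image-activation code, not in this file):
 *
 *	memory_object_t	protect_pager;
 *
 *	protect_pager = apple_protect_pager_setup(backing_object);
 *	if (protect_pager == MEMORY_OBJECT_NULL)
 *		return KERN_RESOURCE_SHORTAGE;	// e.g.: could not create a pager
 *	// map "protect_pager" copy-on-write in the task's address space;
 *	// page-ins then go through apple_protect_pager_data_request(),
 *	// which decrypts the backing pages via dsmos_page_transform().
 */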
82
83 /* forward declarations */
84 void apple_protect_pager_reference(memory_object_t mem_obj);
85 void apple_protect_pager_deallocate(memory_object_t mem_obj);
86 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
87 memory_object_control_t control,
88 vm_size_t pg_size);
89 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
90 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
91 memory_object_offset_t offset,
92 vm_size_t length,
93 vm_prot_t protection_required);
94 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
95 memory_object_offset_t offset,
96 vm_size_t data_cnt,
97 memory_object_offset_t *resid_offset,
98 int *io_error,
99 boolean_t dirty,
100 boolean_t kernel_copy,
101 int upl_flags);
102 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
103 memory_object_offset_t offset,
104 vm_size_t data_cnt);
105 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
106 memory_object_offset_t offset,
107 vm_size_t size,
108 vm_prot_t desired_access);
109 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
110 memory_object_offset_t offset,
111 vm_size_t length,
112 vm_sync_t sync_flags);
113 kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);
114
115 /*
116 * Vector of VM operations for this EMM.
117 * These routines are invoked by VM via the memory_object_*() interfaces.
118 */
119 const struct memory_object_pager_ops apple_protect_pager_ops = {
120 apple_protect_pager_reference,
121 apple_protect_pager_deallocate,
122 apple_protect_pager_init,
123 apple_protect_pager_terminate,
124 apple_protect_pager_data_request,
125 apple_protect_pager_data_return,
126 apple_protect_pager_data_initialize,
127 apple_protect_pager_data_unlock,
128 apple_protect_pager_synchronize,
129 apple_protect_pager_unmap,
130 "apple protect pager"
131 };
132
133 /*
134 * The "apple_protect_pager" describes a memory object backed by
135 * the "apple protect" EMM.
136 */
137 typedef struct apple_protect_pager {
138 memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */
139 unsigned int pager_ikot; /* JMM: fake ip_kotype() */
140 queue_chain_t pager_queue; /* next & prev pagers */
141 unsigned int ref_count; /* reference count */
142 boolean_t is_ready; /* is this pager ready ? */
143 boolean_t is_mapped; /* is this mem_obj mapped ? */
144 memory_object_control_t pager_control; /* mem object control handle */
145 vm_object_t backing_object; /* VM obj w/ encrypted data */
146 } *apple_protect_pager_t;
147 #define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
148
149 /*
150 * List of memory objects managed by this EMM.
151 * The list is protected by the "apple_protect_pager_lock" lock.
152 */
153 int apple_protect_pager_count = 0; /* number of pagers */
154 int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
155 queue_head_t apple_protect_pager_queue;
156 decl_mutex_data(,apple_protect_pager_lock)
157
158 /*
159 * Maximum number of unmapped pagers we're willing to keep around.
160 */
161 int apple_protect_pager_cache_limit = 10;
162
163 /*
164 * Statistics & counters.
165 */
166 int apple_protect_pager_count_max = 0;
167 int apple_protect_pager_count_unmapped_max = 0;
168 int apple_protect_pager_num_trim_max = 0;
169 int apple_protect_pager_num_trim_total = 0;
170
171 /* internal prototypes */
172 apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
173 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
174 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
175 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
176 boolean_t locked);
177 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
178 void apple_protect_pager_trim(void);
179
180
181 #if DEBUG
182 int apple_protect_pagerdebug = 0;
183 #define PAGER_ALL 0xffffffff
184 #define PAGER_INIT 0x00000001
185 #define PAGER_PAGEIN 0x00000002
186
187 #define PAGER_DEBUG(LEVEL, A) \
188 MACRO_BEGIN \
189 if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \
190 printf A; \
191 } \
192 MACRO_END
193 #else
194 #define PAGER_DEBUG(LEVEL, A)
195 #endif
196
197
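/*
 * apple_protect_pager_bootstrap()
 *
 * One-time module initialization: set up the lock and the (initially empty)
 * list of pagers managed by this EMM.
 */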
198 void
199 apple_protect_pager_bootstrap(void)
200 {
201 mutex_init(&apple_protect_pager_lock, 0);
202 queue_init(&apple_protect_pager_queue);
203 }
204
205 /*
206 * apple_protect_pager_init()
207 *
208  * Initializes the memory object and makes it ready to be used and mapped.
209 */
210 kern_return_t
211 apple_protect_pager_init(
212 memory_object_t mem_obj,
213 memory_object_control_t control,
214 #if !DEBUG
215 __unused
216 #endif
217 vm_size_t pg_size)
218 {
219 apple_protect_pager_t pager;
220 kern_return_t kr;
221 memory_object_attr_info_data_t attributes;
222
223 PAGER_DEBUG(PAGER_ALL,
224 ("apple_protect_pager_init: %p, %p, %x\n",
225 mem_obj, control, pg_size));
226
227 if (control == MEMORY_OBJECT_CONTROL_NULL)
228 return KERN_INVALID_ARGUMENT;
229
230 pager = apple_protect_pager_lookup(mem_obj);
231
232 memory_object_control_reference(control);
233
234 pager->pager_control = control;
235
236 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
237 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
238 attributes.cluster_size = (1 << (PAGE_SHIFT));
239 attributes.may_cache_object = FALSE;
240 attributes.temporary = TRUE;
241
242 kr = memory_object_change_attributes(
243 control,
244 MEMORY_OBJECT_ATTRIBUTE_INFO,
245 (memory_object_info_t) &attributes,
246 MEMORY_OBJECT_ATTR_INFO_COUNT);
247 if (kr != KERN_SUCCESS)
248 panic("apple_protect_pager_init: "
249 "memory_object_change_attributes() failed");
250
251 return KERN_SUCCESS;
252 }
253
254 /*
255  * apple_protect_pager_data_return()
256 *
257  * Handles page-out requests from VM. This should never happen, since
258  * the pages provided by this EMM are never supposed to be dirty or dirtied;
259  * VM should simply discard the contents and reclaim the pages if it
260  * needs to.
261 */
262 kern_return_t
263 apple_protect_pager_data_return(
264 __unused memory_object_t mem_obj,
265 __unused memory_object_offset_t offset,
266 __unused vm_size_t data_cnt,
267 __unused memory_object_offset_t *resid_offset,
268 __unused int *io_error,
269 __unused boolean_t dirty,
270 __unused boolean_t kernel_copy,
271 __unused int upl_flags)
272 {
273 panic("apple_protect_pager_data_return: should never get called");
274 return KERN_FAILURE;
275 }
276
277 kern_return_t
278 apple_protect_pager_data_initialize(
279 __unused memory_object_t mem_obj,
280 __unused memory_object_offset_t offset,
281 __unused vm_size_t data_cnt)
282 {
283 panic("apple_protect_pager_data_initialize: should never get called");
284 return KERN_FAILURE;
285 }
286
287 kern_return_t
288 apple_protect_pager_data_unlock(
289 __unused memory_object_t mem_obj,
290 __unused memory_object_offset_t offset,
291 __unused vm_size_t size,
292 __unused vm_prot_t desired_access)
293 {
294 return KERN_FAILURE;
295 }
296
297 /*
298 * apple_protect_pager_data_request()
299 *
300 * Handles page-in requests from VM.
301 */
302 kern_return_t
303 apple_protect_pager_data_request(
304 memory_object_t mem_obj,
305 memory_object_offset_t offset,
306 vm_size_t length,
307 #if !DEBUG
308 __unused
309 #endif
310 vm_prot_t protection_required)
311 {
312 apple_protect_pager_t pager;
313 memory_object_control_t mo_control;
314 upl_t upl = NULL;
315 int upl_flags;
316 upl_size_t upl_size;
317 upl_page_info_t *upl_pl;
318 vm_object_t src_object, dst_object;
319 kern_return_t kr, retval;
320 vm_map_offset_t src_mapping = 0, dst_mapping = 0;
321 vm_offset_t src_vaddr, dst_vaddr;
322 vm_offset_t cur_offset;
323 boolean_t src_map_page_by_page;
324 vm_map_entry_t map_entry;
325
326 	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %x, %llx, %llx, %x\n", mem_obj, offset, length, protection_required));
327
328 pager = apple_protect_pager_lookup(mem_obj);
329 assert(pager->is_ready);
330 assert(pager->ref_count > 1); /* pager is alive and mapped */
331
332 PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %x, %llx, %llx, %x, pager %x\n", mem_obj, offset, length, protection_required, pager));
333
334 /*
335 * Map the encrypted data in the kernel address space from the
336 * backing VM object (itself backed by the encrypted file via
337 * the vnode pager).
338 */
339 src_object = pager->backing_object;
340 assert(src_object != VM_OBJECT_NULL);
341 vm_object_reference(src_object); /* ref. for the mapping */
342 src_mapping = 0;
343 kr = vm_map_enter(kernel_map,
344 &src_mapping,
345 length,
346 0,
347 VM_FLAGS_ANYWHERE,
348 src_object,
349 offset,
350 FALSE,
351 VM_PROT_READ,
352 VM_PROT_READ,
353 VM_INHERIT_NONE);
354 switch (kr) {
355 case KERN_SUCCESS:
356 /* wire the memory to make sure it is available */
357 kr = vm_map_wire(kernel_map,
358 src_mapping,
359 src_mapping + length,
360 VM_PROT_READ,
361 FALSE);
362 if (kr != KERN_SUCCESS) {
363 /*
364 * Wiring failed, so unmap source and fall back
365 * to page by page mapping of the source.
366 */
367 kr = vm_map_remove(kernel_map,
368 src_mapping,
369 src_mapping + length,
370 VM_MAP_NO_FLAGS);
371 assert(kr == KERN_SUCCESS);
372 src_mapping = 0;
373 src_vaddr = 0;
374 src_map_page_by_page = TRUE;
375 break;
376 }
377 /* source region is now fully mapped and wired */
378 src_map_page_by_page = FALSE;
379 src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
380 break;
381 case KERN_NO_SPACE:
382 /* we couldn't map the entire source, so map it page by page */
383 src_map_page_by_page = TRUE;
384 /* release the reference for the failed mapping */
385 vm_object_deallocate(src_object);
386 break;
387 default:
388 vm_object_deallocate(src_object);
389 retval = kr;
390 goto done;
391 }
392
393
394 /*
395 * Gather in a UPL all the VM pages requested by VM.
396 */
397 mo_control = pager->pager_control;
398
399 upl_size = length;
400 upl_flags =
401 UPL_RET_ONLY_ABSENT |
402 UPL_SET_LITE |
403 UPL_NO_SYNC |
404 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
405 UPL_SET_INTERNAL;
406 kr = memory_object_upl_request(mo_control,
407 offset, upl_size,
408 &upl, NULL, NULL, upl_flags);
409 if (kr != KERN_SUCCESS) {
410 retval = kr;
411 goto done;
412 }
413
414 /*
415 * Reserve a virtual page in the kernel address space to map each
416 	 * destination physical page when its turn comes to be filled.
417 */
418 dst_object = mo_control->moc_object;
419 assert(dst_object != VM_OBJECT_NULL);
420 dst_mapping = 0;
421 vm_object_reference(kernel_object); /* ref. for mapping */
422 kr = vm_map_find_space(kernel_map,
423 &dst_mapping,
424 PAGE_SIZE_64,
425 0,
426 0,
427 &map_entry);
428 if (kr != KERN_SUCCESS) {
429 vm_object_deallocate(kernel_object);
430 retval = kr;
431 goto done;
432 }
433 map_entry->object.vm_object = kernel_object;
434 map_entry->offset = dst_mapping - VM_MIN_KERNEL_ADDRESS;
435 vm_map_unlock(kernel_map);
436 dst_vaddr = CAST_DOWN(vm_offset_t, dst_mapping);
437
438 /*
439 * Fill in the contents of the pages requested by VM.
440 */
441 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
442 for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
443 ppnum_t dst_pnum;
444
445 if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
446 /* this page is not in the UPL: skip it */
447 continue;
448 }
449
450 /*
451 * Map the source (encrypted) page in the kernel's
452 * virtual address space.
453 */
454 if (src_map_page_by_page) {
455 vm_object_reference(src_object); /* ref. for mapping */
456 kr = vm_map_enter(kernel_map,
457 &src_mapping,
458 PAGE_SIZE_64,
459 0,
460 VM_FLAGS_ANYWHERE,
461 src_object,
462 offset + cur_offset,
463 FALSE,
464 VM_PROT_READ,
465 VM_PROT_READ,
466 VM_INHERIT_NONE);
467 if (kr != KERN_SUCCESS) {
468 vm_object_deallocate(src_object);
469 retval = kr;
470 goto done;
471 }
472 kr = vm_map_wire(kernel_map,
473 src_mapping,
474 src_mapping + PAGE_SIZE_64,
475 VM_PROT_READ,
476 FALSE);
477 if (kr != KERN_SUCCESS) {
478 retval = kr;
479 kr = vm_map_remove(kernel_map,
480 src_mapping,
481 src_mapping + PAGE_SIZE_64,
482 VM_MAP_NO_FLAGS);
483 assert(kr == KERN_SUCCESS);
484 src_mapping = 0;
485 src_vaddr = 0;
486 printf("apple_protect_pager_data_request: "
487 "failed to resolve page fault for src "
488 "object %p offset 0x%llx "
489 "preempt %d error 0x%x\n",
490 src_object, offset + cur_offset,
491 get_preemption_level(), retval);
492 goto done;
493 }
494 src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
495 } else {
496 src_vaddr = src_mapping + cur_offset;
497 }
498
499 /*
500 * Establish an explicit pmap mapping of the destination
501 * physical page.
502 * We can't do a regular VM mapping because the VM page
503 * is "busy".
504 */
505 		dst_pnum = (ppnum_t)
506 upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
507 assert(dst_pnum != 0);
508 pmap_enter(kernel_pmap, dst_mapping, dst_pnum,
509 VM_PROT_READ | VM_PROT_WRITE,
510 dst_object->wimg_bits & VM_WIMG_MASK,
511 FALSE);
512
513 /*
514 * Decrypt the encrypted contents of the source page
515 * into the destination page.
516 */
517 dsmos_page_transform((const void *) src_vaddr,
518 (void *) dst_vaddr);
519
520 /*
521 * Remove the pmap mapping of the destination page
522 * in the kernel.
523 */
524 pmap_remove(kernel_pmap,
525 (addr64_t) dst_mapping,
526 (addr64_t) (dst_mapping + PAGE_SIZE_64));
527
528 if (src_map_page_by_page) {
529 /*
530 * Remove the wired kernel mapping of the source page.
531 * This releases the extra reference we took on
532 * src_object.
533 */
534 kr = vm_map_remove(kernel_map,
535 src_mapping,
536 src_mapping + PAGE_SIZE_64,
537 VM_MAP_REMOVE_KUNWIRE);
538 assert(kr == KERN_SUCCESS);
539 src_mapping = 0;
540 src_vaddr = 0;
541 }
542 }
543
544 retval = KERN_SUCCESS;
545 done:
546 if (src_mapping != 0) {
547 /* remove the wired mapping of the source pages */
548 kr = vm_map_remove(kernel_map,
549 src_mapping,
550 src_mapping + length,
551 VM_MAP_REMOVE_KUNWIRE);
552 assert(kr == KERN_SUCCESS);
553 src_mapping = 0;
554 src_vaddr = 0;
555 }
556 if (upl != NULL) {
557 /* clean up the UPL */
558
559 /*
560 * The pages are currently dirty because we've just been
561 * writing on them, but as far as we're concerned, they're
562 * clean since they contain their "original" contents as
563 * provided by us, the pager.
564 * Tell the UPL to mark them "clean".
565 */
566 upl_clear_dirty(upl, TRUE);
567
568 /* abort or commit the UPL */
569 if (retval != KERN_SUCCESS) {
570 upl_abort(upl, 0);
571 } else {
572 upl_commit(upl, NULL, 0);
573 }
574
575 /* and deallocate the UPL */
576 upl_deallocate(upl);
577 upl = NULL;
578 }
579 if (dst_mapping != 0) {
580 /* clean up the mapping of the destination pages */
581 kr = vm_map_remove(kernel_map,
582 dst_mapping,
583 dst_mapping + PAGE_SIZE_64,
584 VM_MAP_NO_FLAGS);
585 assert(kr == KERN_SUCCESS);
586 dst_mapping = 0;
587 dst_vaddr = 0;
588 }
589
590 return retval;
591 }
592
593 /*
594 * apple_protect_pager_reference()
595 *
596 * Get a reference on this memory object.
597 * For external usage only. Assumes that the initial reference count is not 0,
598  * i.e. one should not "revive" a dead pager this way.
599 */
600 void
601 apple_protect_pager_reference(
602 memory_object_t mem_obj)
603 {
604 apple_protect_pager_t pager;
605
606 pager = apple_protect_pager_lookup(mem_obj);
607
608 mutex_lock(&apple_protect_pager_lock);
609 assert(pager->ref_count > 0);
610 pager->ref_count++;
611 mutex_unlock(&apple_protect_pager_lock);
612 }
613
614
615 /*
616 * apple_protect_pager_dequeue:
617 *
618 * Removes a pager from the list of pagers.
619 *
620 * The caller must hold "apple_protect_pager_lock".
621 */
622 void
623 apple_protect_pager_dequeue(
624 apple_protect_pager_t pager)
625 {
626 assert(!pager->is_mapped);
627
628 queue_remove(&apple_protect_pager_queue,
629 pager,
630 apple_protect_pager_t,
631 pager_queue);
632 pager->pager_queue.next = NULL;
633 pager->pager_queue.prev = NULL;
634
635 apple_protect_pager_count--;
636 }
637
638 /*
639 * apple_protect_pager_terminate_internal:
640 *
641 * Trigger the asynchronous termination of the memory object associated
642 * with this pager.
643 * When the memory object is terminated, there will be one more call
644 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
645  * to finish the clean-up.
646 *
647 * "apple_protect_pager_lock" should not be held by the caller.
648 * We don't need the lock because the pager has already been removed from
649 * the pagers' list and is now ours exclusively.
650 */
651 void
652 apple_protect_pager_terminate_internal(
653 apple_protect_pager_t pager)
654 {
655 assert(pager->is_ready);
656 assert(!pager->is_mapped);
657
658 if (pager->backing_object != VM_OBJECT_NULL) {
659 vm_object_deallocate(pager->backing_object);
660 pager->backing_object = VM_OBJECT_NULL;
661 }
662
663 /* trigger the destruction of the memory object */
664 memory_object_destroy(pager->pager_control, 0);
665 }
666
667 /*
668 * apple_protect_pager_deallocate_internal()
669 *
670 * Release a reference on this pager and free it when the last
671 * reference goes away.
672 * Can be called with apple_protect_pager_lock held or not but always returns
673 * with it unlocked.
674 */
675 void
676 apple_protect_pager_deallocate_internal(
677 apple_protect_pager_t pager,
678 boolean_t locked)
679 {
680 boolean_t needs_trimming;
681 int count_unmapped;
682
683 if (! locked) {
684 mutex_lock(&apple_protect_pager_lock);
685 }
686
687 count_unmapped = (apple_protect_pager_count -
688 apple_protect_pager_count_mapped);
689 if (count_unmapped > apple_protect_pager_cache_limit) {
690 /* we have too many unmapped pagers: trim some */
691 needs_trimming = TRUE;
692 } else {
693 needs_trimming = FALSE;
694 }
695
696 /* drop a reference on this pager */
697 pager->ref_count--;
698
699 if (pager->ref_count == 1) {
700 /*
701 * Only the "named" reference is left, which means that
702 	 * no one is really holding on to this pager anymore.
703 * Terminate it.
704 */
705 apple_protect_pager_dequeue(pager);
706 /* the pager is all ours: no need for the lock now */
707 mutex_unlock(&apple_protect_pager_lock);
708 apple_protect_pager_terminate_internal(pager);
709 } else if (pager->ref_count == 0) {
710 /*
711 * Dropped the existence reference; the memory object has
712 * been terminated. Do some final cleanup and release the
713 * pager structure.
714 */
715 mutex_unlock(&apple_protect_pager_lock);
716 if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
717 memory_object_control_deallocate(pager->pager_control);
718 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
719 }
720 kfree(pager, sizeof (*pager));
721 pager = APPLE_PROTECT_PAGER_NULL;
722 } else {
723 /* there are still plenty of references: keep going... */
724 mutex_unlock(&apple_protect_pager_lock);
725 }
726
727 if (needs_trimming) {
728 apple_protect_pager_trim();
729 }
730 /* caution: lock is not held on return... */
731 }
732
733 /*
734 * apple_protect_pager_deallocate()
735 *
736 * Release a reference on this pager and free it when the last
737 * reference goes away.
738 */
739 void
740 apple_protect_pager_deallocate(
741 memory_object_t mem_obj)
742 {
743 apple_protect_pager_t pager;
744
745 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %x\n", mem_obj));
746 pager = apple_protect_pager_lookup(mem_obj);
747 apple_protect_pager_deallocate_internal(pager, FALSE);
748 }
749
750 /*
751  * apple_protect_pager_terminate(): nothing to do; clean-up happens when the last reference is dropped.
752  */
753 kern_return_t
754 apple_protect_pager_terminate(
755 #if !DEBUG
756 __unused
757 #endif
758 memory_object_t mem_obj)
759 {
760 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %x\n", mem_obj));
761
762 return KERN_SUCCESS;
763 }
764
765 /*
766  * apple_protect_pager_synchronize(): nothing to flush; just report completion to VM.
767  */
768 kern_return_t
769 apple_protect_pager_synchronize(
770 memory_object_t mem_obj,
771 memory_object_offset_t offset,
772 vm_size_t length,
773 __unused vm_sync_t sync_flags)
774 {
775 apple_protect_pager_t pager;
776
777 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %x\n", mem_obj));
778
779 pager = apple_protect_pager_lookup(mem_obj);
780
781 memory_object_synchronize_completed(pager->pager_control,
782 offset, length);
783
784 return KERN_SUCCESS;
785 }
786
787 /*
788 * apple_protect_pager_map()
789 *
790 * This allows VM to let us, the EMM, know that this memory object
791  * is currently mapped one or more times. This is called by VM only the first
792  * time the memory object gets mapped; we take one extra reference on the
793  * memory object to account for all its mappings.
794 */
795 void
796 apple_protect_pager_map(
797 memory_object_t mem_obj)
798 {
799 apple_protect_pager_t pager;
800
801 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %x\n", mem_obj));
802
803 pager = apple_protect_pager_lookup(mem_obj);
804
805 mutex_lock(&apple_protect_pager_lock);
806 assert(pager->is_ready);
807 assert(pager->ref_count > 0); /* pager is alive */
808 if (pager->is_mapped == FALSE) {
809 /*
810 * First mapping of this pager: take an extra reference
811 * that will remain until all the mappings of this pager
812 * are removed.
813 */
814 pager->is_mapped = TRUE;
815 pager->ref_count++;
816 apple_protect_pager_count_mapped++;
817 }
818 mutex_unlock(&apple_protect_pager_lock);
819 }
820
821 /*
822 * apple_protect_pager_unmap()
823 *
824 * This is called by VM when this memory object is no longer mapped anywhere.
825 */
826 kern_return_t
827 apple_protect_pager_unmap(
828 memory_object_t mem_obj)
829 {
830 apple_protect_pager_t pager;
831 int count_unmapped;
832
833 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %x\n", mem_obj));
834
835 pager = apple_protect_pager_lookup(mem_obj);
836
837 mutex_lock(&apple_protect_pager_lock);
838 if (pager->is_mapped) {
839 /*
840 * All the mappings are gone, so let go of the one extra
841 * reference that represents all the mappings of this pager.
842 */
843 apple_protect_pager_count_mapped--;
844 count_unmapped = (apple_protect_pager_count -
845 apple_protect_pager_count_mapped);
846 if (count_unmapped > apple_protect_pager_count_unmapped_max) {
847 apple_protect_pager_count_unmapped_max = count_unmapped;
848 }
849 pager->is_mapped = FALSE;
850 apple_protect_pager_deallocate_internal(pager, TRUE);
851 /* caution: deallocate_internal() released the lock ! */
852 } else {
853 mutex_unlock(&apple_protect_pager_lock);
854 }
855
856 return KERN_SUCCESS;
857 }
858
859
860 /*
861  * apple_protect_pager_lookup(): convert "mem_obj" to our pager type and sanity-check it.
862  */
863 apple_protect_pager_t
864 apple_protect_pager_lookup(
865 memory_object_t mem_obj)
866 {
867 apple_protect_pager_t pager;
868
869 pager = (apple_protect_pager_t) mem_obj;
870 assert(pager->pager_ops == &apple_protect_pager_ops);
871 assert(pager->ref_count > 0);
872 return pager;
873 }
874
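/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new pager for "backing_object".  If another
 * thread races us and creates a pager for the same backing object first,
 * discard ours and return the winner's instead.
 */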
875 apple_protect_pager_t
876 apple_protect_pager_create(
877 vm_object_t backing_object)
878 {
879 apple_protect_pager_t pager, pager2;
880 memory_object_control_t control;
881 kern_return_t kr;
882
883 pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
884 if (pager == APPLE_PROTECT_PAGER_NULL) {
885 return APPLE_PROTECT_PAGER_NULL;
886 }
887
888 /*
889 * The vm_map call takes both named entry ports and raw memory
890 * objects in the same parameter. We need to make sure that
891 * vm_map does not see this object as a named entry port. So,
892 * we reserve the second word in the object for a fake ip_kotype
893 * setting - that will tell vm_map to use it as a memory object.
894 */
895 pager->pager_ops = &apple_protect_pager_ops;
896 pager->pager_ikot = IKOT_MEMORY_OBJECT;
897 	pager->is_ready = FALSE;	/* not ready until it has a "name" */
898 pager->ref_count = 2; /* existence + setup reference */
899 pager->is_mapped = FALSE;
900 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
901 pager->backing_object = backing_object;
902 vm_object_reference(backing_object);
903
904 mutex_lock(&apple_protect_pager_lock);
905 /* see if anyone raced us to create a pager for the same object */
906 queue_iterate(&apple_protect_pager_queue,
907 pager2,
908 apple_protect_pager_t,
909 pager_queue) {
910 if (pager2->backing_object == backing_object) {
911 break;
912 }
913 }
914 if (! queue_end(&apple_protect_pager_queue,
915 (queue_entry_t) pager2)) {
916 /* while we hold the lock, transfer our setup ref to winner */
917 pager2->ref_count++;
918 /* we lost the race, down with the loser... */
919 mutex_unlock(&apple_protect_pager_lock);
920 vm_object_deallocate(pager->backing_object);
921 pager->backing_object = VM_OBJECT_NULL;
922 kfree(pager, sizeof (*pager));
923 /* ... and go with the winner */
924 pager = pager2;
925 /* let the winner make sure the pager gets ready */
926 return pager;
927 }
928
929 /* enter new pager at the head of our list of pagers */
930 queue_enter_first(&apple_protect_pager_queue,
931 pager,
932 apple_protect_pager_t,
933 pager_queue);
934 apple_protect_pager_count++;
935 if (apple_protect_pager_count > apple_protect_pager_count_max) {
936 apple_protect_pager_count_max = apple_protect_pager_count;
937 }
938 mutex_unlock(&apple_protect_pager_lock);
939
940 kr = memory_object_create_named((memory_object_t) pager,
941 0,
942 &control);
943 assert(kr == KERN_SUCCESS);
944
945 mutex_lock(&apple_protect_pager_lock);
946 /* the new pager is now ready to be used */
947 pager->is_ready = TRUE;
948 mutex_unlock(&apple_protect_pager_lock);
949
950 /* wakeup anyone waiting for this pager to be ready */
951 thread_wakeup(&pager->is_ready);
952
953 return pager;
954 }
955
956 /*
957 * apple_protect_pager_setup()
958 *
959 * Provide the caller with a memory object backed by the provided
960 * "backing_object" VM object. If such a memory object already exists,
961  * re-use it; otherwise, create a new memory object.
962 */
963 memory_object_t
964 apple_protect_pager_setup(
965 vm_object_t backing_object)
966 {
967 apple_protect_pager_t pager;
968
969 mutex_lock(&apple_protect_pager_lock);
970
971 queue_iterate(&apple_protect_pager_queue,
972 pager,
973 apple_protect_pager_t,
974 pager_queue) {
975 if (pager->backing_object == backing_object) {
976 break;
977 }
978 }
979 if (queue_end(&apple_protect_pager_queue,
980 (queue_entry_t) pager)) {
981 /* no existing pager for this backing object */
982 pager = APPLE_PROTECT_PAGER_NULL;
983 } else {
984 /* make sure pager doesn't disappear */
985 pager->ref_count++;
986 }
987
988 mutex_unlock(&apple_protect_pager_lock);
989
990 if (pager == APPLE_PROTECT_PAGER_NULL) {
991 pager = apple_protect_pager_create(backing_object);
992 if (pager == APPLE_PROTECT_PAGER_NULL) {
993 return MEMORY_OBJECT_NULL;
994 }
995 }
996
997 mutex_lock(&apple_protect_pager_lock);
998 while (!pager->is_ready) {
999 thread_sleep_mutex(&pager->is_ready,
1000 &apple_protect_pager_lock,
1001 THREAD_UNINT);
1002 }
1003 mutex_unlock(&apple_protect_pager_lock);
1004
1005 return (memory_object_t) pager;
1006 }
1007
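/*
 * apple_protect_pager_trim()
 *
 * Terminate enough unused (unmapped) pagers to bring us back under
 * "apple_protect_pager_cache_limit".
 */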
1008 void
1009 apple_protect_pager_trim(void)
1010 {
1011 apple_protect_pager_t pager, prev_pager;
1012 queue_head_t trim_queue;
1013 int num_trim;
1014 int count_unmapped;
1015
1016 mutex_lock(&apple_protect_pager_lock);
1017
1018 /*
1019 	 * We have too many pagers: try to trim some unused ones,
1020 * starting with the oldest pager at the end of the queue.
1021 */
1022 queue_init(&trim_queue);
1023 num_trim = 0;
1024
1025 for (pager = (apple_protect_pager_t)
1026 queue_last(&apple_protect_pager_queue);
1027 !queue_end(&apple_protect_pager_queue,
1028 (queue_entry_t) pager);
1029 pager = prev_pager) {
1030 /* get prev elt before we dequeue */
1031 prev_pager = (apple_protect_pager_t)
1032 queue_prev(&pager->pager_queue);
1033
1034 if (pager->ref_count == 2 &&
1035 pager->is_ready &&
1036 !pager->is_mapped) {
1037 /* this pager can be trimmed */
1038 num_trim++;
1039 /* remove this pager from the main list ... */
1040 apple_protect_pager_dequeue(pager);
1041 /* ... and add it to our trim queue */
1042 queue_enter_first(&trim_queue,
1043 pager,
1044 apple_protect_pager_t,
1045 pager_queue);
1046
1047 count_unmapped = (apple_protect_pager_count -
1048 apple_protect_pager_count_mapped);
1049 if (count_unmapped <= apple_protect_pager_cache_limit) {
1050 				/* back under the limit: we've trimmed enough */
1051 break;
1052 }
1053 }
1054 }
1055 if (num_trim > apple_protect_pager_num_trim_max) {
1056 apple_protect_pager_num_trim_max = num_trim;
1057 }
1058 apple_protect_pager_num_trim_total += num_trim;
1059
1060 mutex_unlock(&apple_protect_pager_lock);
1061
1062 /* terminate the trimmed pagers */
1063 while (!queue_empty(&trim_queue)) {
1064 queue_remove_first(&trim_queue,
1065 pager,
1066 apple_protect_pager_t,
1067 pager_queue);
1068 pager->pager_queue.next = NULL;
1069 pager->pager_queue.prev = NULL;
1070 assert(pager->ref_count == 2);
1071 /*
1072 * We can't call deallocate_internal() because the pager
1073 * has already been dequeued, but we still need to remove
1074 * a reference.
1075 */
1076 pager->ref_count--;
1077 apple_protect_pager_terminate_internal(pager);
1078 }
1079 }