[apple/xnu.git] osfmk/vm/vm_apple_protect.c (xnu-1228.0.2)
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/page_decrypt.h>
46 #include <kern/queue.h>
47 #include <kern/thread.h>
48
49 #include <ipc/ipc_port.h>
50 #include <ipc/ipc_space.h>
51
52 #include <default_pager/default_pager_types.h>
53 #include <default_pager/default_pager_object_server.h>
54
55 #include <vm/vm_fault.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_pageout.h>
58 #include <vm/memory_object.h>
59 #include <vm/vm_pageout.h>
60 #include <vm/vm_protos.h>
61
62
63 /*
64 * APPLE PROTECT MEMORY PAGER
65 *
66 * This external memory manager (EMM) handles memory from the encrypted
67 * sections of some executables protected by the DSMOS kernel extension.
68 *
69 * It mostly handles page-in requests (from memory_object_data_request()) by
70 * getting the encrypted data from its backing VM object, itself backed by
71 * the encrypted file, decrypting it and providing it to VM.
72 *
73 * The decrypted pages will never be dirtied, so the memory manager doesn't
74 * need to handle page-out requests (from memory_object_data_return()). The
75 * pages need to be mapped copy-on-write, so that the originals stay clean.
76 *
77 * We don't expect to have to handle a large number of apple-protected
78 * binaries, so the data structures are very simple (simple linked list)
79 * for now.
80 */
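/*
 * Note: the pager is obtained through apple_protect_pager_setup() below; at
 * the time of this release the usual caller is vm_map_apple_protected() (see
 * osfmk/vm/vm_map.c), reached when a protected Mach-O segment gets mapped.
 * The page transform itself is not implemented here: it is performed by
 * dsmos_page_transform() (declared in <kern/page_decrypt.h>), which the
 * DSMOS kernel extension is expected to provide.
 */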
81
82 /* forward declarations */
83 void apple_protect_pager_reference(memory_object_t mem_obj);
84 void apple_protect_pager_deallocate(memory_object_t mem_obj);
85 kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
86 memory_object_control_t control,
87 vm_size_t pg_size);
88 kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
89 kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
90 memory_object_offset_t offset,
91 vm_size_t length,
92 vm_prot_t protection_required,
93 memory_object_fault_info_t fault_info);
94 kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
95 memory_object_offset_t offset,
96 vm_size_t data_cnt,
97 memory_object_offset_t *resid_offset,
98 int *io_error,
99 boolean_t dirty,
100 boolean_t kernel_copy,
101 int upl_flags);
102 kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
103 memory_object_offset_t offset,
104 vm_size_t data_cnt);
105 kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
106 memory_object_offset_t offset,
107 vm_size_t size,
108 vm_prot_t desired_access);
109 kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
110 memory_object_offset_t offset,
111 vm_size_t length,
112 vm_sync_t sync_flags);
113 kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);
114
115 /*
116 * Vector of VM operations for this EMM.
117 * These routines are invoked by VM via the memory_object_*() interfaces.
118 */
119 const struct memory_object_pager_ops apple_protect_pager_ops = {
120 apple_protect_pager_reference,
121 apple_protect_pager_deallocate,
122 apple_protect_pager_init,
123 apple_protect_pager_terminate,
124 apple_protect_pager_data_request,
125 apple_protect_pager_data_return,
126 apple_protect_pager_data_initialize,
127 apple_protect_pager_data_unlock,
128 apple_protect_pager_synchronize,
129 apple_protect_pager_unmap,
130 "apple protect pager"
131 };
132
133 /*
134 * The "apple_protect_pager" describes a memory object backed by
135 * the "apple protect" EMM.
136 */
137 typedef struct apple_protect_pager {
138 memory_object_pager_ops_t pager_ops; /* == &apple_protect_pager_ops */
139 unsigned int pager_ikot; /* JMM: fake ip_kotype() */
140 queue_chain_t pager_queue; /* next & prev pagers */
141 unsigned int ref_count; /* reference count */
142 boolean_t is_ready; /* is this pager ready ? */
143 boolean_t is_mapped; /* is this mem_obj mapped ? */
144 memory_object_control_t pager_control; /* mem object control handle */
145 vm_object_t backing_object; /* VM obj w/ encrypted data */
146 } *apple_protect_pager_t;
147 #define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)
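/*
 * The first two fields (pager_ops and pager_ikot) mirror the layout expected
 * at the start of a memory object, so a pointer to this structure can be
 * handed out as a memory_object_t; see the fake ip_kotype comment in
 * apple_protect_pager_create().
 */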
148
149 /*
150 * List of memory objects managed by this EMM.
151 * The list is protected by the "apple_protect_pager_lock" lock.
152 */
153 int apple_protect_pager_count = 0; /* number of pagers */
154 int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
155 queue_head_t apple_protect_pager_queue;
156 decl_mutex_data(,apple_protect_pager_lock)
157
158 /*
159 * Maximum number of unmapped pagers we're willing to keep around.
160 */
161 int apple_protect_pager_cache_limit = 10;
162
163 /*
164 * Statistics & counters.
165 */
166 int apple_protect_pager_count_max = 0;
167 int apple_protect_pager_count_unmapped_max = 0;
168 int apple_protect_pager_num_trim_max = 0;
169 int apple_protect_pager_num_trim_total = 0;
170
171 /* internal prototypes */
172 apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
173 apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
174 void apple_protect_pager_dequeue(apple_protect_pager_t pager);
175 void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
176 boolean_t locked);
177 void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
178 void apple_protect_pager_trim(void);
179
180
181 #if DEBUG
182 int apple_protect_pagerdebug = 0;
183 #define PAGER_ALL 0xffffffff
184 #define PAGER_INIT 0x00000001
185 #define PAGER_PAGEIN 0x00000002
186
187 #define PAGER_DEBUG(LEVEL, A) \
188 MACRO_BEGIN \
189 if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \
190 printf A; \
191 } \
192 MACRO_END
193 #else
194 #define PAGER_DEBUG(LEVEL, A)
195 #endif
196
197
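/*
 * apple_protect_pager_bootstrap()
 *
 * Called once at startup to initialize the global list of pagers and the
 * lock that protects it, before any apple-protected binary can be mapped.
 */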
198 void
199 apple_protect_pager_bootstrap(void)
200 {
201 mutex_init(&apple_protect_pager_lock, 0);
202 queue_init(&apple_protect_pager_queue);
203 }
204
205 /*
206 * apple_protect_pager_init()
207 *
208 * Initializes the memory object and makes it ready to be used and mapped.
209 */
210 kern_return_t
211 apple_protect_pager_init(
212 memory_object_t mem_obj,
213 memory_object_control_t control,
214 #if !DEBUG
215 __unused
216 #endif
217 vm_size_t pg_size)
218 {
219 apple_protect_pager_t pager;
220 kern_return_t kr;
221 memory_object_attr_info_data_t attributes;
222
223 PAGER_DEBUG(PAGER_ALL,
224 ("apple_protect_pager_init: %p, %p, %x\n",
225 mem_obj, control, pg_size));
226
227 if (control == MEMORY_OBJECT_CONTROL_NULL)
228 return KERN_INVALID_ARGUMENT;
229
230 pager = apple_protect_pager_lookup(mem_obj);
231
232 memory_object_control_reference(control);
233
234 pager->pager_control = control;
235
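/*
 * MEMORY_OBJECT_COPY_DELAY gives the copy-on-write behaviour the overview
 * above relies on: the decrypted pages this pager provides can be mapped
 * copy-on-write and stay clean.
 */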
236 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
237 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
238 attributes.cluster_size = (1 << (PAGE_SHIFT));
239 attributes.may_cache_object = FALSE;
240 attributes.temporary = TRUE;
241
242 kr = memory_object_change_attributes(
243 control,
244 MEMORY_OBJECT_ATTRIBUTE_INFO,
245 (memory_object_info_t) &attributes,
246 MEMORY_OBJECT_ATTR_INFO_COUNT);
247 if (kr != KERN_SUCCESS)
248 panic("apple_protect_pager_init: "
249 "memory_object_change_attributes() failed");
250
251 return KERN_SUCCESS;
252 }
253
254 /*
255 * apple_protect_pager_data_return()
256 *
257 * Handles page-out requests from VM. This should never happen since
258 * the pages provided by this EMM are not supposed to be dirty or dirtied
259 * and VM should simply discard the contents and reclaim the pages if it
260 * needs to.
261 */
262 kern_return_t
263 apple_protect_pager_data_return(
264 __unused memory_object_t mem_obj,
265 __unused memory_object_offset_t offset,
266 __unused vm_size_t data_cnt,
267 __unused memory_object_offset_t *resid_offset,
268 __unused int *io_error,
269 __unused boolean_t dirty,
270 __unused boolean_t kernel_copy,
271 __unused int upl_flags)
272 {
273 panic("apple_protect_pager_data_return: should never get called");
274 return KERN_FAILURE;
275 }
276
277 kern_return_t
278 apple_protect_pager_data_initialize(
279 __unused memory_object_t mem_obj,
280 __unused memory_object_offset_t offset,
281 __unused vm_size_t data_cnt)
282 {
283 panic("apple_protect_pager_data_initialize: should never get called");
284 return KERN_FAILURE;
285 }
286
287 kern_return_t
288 apple_protect_pager_data_unlock(
289 __unused memory_object_t mem_obj,
290 __unused memory_object_offset_t offset,
291 __unused vm_size_t size,
292 __unused vm_prot_t desired_access)
293 {
294 return KERN_FAILURE;
295 }
296
297 /*
298 * apple_protect_pager_data_request()
299 *
300 * Handles page-in requests from VM.
301 */
302 kern_return_t
303 apple_protect_pager_data_request(
304 memory_object_t mem_obj,
305 memory_object_offset_t offset,
306 vm_size_t length,
307 #if !DEBUG
308 __unused
309 #endif
310 vm_prot_t protection_required,
311 memory_object_fault_info_t mo_fault_info)
312 {
313 apple_protect_pager_t pager;
314 memory_object_control_t mo_control;
315 upl_t upl;
316 int upl_flags;
317 upl_size_t upl_size;
318 upl_page_info_t *upl_pl;
319 vm_object_t src_object, dst_object;
320 kern_return_t kr, retval;
321 vm_map_offset_t kernel_mapping;
322 vm_offset_t src_vaddr, dst_vaddr;
323 vm_offset_t cur_offset;
324 vm_map_entry_t map_entry;
325 kern_return_t error_code;
326 vm_prot_t prot;
327 vm_page_t src_page, top_page;
328 int interruptible;
329 vm_object_fault_info_t fault_info;
330
331 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
332
333 src_object = VM_OBJECT_NULL;
334 kernel_mapping = 0;
335 upl = NULL;
336 fault_info = (vm_object_fault_info_t) mo_fault_info;
337 interruptible = fault_info->interruptible;
338
339 pager = apple_protect_pager_lookup(mem_obj);
340 assert(pager->is_ready);
341 assert(pager->ref_count > 1); /* pager is alive and mapped */
342
343 PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
344
345 /*
346 * Gather in a UPL all the VM pages requested by VM.
347 */
348 mo_control = pager->pager_control;
349
350 upl_size = length;
351 upl_flags =
352 UPL_RET_ONLY_ABSENT |
353 UPL_SET_LITE |
354 UPL_NO_SYNC |
355 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
356 UPL_SET_INTERNAL;
357 kr = memory_object_upl_request(mo_control,
358 offset, upl_size,
359 &upl, NULL, NULL, upl_flags);
360 if (kr != KERN_SUCCESS) {
361 retval = kr;
362 goto done;
363 }
364 dst_object = mo_control->moc_object;
365 assert(dst_object != VM_OBJECT_NULL);
366
367
368 /*
369 * Reserve 2 virtual pages in the kernel address space to map the
370 * source and destination physical pages while each pair is being
371 * processed.
372 */
373 vm_object_reference(kernel_object); /* ref. for mapping */
374 kr = vm_map_find_space(kernel_map,
375 &kernel_mapping,
376 2 * PAGE_SIZE_64,
377 0,
378 0,
379 &map_entry);
380 if (kr != KERN_SUCCESS) {
381 vm_object_deallocate(kernel_object);
382 retval = kr;
383 goto done;
384 }
385 map_entry->object.vm_object = kernel_object;
386 map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
387 vm_map_unlock(kernel_map);
388 src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
389 dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
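/*
 * Within that reserved range, the first page will map the encrypted source
 * page (src_vaddr) and the second page will map the destination page from
 * the UPL (dst_vaddr).
 */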
390
391 /*
392 * We'll map the encrypted data in the kernel address space from the
393 * backing VM object (itself backed by the encrypted file via
394 * the vnode pager).
395 */
396 src_object = pager->backing_object;
397 assert(src_object != VM_OBJECT_NULL);
398 vm_object_reference(src_object); /* to keep the source object alive */
399
400 /*
401 * Fill in the contents of the pages requested by VM.
402 */
403 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
404 for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
405 ppnum_t dst_pnum;
406
407 if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
408 /* this page is not in the UPL: skip it */
409 continue;
410 }
411
412 /*
413 * Map the source (encrypted) page in the kernel's
414 * virtual address space.
415 * We already hold a reference on the src_object.
416 */
417 retry_src_fault:
418 vm_object_lock(src_object);
419 vm_object_paging_begin(src_object);
420 error_code = 0;
421 prot = VM_PROT_READ;
422 kr = vm_fault_page(src_object,
423 offset + cur_offset,
424 VM_PROT_READ,
425 FALSE,
426 &prot,
427 &src_page,
428 &top_page,
429 NULL,
430 &error_code,
431 FALSE,
432 FALSE,
433 fault_info);
434 switch (kr) {
435 case VM_FAULT_SUCCESS:
436 break;
437 case VM_FAULT_RETRY:
438 goto retry_src_fault;
439 case VM_FAULT_MEMORY_SHORTAGE:
440 if (vm_page_wait(interruptible)) {
441 goto retry_src_fault;
442 }
443 /* fall thru */
444 case VM_FAULT_INTERRUPTED:
445 retval = MACH_SEND_INTERRUPTED;
446 goto done;
447 case VM_FAULT_MEMORY_ERROR:
448 /* the page is not there ! */
449 if (error_code) {
450 retval = error_code;
451 } else {
452 retval = KERN_MEMORY_ERROR;
453 }
454 goto done;
455 default:
456 retval = KERN_FAILURE;
457 goto done;
458 }
459 assert(src_page != VM_PAGE_NULL);
460 assert(src_page->busy);
461
462 /*
463 * Establish an explicit mapping of the source
464 * physical page.
465 */
466 pmap_enter(kernel_pmap,
467 kernel_mapping,
468 src_page->phys_page,
469 VM_PROT_READ,
470 src_object->wimg_bits & VM_WIMG_MASK,
471 TRUE);
472 /*
473 * Establish an explicit pmap mapping of the destination
474 * physical page.
475 * We can't do a regular VM mapping because the VM page
476 * is "busy".
477 */
478 dst_pnum = (ppnum_t)
479 upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
480 assert(dst_pnum != 0);
481 pmap_enter(kernel_pmap,
482 kernel_mapping + PAGE_SIZE_64,
483 dst_pnum,
484 VM_PROT_READ | VM_PROT_WRITE,
485 dst_object->wimg_bits & VM_WIMG_MASK,
486 TRUE);
487
488 /*
489 * Decrypt the encrypted contents of the source page
490 * into the destination page.
491 */
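/*
 * dsmos_page_transform() is declared in <kern/page_decrypt.h>; the actual
 * transform is supplied by the DSMOS kernel extension mentioned in the
 * overview above.
 */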
492 dsmos_page_transform((const void *) src_vaddr,
493 (void *) dst_vaddr);
494
495 /*
496 * Remove the pmap mapping of the source and destination pages
497 * in the kernel.
498 */
499 pmap_remove(kernel_pmap,
500 (addr64_t) kernel_mapping,
501 (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));
502
503 /*
504 * Clean up the result of vm_fault_page() for the source page.
505 */
506 PAGE_WAKEUP_DONE(src_page);
507 vm_object_paging_end(src_page->object);
508 vm_object_unlock(src_page->object);
509 if (top_page != VM_PAGE_NULL) {
510 vm_object_t top_object;
511
512 top_object = top_page->object;
513 vm_object_lock(top_object);
514 VM_PAGE_FREE(top_page);
515 vm_object_paging_end(top_object);
516 vm_object_unlock(top_object);
517 }
518 }
519
520 retval = KERN_SUCCESS;
521 done:
522 if (upl != NULL) {
523 /* clean up the UPL */
524
525 /*
526 * The pages are currently dirty because we've just been
527 * writing on them, but as far as we're concerned, they're
528 * clean since they contain their "original" contents as
529 * provided by us, the pager.
530 * Tell the UPL to mark them "clean".
531 */
532 upl_clear_dirty(upl, TRUE);
533
534 /* abort or commit the UPL */
535 if (retval != KERN_SUCCESS) {
536 upl_abort(upl, 0);
537 } else {
538 upl_commit(upl, NULL, 0);
539 }
540
541 /* and deallocate the UPL */
542 upl_deallocate(upl);
543 upl = NULL;
544 }
545 if (kernel_mapping != 0) {
546 /* clean up the mapping of the source and destination pages */
547 kr = vm_map_remove(kernel_map,
548 kernel_mapping,
549 kernel_mapping + (2 * PAGE_SIZE_64),
550 VM_MAP_NO_FLAGS);
551 assert(kr == KERN_SUCCESS);
552 kernel_mapping = 0;
553 src_vaddr = 0;
554 dst_vaddr = 0;
555 }
556 if (src_object != VM_OBJECT_NULL) {
557 vm_object_deallocate(src_object);
558 }
559
560 return retval;
561 }
562
563 /*
564 * apple_protect_pager_reference()
565 *
566 * Get a reference on this memory object.
567 * For external usage only. Assumes that the initial reference count is not 0,
568 * i.e. one should not "revive" a dead pager this way.
569 */
570 void
571 apple_protect_pager_reference(
572 memory_object_t mem_obj)
573 {
574 apple_protect_pager_t pager;
575
576 pager = apple_protect_pager_lookup(mem_obj);
577
578 mutex_lock(&apple_protect_pager_lock);
579 assert(pager->ref_count > 0);
580 pager->ref_count++;
581 mutex_unlock(&apple_protect_pager_lock);
582 }
583
584
585 /*
586 * apple_protect_pager_dequeue:
587 *
588 * Removes a pager from the list of pagers.
589 *
590 * The caller must hold "apple_protect_pager_lock".
591 */
592 void
593 apple_protect_pager_dequeue(
594 apple_protect_pager_t pager)
595 {
596 assert(!pager->is_mapped);
597
598 queue_remove(&apple_protect_pager_queue,
599 pager,
600 apple_protect_pager_t,
601 pager_queue);
602 pager->pager_queue.next = NULL;
603 pager->pager_queue.prev = NULL;
604
605 apple_protect_pager_count--;
606 }
607
608 /*
609 * apple_protect_pager_terminate_internal:
610 *
611 * Trigger the asynchronous termination of the memory object associated
612 * with this pager.
613 * When the memory object is terminated, there will be one more call
614 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
615 * to finish the clean up.
616 *
617 * "apple_protect_pager_lock" should not be held by the caller.
618 * We don't need the lock because the pager has already been removed from
619 * the pagers' list and is now ours exclusively.
620 */
621 void
622 apple_protect_pager_terminate_internal(
623 apple_protect_pager_t pager)
624 {
625 assert(pager->is_ready);
626 assert(!pager->is_mapped);
627
628 if (pager->backing_object != VM_OBJECT_NULL) {
629 vm_object_deallocate(pager->backing_object);
630 pager->backing_object = VM_OBJECT_NULL;
631 }
632
633 /* trigger the destruction of the memory object */
634 memory_object_destroy(pager->pager_control, 0);
635 }
636
637 /*
638 * apple_protect_pager_deallocate_internal()
639 *
640 * Release a reference on this pager and free it when the last
641 * reference goes away.
642 * Can be called with apple_protect_pager_lock held or not but always returns
643 * with it unlocked.
644 */
645 void
646 apple_protect_pager_deallocate_internal(
647 apple_protect_pager_t pager,
648 boolean_t locked)
649 {
650 boolean_t needs_trimming;
651 int count_unmapped;
652
653 if (! locked) {
654 mutex_lock(&apple_protect_pager_lock);
655 }
656
657 count_unmapped = (apple_protect_pager_count -
658 apple_protect_pager_count_mapped);
659 if (count_unmapped > apple_protect_pager_cache_limit) {
660 /* we have too many unmapped pagers: trim some */
661 needs_trimming = TRUE;
662 } else {
663 needs_trimming = FALSE;
664 }
665
666 /* drop a reference on this pager */
667 pager->ref_count--;
668
669 if (pager->ref_count == 1) {
670 /*
671 * Only the "named" reference is left, which means that
672 * no one is really holding on to this pager anymore.
673 * Terminate it.
674 */
675 apple_protect_pager_dequeue(pager);
676 /* the pager is all ours: no need for the lock now */
677 mutex_unlock(&apple_protect_pager_lock);
678 apple_protect_pager_terminate_internal(pager);
679 } else if (pager->ref_count == 0) {
680 /*
681 * Dropped the existence reference; the memory object has
682 * been terminated. Do some final cleanup and release the
683 * pager structure.
684 */
685 mutex_unlock(&apple_protect_pager_lock);
686 if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
687 memory_object_control_deallocate(pager->pager_control);
688 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
689 }
690 kfree(pager, sizeof (*pager));
691 pager = APPLE_PROTECT_PAGER_NULL;
692 } else {
693 /* there are still plenty of references: keep going... */
694 mutex_unlock(&apple_protect_pager_lock);
695 }
696
697 if (needs_trimming) {
698 apple_protect_pager_trim();
699 }
700 /* caution: lock is not held on return... */
701 }
702
703 /*
704 * apple_protect_pager_deallocate()
705 *
706 * Release a reference on this pager and free it when the last
707 * reference goes away.
708 */
709 void
710 apple_protect_pager_deallocate(
711 memory_object_t mem_obj)
712 {
713 apple_protect_pager_t pager;
714
715 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
716 pager = apple_protect_pager_lookup(mem_obj);
717 apple_protect_pager_deallocate_internal(pager, FALSE);
718 }
719
720 /*
721 * apple_protect_pager_terminate(): nothing to do here; the actual cleanup happens when the last reference is dropped, via apple_protect_pager_deallocate().
722 */
723 kern_return_t
724 apple_protect_pager_terminate(
725 #if !DEBUG
726 __unused
727 #endif
728 memory_object_t mem_obj)
729 {
730 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));
731
732 return KERN_SUCCESS;
733 }
734
735 /*
736 * apple_protect_pager_synchronize(): our pages are never dirty, so simply report the synchronize request as completed right away.
737 */
738 kern_return_t
739 apple_protect_pager_synchronize(
740 memory_object_t mem_obj,
741 memory_object_offset_t offset,
742 vm_size_t length,
743 __unused vm_sync_t sync_flags)
744 {
745 apple_protect_pager_t pager;
746
747 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));
748
749 pager = apple_protect_pager_lookup(mem_obj);
750
751 memory_object_synchronize_completed(pager->pager_control,
752 offset, length);
753
754 return KERN_SUCCESS;
755 }
756
757 /*
758 * apple_protect_pager_map()
759 *
760 * This allows VM to let us, the EMM, know that this memory object
761 * is currently mapped one or more times. This is called by VM only the first
762 * time the memory object gets mapped and we take one extra reference on the
763 * memory object to account for all its mappings.
764 */
765 void
766 apple_protect_pager_map(
767 memory_object_t mem_obj)
768 {
769 apple_protect_pager_t pager;
770
771 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));
772
773 pager = apple_protect_pager_lookup(mem_obj);
774
775 mutex_lock(&apple_protect_pager_lock);
776 assert(pager->is_ready);
777 assert(pager->ref_count > 0); /* pager is alive */
778 if (pager->is_mapped == FALSE) {
779 /*
780 * First mapping of this pager: take an extra reference
781 * that will remain until all the mappings of this pager
782 * are removed.
783 */
784 pager->is_mapped = TRUE;
785 pager->ref_count++;
786 apple_protect_pager_count_mapped++;
787 }
788 mutex_unlock(&apple_protect_pager_lock);
789 }
790
791 /*
792 * apple_protect_pager_unmap()
793 *
794 * This is called by VM when this memory object is no longer mapped anywhere.
795 */
796 kern_return_t
797 apple_protect_pager_unmap(
798 memory_object_t mem_obj)
799 {
800 apple_protect_pager_t pager;
801 int count_unmapped;
802
803 PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %p\n", mem_obj));
804
805 pager = apple_protect_pager_lookup(mem_obj);
806
807 mutex_lock(&apple_protect_pager_lock);
808 if (pager->is_mapped) {
809 /*
810 * All the mappings are gone, so let go of the one extra
811 * reference that represents all the mappings of this pager.
812 */
813 apple_protect_pager_count_mapped--;
814 count_unmapped = (apple_protect_pager_count -
815 apple_protect_pager_count_mapped);
816 if (count_unmapped > apple_protect_pager_count_unmapped_max) {
817 apple_protect_pager_count_unmapped_max = count_unmapped;
818 }
819 pager->is_mapped = FALSE;
820 apple_protect_pager_deallocate_internal(pager, TRUE);
821 /* caution: deallocate_internal() released the lock ! */
822 } else {
823 mutex_unlock(&apple_protect_pager_lock);
824 }
825
826 return KERN_SUCCESS;
827 }
828
829
830 /*
831 * apple_protect_pager_lookup(): convert a memory_object_t back into its apple_protect_pager_t, with consistency checks.
832 */
833 apple_protect_pager_t
834 apple_protect_pager_lookup(
835 memory_object_t mem_obj)
836 {
837 apple_protect_pager_t pager;
838
839 pager = (apple_protect_pager_t) mem_obj;
840 assert(pager->pager_ops == &apple_protect_pager_ops);
841 assert(pager->ref_count > 0);
842 return pager;
843 }
844
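/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new pager for the given backing VM object.
 * If another thread creates a pager for the same backing object first,
 * discard ours and return the winner's instead.
 */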
845 apple_protect_pager_t
846 apple_protect_pager_create(
847 vm_object_t backing_object)
848 {
849 apple_protect_pager_t pager, pager2;
850 memory_object_control_t control;
851 kern_return_t kr;
852
853 pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
854 if (pager == APPLE_PROTECT_PAGER_NULL) {
855 return APPLE_PROTECT_PAGER_NULL;
856 }
857
858 /*
859 * The vm_map call takes both named entry ports and raw memory
860 * objects in the same parameter. We need to make sure that
861 * vm_map does not see this object as a named entry port. So,
862 * we reserve the second word in the object for a fake ip_kotype
863 * setting - that will tell vm_map to use it as a memory object.
864 */
865 pager->pager_ops = &apple_protect_pager_ops;
866 pager->pager_ikot = IKOT_MEMORY_OBJECT;
867 pager->is_ready = FALSE;/* not ready until it has a "name" */
868 pager->ref_count = 2; /* existence + setup reference */
869 pager->is_mapped = FALSE;
870 pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
871 pager->backing_object = backing_object;
872 vm_object_reference(backing_object);
873
874 mutex_lock(&apple_protect_pager_lock);
875 /* see if anyone raced us to create a pager for the same object */
876 queue_iterate(&apple_protect_pager_queue,
877 pager2,
878 apple_protect_pager_t,
879 pager_queue) {
880 if (pager2->backing_object == backing_object) {
881 break;
882 }
883 }
884 if (! queue_end(&apple_protect_pager_queue,
885 (queue_entry_t) pager2)) {
886 /* while we hold the lock, transfer our setup ref to winner */
887 pager2->ref_count++;
888 /* we lost the race, down with the loser... */
889 mutex_unlock(&apple_protect_pager_lock);
890 vm_object_deallocate(pager->backing_object);
891 pager->backing_object = VM_OBJECT_NULL;
892 kfree(pager, sizeof (*pager));
893 /* ... and go with the winner */
894 pager = pager2;
895 /* let the winner make sure the pager gets ready */
896 return pager;
897 }
898
899 /* enter new pager at the head of our list of pagers */
900 queue_enter_first(&apple_protect_pager_queue,
901 pager,
902 apple_protect_pager_t,
903 pager_queue);
904 apple_protect_pager_count++;
905 if (apple_protect_pager_count > apple_protect_pager_count_max) {
906 apple_protect_pager_count_max = apple_protect_pager_count;
907 }
908 mutex_unlock(&apple_protect_pager_lock);
909
910 kr = memory_object_create_named((memory_object_t) pager,
911 0,
912 &control);
913 assert(kr == KERN_SUCCESS);
914
915 mutex_lock(&apple_protect_pager_lock);
916 /* the new pager is now ready to be used */
917 pager->is_ready = TRUE;
918 mutex_unlock(&apple_protect_pager_lock);
919
920 /* wakeup anyone waiting for this pager to be ready */
921 thread_wakeup(&pager->is_ready);
922
923 return pager;
924 }
925
926 /*
927 * apple_protect_pager_setup()
928 *
929 * Provide the caller with a memory object backed by the provided
930 * "backing_object" VM object. If such a memory object already exists,
931 * re-use it, otherwise create a new memory object.
932 */
933 memory_object_t
934 apple_protect_pager_setup(
935 vm_object_t backing_object)
936 {
937 apple_protect_pager_t pager;
938
939 mutex_lock(&apple_protect_pager_lock);
940
941 queue_iterate(&apple_protect_pager_queue,
942 pager,
943 apple_protect_pager_t,
944 pager_queue) {
945 if (pager->backing_object == backing_object) {
946 break;
947 }
948 }
949 if (queue_end(&apple_protect_pager_queue,
950 (queue_entry_t) pager)) {
951 /* no existing pager for this backing object */
952 pager = APPLE_PROTECT_PAGER_NULL;
953 } else {
954 /* make sure pager doesn't disappear */
955 pager->ref_count++;
956 }
957
958 mutex_unlock(&apple_protect_pager_lock);
959
960 if (pager == APPLE_PROTECT_PAGER_NULL) {
961 pager = apple_protect_pager_create(backing_object);
962 if (pager == APPLE_PROTECT_PAGER_NULL) {
963 return MEMORY_OBJECT_NULL;
964 }
965 }
966
967 mutex_lock(&apple_protect_pager_lock);
968 while (!pager->is_ready) {
969 thread_sleep_mutex(&pager->is_ready,
970 &apple_protect_pager_lock,
971 THREAD_UNINT);
972 }
973 mutex_unlock(&apple_protect_pager_lock);
974
975 return (memory_object_t) pager;
976 }
977
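/*
 * apple_protect_pager_trim()
 *
 * We have more unmapped pagers than apple_protect_pager_cache_limit allows:
 * terminate the oldest pagers that are ready, unmapped and otherwise
 * unreferenced until we are back under the limit.
 */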
978 void
979 apple_protect_pager_trim(void)
980 {
981 apple_protect_pager_t pager, prev_pager;
982 queue_head_t trim_queue;
983 int num_trim;
984 int count_unmapped;
985
986 mutex_lock(&apple_protect_pager_lock);
987
988 /*
989 * We have too many unmapped pagers: try to trim some unused ones,
990 * starting with the oldest pager at the end of the queue.
991 */
992 queue_init(&trim_queue);
993 num_trim = 0;
994
995 for (pager = (apple_protect_pager_t)
996 queue_last(&apple_protect_pager_queue);
997 !queue_end(&apple_protect_pager_queue,
998 (queue_entry_t) pager);
999 pager = prev_pager) {
1000 /* get prev elt before we dequeue */
1001 prev_pager = (apple_protect_pager_t)
1002 queue_prev(&pager->pager_queue);
1003
1004 if (pager->ref_count == 2 &&
1005 pager->is_ready &&
1006 !pager->is_mapped) {
1007 /* this pager can be trimmed */
1008 num_trim++;
1009 /* remove this pager from the main list ... */
1010 apple_protect_pager_dequeue(pager);
1011 /* ... and add it to our trim queue */
1012 queue_enter_first(&trim_queue,
1013 pager,
1014 apple_protect_pager_t,
1015 pager_queue);
1016
1017 count_unmapped = (apple_protect_pager_count -
1018 apple_protect_pager_count_mapped);
1019 if (count_unmapped <= apple_protect_pager_cache_limit) {
1020 /* we have trimmed enough pagers */
1021 break;
1022 }
1023 }
1024 }
1025 if (num_trim > apple_protect_pager_num_trim_max) {
1026 apple_protect_pager_num_trim_max = num_trim;
1027 }
1028 apple_protect_pager_num_trim_total += num_trim;
1029
1030 mutex_unlock(&apple_protect_pager_lock);
1031
1032 /* terminate the trimmed pagers */
1033 while (!queue_empty(&trim_queue)) {
1034 queue_remove_first(&trim_queue,
1035 pager,
1036 apple_protect_pager_t,
1037 pager_queue);
1038 pager->pager_queue.next = NULL;
1039 pager->pager_queue.prev = NULL;
1040 assert(pager->ref_count == 2);
1041 /*
1042 * We can't call deallocate_internal() because the pager
1043 * has already been dequeued, but we still need to remove
1044 * a reference.
1045 */
1046 pager->ref_count--;
1047 apple_protect_pager_terminate_internal(pager);
1048 }
1049 }
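/*
 * Illustrative sketch (not part of this file): a consumer of this pager,
 * such as the code that maps protected Mach-O segments, would use it roughly
 * as follows.  The mapping step is an assumption about the caller; only
 * apple_protect_pager_setup() is defined by this EMM, and
 * memory_object_deallocate() is the standard Mach interface for dropping the
 * setup reference:
 *
 *	memory_object_t prot_mem_obj;
 *
 *	prot_mem_obj = apple_protect_pager_setup(backing_object);
 *	if (prot_mem_obj == MEMORY_OBJECT_NULL)
 *		return KERN_FAILURE;
 *	// map "prot_mem_obj" copy-on-write into the task's map (for example
 *	// with vm_map_enter_mem_object()), then drop the setup reference:
 *	memory_object_deallocate(prot_mem_obj);
 */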