/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (a simple linked list).
 */
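
/*
 * Typical usage (a sketch, not part of this pager): the code that maps a
 * protected executable is expected to obtain a memory object for the
 * encrypted region roughly as follows; the exact call site is an assumption
 * here, only apple_protect_pager_setup() below is ours:
 *
 *	memory_object_t	unprotected_mem_obj;
 *
 *	unprotected_mem_obj = apple_protect_pager_setup(backing_object);
 *	if (unprotected_mem_obj == MEMORY_OBJECT_NULL)
 *		return KERN_FAILURE;
 *
 * The caller then maps "unprotected_mem_obj" copy-on-write over the
 * encrypted range; each page fault on that range comes back to us via
 * apple_protect_pager_data_request().
 */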
/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
				       memory_object_control_t control,
				       vm_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
					       memory_object_offset_t offset,
					       vm_size_t length,
					       vm_prot_t protection_required);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      vm_size_t data_cnt,
					      memory_object_offset_t *resid_offset,
					      int *io_error,
					      boolean_t dirty,
					      boolean_t kernel_copy,
					      int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
						  memory_object_offset_t offset,
						  vm_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      vm_size_t size,
					      vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
					      memory_object_offset_t offset,
					      vm_size_t length,
					      vm_sync_t sync_flags);
kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_unmap,
	"apple protect pager"
};
/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready ? */
	boolean_t		is_mapped;	/* is this mem_obj mapped ? */
	memory_object_control_t	pager_control;	/* mem object control handle */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
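
/*
 * Reference counting, as used throughout this file: a pager is created
 * with two references ("existence + setup"), gains one extra reference the
 * first time it is mapped (see apple_protect_pager_map()) and loses that
 * extra reference when its last mapping goes away.  A pager that is
 * unmapped and back down to two references is a candidate for trimming
 * (see apple_protect_pager_trim()).
 */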
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_mutex_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;
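
/*
 * When the number of unmapped pagers exceeds this limit,
 * apple_protect_pager_deallocate_internal() asks apple_protect_pager_trim()
 * to reclaim the oldest unmapped pagers (see below).
 */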
/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;
/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
					     boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);
#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {		\
		printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
void
apple_protect_pager_bootstrap(void)
{
	mutex_init(&apple_protect_pager_lock, 0);
	queue_init(&apple_protect_pager_queue);
}
/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	vm_size_t		pg_size)
{
	apple_protect_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t	attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}
/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl = NULL;
	int			upl_flags;
	vm_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		src_mapping = 0, dst_mapping = 0;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	boolean_t		src_map_page_by_page;
	vm_map_entry_t		map_entry;
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %x, %llx, %llx, %x\n", mem_obj, offset, length, protection_required));

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %x, %llx, %llx, %x, pager %x\n", mem_obj, offset, length, protection_required, pager));
	/*
	 * Map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object);	/* ref. for the mapping */
	kr = vm_map_enter(kernel_map,
			  &src_mapping,
			  length,
			  0,
			  VM_FLAGS_ANYWHERE,
			  src_object,
			  offset,
			  FALSE,
			  VM_PROT_READ,
			  VM_PROT_READ,
			  VM_INHERIT_NONE);
	switch (kr) {
	case KERN_SUCCESS:
		/* we mapped the whole source region in one shot */
		src_map_page_by_page = FALSE;
		src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
		break;
	case KERN_NO_SPACE:
		/* we can't map the entire section, so map it page by page */
		src_map_page_by_page = TRUE;
		/* release the reference taken for the failed mapping */
		vm_object_deallocate(src_object);
		src_mapping = 0;
		break;
	default:
		/* unexpected failure: release the mapping reference and bail */
		vm_object_deallocate(src_object);
		retval = kr;
		goto done;
	}
	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;	/* so we can use UPL_GET_INTERNAL_PAGE_LIST */
	kr = memory_object_upl_request(mo_control,
				       offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	/*
	 * Reserve a virtual page in the kernel address space to map each
	 * destination physical page when it's its turn to be filled.
	 */
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);

	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &dst_mapping,
			       PAGE_SIZE_64,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = dst_mapping - VM_MIN_KERNEL_ADDRESS;
	vm_map_unlock(kernel_map);	/* vm_map_find_space() returns with the map locked */
	dst_vaddr = CAST_DOWN(vm_offset_t, dst_mapping);
	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t	dst_pnum;

		if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (addr64_t)
			upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap, dst_mapping, dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   dst_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);

		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 */
		if (src_map_page_by_page) {
			vm_object_reference(src_object); /* ref. for mapping */
			kr = vm_map_enter(kernel_map,
					  &src_mapping,
					  PAGE_SIZE_64,
					  0,
					  VM_FLAGS_ANYWHERE,
					  src_object,
					  offset + cur_offset,
					  FALSE,
					  VM_PROT_READ,
					  VM_PROT_READ,
					  VM_INHERIT_NONE);
			if (kr != KERN_SUCCESS) {
				vm_object_deallocate(src_object);
				retval = kr;
				goto done;
			}
			src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
		} else {
			src_vaddr = src_mapping + cur_offset;
		}

		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		dsmos_page_transform((const void *) src_vaddr,
				     (void *) dst_vaddr);

		/*
		 * Remove the pmap mapping of the destination page
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) dst_mapping,
			    (addr64_t) (dst_mapping + PAGE_SIZE_64));

		if (src_map_page_by_page) {
			/*
			 * Remove the kernel mapping of the source page.
			 * This releases the extra reference we took on
			 * "src_object" for that mapping.
			 */
			kr = vm_map_remove(kernel_map,
					   src_mapping,
					   src_mapping + PAGE_SIZE_64,
					   VM_MAP_NO_FLAGS);
			assert(kr == KERN_SUCCESS);
			src_mapping = 0;
			src_vaddr = 0;
		}
	}
	retval = KERN_SUCCESS;
done:
	if (src_mapping != 0) {
		/* clean up the mapping of the source pages */
		kr = vm_map_remove(kernel_map,
				   src_mapping,
				   src_mapping + length,
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		src_mapping = 0;
		src_vaddr = 0;
	}

	/* clean up the UPL */
	if (upl != NULL) {
		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			upl_commit(upl, NULL, 0);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}

	if (dst_mapping != 0) {
		/* clean up the mapping of the destination pages */
		kr = vm_map_remove(kernel_map,
				   dst_mapping,
				   dst_mapping + PAGE_SIZE_64,
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		dst_mapping = 0;
		dst_vaddr = 0;
	}

	return retval;
}
/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	mutex_unlock(&apple_protect_pager_lock);
}
/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		     pager,
		     apple_protect_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}
/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);
}
/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (!locked) {
		mutex_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		mutex_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		mutex_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		mutex_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}
/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %x\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t		mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %x\n", mem_obj));

	return KERN_SUCCESS;
}
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %x\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}
/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM only the first
 * time the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
void
apple_protect_pager_map(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %x\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	mutex_unlock(&apple_protect_pager_lock);
}
/*
 * apple_protect_pager_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %x\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		mutex_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}
apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t	backing_object)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE;	/* not ready until it has a "name" */
	pager->ref_count = 2;		/* existence + setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	vm_object_reference(backing_object);

	mutex_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager2->backing_object == backing_object) {
			break;
		}
	}
	if (! queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		mutex_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	mutex_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	mutex_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	mutex_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}
/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t	backing_object)
{
	apple_protect_pager_t	pager;

	mutex_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		      pager,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager->backing_object == backing_object) {
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	mutex_unlock(&apple_protect_pager_lock);

	if (pager == APPLE_PROTECT_PAGER_NULL) {
		pager = apple_protect_pager_create(backing_object);
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	/* wait for the pager to be ready before handing it out */
	mutex_lock(&apple_protect_pager_lock);
	while (!pager->is_ready) {
		thread_sleep_mutex(&pager->is_ready,
				   &apple_protect_pager_lock,
				   THREAD_UNINT);
	}
	mutex_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	mutex_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	mutex_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}