/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list).
 */
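
/*
 * Illustrative usage sketch (not part of the original code): a caller that
 * wants a decrypting view of an encrypted region would typically obtain a
 * memory object from apple_protect_pager_setup() and map it copy-on-write
 * over the protected range.  The mapping step and error handling shown here
 * are assumptions for illustration only.
 *
 *	memory_object_t	unprotected_mem_obj;
 *
 *	unprotected_mem_obj = apple_protect_pager_setup(protected_object);
 *	if (unprotected_mem_obj == MEMORY_OBJECT_NULL)
 *		return KERN_FAILURE;
 *	// ... map "unprotected_mem_obj" copy-on-write into the target map,
 *	// then drop the setup reference:
 *	memory_object_deallocate(unprotected_mem_obj);
 */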
/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
				       memory_object_control_t control,
				       vm_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
						memory_object_offset_t offset,
						vm_size_t length,
						vm_prot_t protection_required,
						memory_object_fault_info_t fault_info);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
					       memory_object_offset_t offset,
					       vm_size_t data_cnt,
					       memory_object_offset_t *resid_offset,
					       int *io_error,
					       boolean_t dirty,
					       boolean_t kernel_copy,
					       int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
						   memory_object_offset_t offset,
						   vm_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
					       memory_object_offset_t offset,
					       vm_size_t size,
					       vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
					       memory_object_offset_t offset,
					       vm_size_t length,
					       vm_sync_t sync_flags);
kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
	apple_protect_pager_reference,
	apple_protect_pager_deallocate,
	apple_protect_pager_init,
	apple_protect_pager_terminate,
	apple_protect_pager_data_request,
	apple_protect_pager_data_return,
	apple_protect_pager_data_initialize,
	apple_protect_pager_data_unlock,
	apple_protect_pager_synchronize,
	apple_protect_pager_unmap,
	"apple protect pager"
};
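
/*
 * Dispatch note (illustrative, not in the original source): VM reaches the
 * routines in this file indirectly, through the vector above.  A call such
 * as memory_object_data_request() on one of these memory objects is expected
 * to be routed to apple_protect_pager_data_request() via the pager_ops
 * pointer stored in the first word of the pager structure below; the actual
 * dispatching code lives in the generic memory_object layer, not here.
 */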
/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
	memory_object_pager_ops_t pager_ops;	/* == &apple_protect_pager_ops */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready? */
	boolean_t		is_mapped;	/* is this mem_obj mapped? */
	memory_object_control_t	pager_control;	/* mem object control handle */
	vm_object_t		backing_object;	/* VM obj w/ encrypted data */
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL	((apple_protect_pager_t) NULL)
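
/*
 * Reference counting, as implemented below: apple_protect_pager_create()
 * starts a pager with ref_count == 2 (existence + setup reference),
 * apple_protect_pager_map() takes one extra reference for the first mapping
 * and apple_protect_pager_unmap() drops it.  When the count drops back to 1,
 * only the "named" reference is left and the pager is dequeued and
 * terminated; when it reaches 0, the pager structure is freed.
 */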
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;		/* number of pagers */
int apple_protect_pager_count_mapped = 0;	/* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_mutex_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;
/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
					     boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);
#if APPLE_PROTECT_PAGER_DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {		\
		printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
void
apple_protect_pager_bootstrap(void)
{
	mutex_init(&apple_protect_pager_lock, 0);
	queue_init(&apple_protect_pager_queue);
}
/*
 * apple_protect_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	vm_size_t		pg_size)
{
	apple_protect_pager_t		pager;
	kern_return_t			kr;
	memory_object_attr_info_data_t	attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("apple_protect_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}
/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("apple_protect_pager_data_return: should never get called");
	return KERN_FAILURE;
}
kern_return_t
apple_protect_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("apple_protect_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}
kern_return_t
apple_protect_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
apple_protect_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	apple_protect_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl;
	vm_object_t		src_object, dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		kernel_mapping;
	vm_offset_t		src_vaddr, dst_vaddr;
	vm_offset_t		cur_offset;
	vm_map_entry_t		map_entry;
	kern_return_t		error_code;
	vm_prot_t		prot;
	vm_page_t		src_page, top_page;
	int			interruptible;
	vm_object_fault_info_t	fault_info;
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
	src_object = VM_OBJECT_NULL;
	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;
	fault_info = (vm_object_fault_info_t) mo_fault_info;
	interruptible = fault_info->interruptible;

	pager = apple_protect_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	kr = memory_object_upl_request(mo_control, offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}

	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);
	/*
	 * Reserve 2 virtual pages in the kernel address space to map the
	 * source and destination physical pages when it's their turn to
	 * be processed.
	 */
	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &kernel_mapping,
			       2 * PAGE_SIZE_64,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
	vm_map_unlock(kernel_map);
	src_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping + PAGE_SIZE_64);
	/*
	 * We'll map the encrypted data in the kernel address space from the
	 * backing VM object (itself backed by the encrypted file via
	 * the vnode pager).
	 */
	src_object = pager->backing_object;
	assert(src_object != VM_OBJECT_NULL);
	vm_object_reference(src_object); /* to keep the source object alive */
	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
			/* this page is not in the UPL: skip it */
			continue;
		}
		/*
		 * Map the source (encrypted) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_object.
		 */
	retry_src_fault:
		vm_object_lock(src_object);
		vm_object_paging_begin(src_object);
		error_code = 0;
		prot = VM_PROT_READ;
		kr = vm_fault_page(src_object,
				   offset + cur_offset,
				   VM_PROT_READ,
				   FALSE,
				   &prot,
				   &src_page,
				   &top_page,
				   NULL,
				   &error_code,
				   FALSE,
				   FALSE,
				   fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			/* fall thru */
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			retval = KERN_FAILURE;
			goto done;
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->busy);
		/*
		 * Establish an explicit mapping of the source
		 * physical page.
		 */
		pmap_enter(kernel_pmap,
			   kernel_mapping,
			   src_page->phys_page,
			   VM_PROT_READ,
			   src_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);
		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (addr64_t)
			upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap,
			   kernel_mapping + PAGE_SIZE_64,
			   dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   dst_object->wimg_bits & VM_WIMG_MASK,
			   TRUE);
		/*
		 * Decrypt the encrypted contents of the source page
		 * into the destination page.
		 */
		dsmos_page_transform((const void *) src_vaddr,
				     (void *) dst_vaddr);
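
		/*
		 * Note: dsmos_page_transform() comes from
		 * <kern/page_decrypt.h> (included above); the actual
		 * transform is expected to be supplied at run time by the
		 * DSMOS kernel extension.  The registration mechanism is
		 * outside the scope of this file.
		 */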
		/*
		 * Remove the pmap mapping of the source and destination pages
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) kernel_mapping,
			    (addr64_t) (kernel_mapping + (2 * PAGE_SIZE_64)));
		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		vm_object_paging_end(src_page->object);
		vm_object_unlock(src_page->object);
		if (top_page != VM_PAGE_NULL) {
			vm_object_t top_object;

			top_object = top_page->object;
			vm_object_lock(top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(top_object);
			vm_object_unlock(top_object);
		}
	}
	retval = KERN_SUCCESS;

done:
	/* clean up the UPL */

	/*
	 * The pages are currently dirty because we've just been
	 * writing on them, but as far as we're concerned, they're
	 * clean since they contain their "original" contents as
	 * provided by us, the pager.
	 * Tell the UPL to mark them "clean".
	 */
	upl_clear_dirty(upl, TRUE);

	/* abort or commit the UPL */
	if (retval != KERN_SUCCESS) {
		upl_abort(upl, 0);
	} else {
		upl_commit(upl, NULL, 0);
	}
	/* and deallocate the UPL */
	upl_deallocate(upl);

	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
				   kernel_mapping,
				   kernel_mapping + (2 * PAGE_SIZE_64),
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
	}
	if (src_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_object);
	}

	return retval;
}
/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	mutex_unlock(&apple_protect_pager_lock);
}
/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
	apple_protect_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&apple_protect_pager_queue,
		     pager,
		     apple_protect_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	apple_protect_pager_count--;
}
/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);
}
/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (!locked) {
		mutex_lock(&apple_protect_pager_lock);
	}

	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		mutex_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		mutex_unlock(&apple_protect_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		mutex_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}
/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj));
	pager = apple_protect_pager_lookup(mem_obj);
	apple_protect_pager_deallocate_internal(pager, FALSE);
}
kern_return_t
apple_protect_pager_terminate(
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}
kern_return_t
apple_protect_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_sync_t	sync_flags)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}
/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM only the first
 * time the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
void
apple_protect_pager_map(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	mutex_unlock(&apple_protect_pager_lock);
}
/*
 * apple_protect_pager_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_unmap(
	memory_object_t		mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	mutex_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		mutex_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}
apple_protect_pager_t
apple_protect_pager_lookup(
	memory_object_t	mem_obj)
{
	apple_protect_pager_t	pager;

	pager = (apple_protect_pager_t) mem_obj;
	assert(pager->pager_ops == &apple_protect_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}
apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t	backing_object)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &apple_protect_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE;	/* not ready until it has a "name" */
	pager->ref_count = 2;		/* existence + setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->backing_object = backing_object;
	vm_object_reference(backing_object);

	mutex_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager2->backing_object == backing_object) {
			break;
		}
	}
	if (! queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		mutex_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	mutex_unlock(&apple_protect_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	mutex_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	mutex_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}
/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
	vm_object_t	backing_object)
{
	apple_protect_pager_t	pager;

	mutex_lock(&apple_protect_pager_lock);

	queue_iterate(&apple_protect_pager_queue,
		      pager,
		      apple_protect_pager_t,
		      pager_queue) {
		if (pager->backing_object == backing_object) {
			break;
		}
	}
	if (queue_end(&apple_protect_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	mutex_unlock(&apple_protect_pager_lock);

	if (pager == APPLE_PROTECT_PAGER_NULL) {
		pager = apple_protect_pager_create(backing_object);
		if (pager == APPLE_PROTECT_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	mutex_lock(&apple_protect_pager_lock);
	while (!pager->is_ready) {
		thread_sleep_mutex(&pager->is_ready,
				   &apple_protect_pager_lock,
				   THREAD_UNINT);
	}
	mutex_unlock(&apple_protect_pager_lock);

	return (memory_object_t) pager;
}
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	mutex_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have trimmed enough pagers */
				break;
			}
		}
	}

	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	mutex_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}