/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_shared_region.h>
/*
 * SHARED REGION MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of a dyld shared cache
 * in shared regions, applying any necessary modifications (sliding,
 * pointer signing, ...).
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the original data from its backing VM object, itself backed by
 * the dyld shared cache file, modifying it if needed and providing it to VM.
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of shared cache files,
 * so the data structures are very simple (simple linked list) for now.
 */
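/*
 * A minimal usage sketch (illustrative only; the actual caller is the
 * shared region setup code, and the mapping step is paraphrased here):
 *
 *	memory_object_t sr_mem_obj;
 *
 *	sr_mem_obj = shared_region_pager_setup(backing_object,
 *	    backing_offset, slide_info);
 *	if (sr_mem_obj == MEMORY_OBJECT_NULL) {
 *		// could not create the pager
 *	}
 *	// the caller then maps sr_mem_obj copy-on-write into the shared
 *	// region, so page-ins go through this pager while the original
 *	// shared cache pages stay clean
 */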
/* forward declarations */
void shared_region_pager_reference(memory_object_t mem_obj);
void shared_region_pager_deallocate(memory_object_t mem_obj);
kern_return_t shared_region_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t shared_region_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops shared_region_pager_ops = {
    .memory_object_reference = shared_region_pager_reference,
    .memory_object_deallocate = shared_region_pager_deallocate,
    .memory_object_init = shared_region_pager_init,
    .memory_object_terminate = shared_region_pager_terminate,
    .memory_object_data_request = shared_region_pager_data_request,
    .memory_object_data_return = shared_region_pager_data_return,
    .memory_object_data_initialize = shared_region_pager_data_initialize,
    .memory_object_data_unlock = shared_region_pager_data_unlock,
    .memory_object_synchronize = shared_region_pager_synchronize,
    .memory_object_map = shared_region_pager_map,
    .memory_object_last_unmap = shared_region_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_pager_name = "shared_region"
};
/*
 * The "shared_region_pager" describes a memory object backed by
 * the "shared_region" EMM.
 */
typedef struct shared_region_pager {
    /* mandatory generic header */
    struct memory_object sc_pgr_hdr;

    /* pager-specific data */
    queue_chain_t       pager_queue;    /* next & prev pagers */
    unsigned int        ref_count;      /* reference count */
    boolean_t           is_ready;       /* is this pager ready ? */
    boolean_t           is_mapped;      /* is this mem_obj mapped ? */
    vm_object_t         backing_object; /* VM obj for shared cache */
    vm_object_offset_t  backing_offset;
    struct vm_shared_region_slide_info *scp_slide_info;
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL)
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "shared_region_pager_lock" lock.
 */
int shared_region_pager_count = 0;        /* number of pagers */
int shared_region_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t shared_region_pager_queue;
decl_lck_mtx_data(, shared_region_pager_lock);
/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int shared_region_pager_cache_limit = 0;
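/*
 * For example, with 10 pagers total and 7 of them mapped, 3 pagers are
 * unmapped; since that exceeds a cache limit of 0, the next reference drop
 * triggers shared_region_pager_trim() (see
 * shared_region_pager_deallocate_internal() below).
 */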
/*
 * Statistics & counters.
 */
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;

lck_grp_t      shared_region_pager_lck_grp;
lck_grp_attr_t shared_region_pager_lck_grp_attr;
lck_attr_t     shared_region_pager_lck_attr;

uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;
/* internal prototypes */
shared_region_pager_t shared_region_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info);
shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
    boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
void shared_region_pager_trim(void);
#if SHARED_REGION_PAGER_DEBUG
int shared_region_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                                   \
	MACRO_BEGIN                                             \
	if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {  \
	        printf A;                                       \
	}                                                       \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
void
shared_region_pager_bootstrap(void)
{
    lck_grp_attr_setdefault(&shared_region_pager_lck_grp_attr);
    lck_grp_init(&shared_region_pager_lck_grp, "shared_region",
        &shared_region_pager_lck_grp_attr);
    lck_attr_setdefault(&shared_region_pager_lck_attr);
    lck_mtx_init(&shared_region_pager_lock,
        &shared_region_pager_lck_grp,
        &shared_region_pager_lck_attr);
    queue_init(&shared_region_pager_queue);
}
/*
 * shared_region_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
shared_region_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size)
{
    shared_region_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = shared_region_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->sc_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("shared_region_pager_init: "
            "memory_object_change_attributes() failed");
    }

#if CONFIG_SECLUDED_MEMORY
    if (secluded_for_filecache) {
        /*
         * XXX FBDP do we want this in the secluded pool?
         * Ideally, we'd want the shared region used by Camera to
         * NOT be in the secluded pool, but all other shared regions
         * in the secluded pool...
         */
        memory_object_mark_eligible_for_secluded(control, TRUE);
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    return KERN_SUCCESS;
}
/*
 * shared_region_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
shared_region_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("shared_region_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("shared_region_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}
/*
 * shared_region_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t mo_fault_info)
{
    shared_region_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl;
    unsigned int pl_count;
    vm_object_t src_top_object, src_page_object, dst_object;
    kern_return_t kr, retval;
    vm_offset_t src_vaddr, dst_vaddr;
    vm_offset_t cur_offset;
    vm_offset_t offset_in_page;
    kern_return_t error_code;
    vm_prot_t prot;
    vm_page_t src_page, top_page;
    int interruptible;
    struct vm_object_fault_info fault_info;
    mach_vm_offset_t slide_start_address;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    retval = KERN_SUCCESS;
    src_top_object = VM_OBJECT_NULL;
    src_page_object = VM_OBJECT_NULL;
    upl = NULL;
    upl_pl = NULL;
    fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
    fault_info.stealth = TRUE;
    fault_info.io_sync = FALSE;
    fault_info.mark_zf_absent = FALSE;
    fault_info.batch_pmap_op = FALSE;
    interruptible = fault_info.interruptible;

    pager = shared_region_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(pager->ref_count > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->sc_pgr_hdr.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE |    /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);

    /*
     * We'll map the original data in the kernel address space from the
     * backing VM object (itself backed by the shared cache file via
     * the vnode pager).
     */
    src_top_object = pager->backing_object;
    assert(src_top_object != VM_OBJECT_NULL);
    vm_object_reference(src_top_object); /* keep the source object alive */

    slide_start_address = pager->scp_slide_info->slid_address;

    fault_info.lo_offset += pager->backing_offset;
    fault_info.hi_offset += pager->backing_offset;

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0;
        retval == KERN_SUCCESS && cur_offset < length;
        cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (dyld shared cache) page in the kernel's
         * virtual address space.
         * We already hold a reference on the src_top_object.
         */
retry_src_fault:
        vm_object_lock(src_top_object);
        vm_object_paging_begin(src_top_object);
        error_code = 0;
        prot = VM_PROT_READ;
        src_page = VM_PAGE_NULL;
        kr = vm_fault_page(src_top_object,
            pager->backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE, /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            FALSE,
            &fault_info);
        switch (kr) {
        case VM_FAULT_SUCCESS:
            break;
        case VM_FAULT_RETRY:
            goto retry_src_fault;
        case VM_FAULT_MEMORY_SHORTAGE:
            if (vm_page_wait(interruptible)) {
                goto retry_src_fault;
            }
        /* fall thru */
        case VM_FAULT_INTERRUPTED:
            retval = MACH_SEND_INTERRUPTED;
            goto done;
        case VM_FAULT_SUCCESS_NO_VM_PAGE:
            /* success but no VM page: fail */
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        /*FALLTHROUGH*/
        case VM_FAULT_MEMORY_ERROR:
            /* the page is not there ! */
            if (error_code) {
                retval = error_code;
            } else {
                retval = KERN_MEMORY_ERROR;
            }
            goto done;
        default:
            panic("shared_region_pager_data_request: "
                "vm_fault_page() unexpected error 0x%x\n",
                kr);
        }
        assert(src_page != VM_PAGE_NULL);
        assert(src_page->vmp_busy);

        if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
            vm_page_lockspin_queues();
            if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
                vm_page_speculate(src_page, FALSE);
            }
            vm_page_unlock_queues();
        }

        /*
         * Establish pointers to the source
         * and destination physical pages.
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);

        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
            << PAGE_SHIFT);
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
        src_page_object = VM_PAGE_OBJECT(src_page);

        /*
         * Validate the original page...
         */
        if (src_page_object->code_signed) {
            vm_page_validate_cs_mapped(
                src_page,
                (const void *) src_vaddr);
        }
        /*
         * ... and transfer the results to the destination page.
         */
        UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_validated);
        UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_tainted);
        UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_nx);

        /*
         * The page provider might access a mapped file, so let's
         * release the object lock for the source page to avoid a
         * potential deadlock.
         * The source page is kept busy and we have a
         * "paging_in_progress" reference on its object, so it's safe
         * to unlock the object here.
         */
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_unlock(src_page_object);

        /*
         * Process the original contents of the source page
         * into the destination page.
         */
        for (offset_in_page = 0;
            offset_in_page < PAGE_SIZE;
            offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) {
            vm_object_offset_t chunk_offset;
            vm_object_offset_t offset_in_backing_object;
            vm_object_offset_t offset_in_sliding_range;

            chunk_offset = offset + cur_offset + offset_in_page;

            bcopy((const char *)(src_vaddr +
                offset_in_page),
                (char *)(dst_vaddr + offset_in_page),
                PAGE_SIZE_FOR_SR_SLIDE);

            offset_in_backing_object = (chunk_offset +
                pager->backing_offset);
            if ((offset_in_backing_object < pager->scp_slide_info->start) ||
                (offset_in_backing_object >= pager->scp_slide_info->end)) {
                /* chunk is outside of sliding range: done */
                shared_region_pager_copied++;
                continue;
            }

            offset_in_sliding_range =
                (offset_in_backing_object -
                pager->scp_slide_info->start);
            kr = vm_shared_region_slide_page(
                pager->scp_slide_info,
                dst_vaddr + offset_in_page,
                (mach_vm_offset_t) (offset_in_sliding_range +
                slide_start_address),
                (uint32_t) (offset_in_sliding_range /
                PAGE_SIZE_FOR_SR_SLIDE));
            if (shared_region_pager_data_request_debug) {
                printf("shared_region_data_request"
                    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
                    "in sliding range [0x%llx:0x%llx]: "
                    "SLIDE offset 0x%llx="
                    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
                    "[0x%016llx 0x%016llx] "
                    "code_signed=%d "
                    "cs_validated=%d "
                    "cs_tainted=%d "
                    "cs_nx=%d "
                    "kr=0x%x\n",
                    pager,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    chunk_offset,
                    pager->scp_slide_info->start,
                    pager->scp_slide_info->end,
                    (pager->backing_offset +
                    offset +
                    cur_offset +
                    offset_in_page),
                    pager->backing_offset,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    *(uint64_t *)(dst_vaddr + offset_in_page),
                    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
                    src_page_object->code_signed,
                    src_page->vmp_cs_validated,
                    src_page->vmp_cs_tainted,
                    src_page->vmp_cs_nx,
                    kr);
            }
            if (kr != KERN_SUCCESS) {
                shared_region_pager_slid_error++;
                break;
            }
            shared_region_pager_slid++;
        }

        assert(VM_PAGE_OBJECT(src_page) == src_page_object);
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_lock(src_page_object);

        /*
         * Cleanup the result of vm_fault_page() of the source page.
         */
        PAGE_WAKEUP_DONE(src_page);
        src_page = VM_PAGE_NULL;
        vm_object_paging_end(src_page_object);
        vm_object_unlock(src_page_object);

        if (top_page != VM_PAGE_NULL) {
            assert(VM_PAGE_OBJECT(top_page) == src_top_object);
            vm_object_lock(src_top_object);
            VM_PAGE_FREE(top_page);
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        }
    }

done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            boolean_t empty;
            upl_commit_range(upl, 0, upl->size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (src_top_object != VM_OBJECT_NULL) {
        vm_object_deallocate(src_top_object);
    }
    return retval;
}
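/*
 * For reference, the per-chunk slide arithmetic used above, gathered in
 * one place (illustrative restatement, not additional code):
 *
 *	offset_in_backing_object = offset + cur_offset + offset_in_page
 *	    + pager->backing_offset;
 *	offset_in_sliding_range = offset_in_backing_object
 *	    - pager->scp_slide_info->start;
 *	// a chunk is slid only if start <= offset_in_backing_object < end;
 *	// its page index within the sliding range is
 *	// offset_in_sliding_range / PAGE_SIZE_FOR_SR_SLIDE, and its slid
 *	// address is offset_in_sliding_range + slid_address
 */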
/*
 * shared_region_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
shared_region_pager_reference(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    assert(pager->ref_count > 0);
    pager->ref_count++;
    lck_mtx_unlock(&shared_region_pager_lock);
}
/*
 * shared_region_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "shared_region_pager_lock".
 */
void
shared_region_pager_dequeue(
    shared_region_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&shared_region_pager_queue,
        pager,
        shared_region_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    shared_region_pager_count--;
}
/*
 * shared_region_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
 * to finish the clean up.
 *
 * "shared_region_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
shared_region_pager_terminate_internal(
    shared_region_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
    }

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->sc_pgr_hdr.mo_control, 0);
}
/*
 * shared_region_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with shared_region_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
shared_region_pager_deallocate_internal(
    shared_region_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    int count_unmapped;

    if (!locked) {
        lck_mtx_lock(&shared_region_pager_lock);
    }

    count_unmapped = (shared_region_pager_count -
        shared_region_pager_count_mapped);
    if (count_unmapped > shared_region_pager_cache_limit) {
        /* we have too many unmapped pagers: trim some */
        needs_trimming = TRUE;
    } else {
        needs_trimming = FALSE;
    }

    /* drop a reference on this pager */
    pager->ref_count--;

    if (pager->ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        shared_region_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&shared_region_pager_lock);
        shared_region_pager_terminate_internal(pager);
    } else if (pager->ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated. Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&shared_region_pager_lock);
        if (pager->sc_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->sc_pgr_hdr.mo_control);
            pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = SHARED_REGION_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&shared_region_pager_lock);
    }

    if (needs_trimming) {
        shared_region_pager_trim();
    }
    /* caution: lock is not held on return... */
}
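/*
 * Reference-count states handled above, summarized:
 *
 *	ref_count > 1:  other references remain; just drop ours
 *	ref_count == 1: only the "named" (existence) reference is left;
 *	                dequeue the pager and trigger its termination
 *	ref_count == 0: the memory object has been terminated; release
 *	                the control and free the pager structure
 */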
/*
 * shared_region_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
shared_region_pager_deallocate(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
    pager = shared_region_pager_lookup(mem_obj);
    shared_region_pager_deallocate_internal(pager, FALSE);
}
/*
 * shared_region_pager_terminate()
 */
kern_return_t
shared_region_pager_terminate(
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}
/*
 * shared_region_pager_synchronize()
 *
 * This should never be called, as memory_object_synchronize() is no
 * longer supported.
 */
kern_return_t
shared_region_pager_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}
/*
 * shared_region_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
shared_region_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    assert(pager->is_ready);
    assert(pager->ref_count > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        pager->ref_count++;
        shared_region_pager_count_mapped++;
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    return KERN_SUCCESS;
}
/*
 * shared_region_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
shared_region_pager_last_unmap(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;
    int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_last_unmap: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        shared_region_pager_count_mapped--;
        count_unmapped = (shared_region_pager_count -
            shared_region_pager_count_mapped);
        if (count_unmapped > shared_region_pager_count_unmapped_max) {
            shared_region_pager_count_unmapped_max = count_unmapped;
        }
        pager->is_mapped = FALSE;
        shared_region_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock ! */
    } else {
        lck_mtx_unlock(&shared_region_pager_lock);
    }

    return KERN_SUCCESS;
}
shared_region_pager_t
shared_region_pager_lookup(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
    pager = (shared_region_pager_t)(uintptr_t) mem_obj;
    assert(pager->ref_count > 0);
    return pager;
}
shared_region_pager_t
shared_region_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info)
{
    shared_region_pager_t pager;
    memory_object_control_t control;
    kern_return_t kr;
    vm_object_t object;

    pager = (shared_region_pager_t) kalloc(sizeof(*pager));
    if (pager == SHARED_REGION_PAGER_NULL) {
        return SHARED_REGION_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter. We need to make sure that
     * vm_map does not see this object as a named entry port. So,
     * we reserve the first word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->sc_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->sc_pgr_hdr.mo_pager_ops = &shared_region_pager_ops;
    pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->is_ready = FALSE; /* not ready until it has a "name" */
    pager->ref_count = 1;    /* existence reference (for the cache) */
    pager->ref_count++;      /* for the caller */
    pager->is_mapped = FALSE;
    pager->backing_object = backing_object;
    pager->backing_offset = backing_offset;
    pager->scp_slide_info = slide_info;

    vm_object_reference(backing_object);

    lck_mtx_lock(&shared_region_pager_lock);
    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&shared_region_pager_queue,
        pager,
        shared_region_pager_t,
        pager_queue);
    shared_region_pager_count++;
    if (shared_region_pager_count > shared_region_pager_count_max) {
        shared_region_pager_count_max = shared_region_pager_count;
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    memory_object_mark_trusted(control);

    lck_mtx_lock(&shared_region_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    object = memory_object_to_vm_object((memory_object_t) pager);
    assert(object != VM_OBJECT_NULL);
    /*
     * No one knows about this object and so we get away without the object lock.
     * This object is _eventually_ backed by the dyld shared cache and so we want
     * to benefit from the lock priority boosting.
     */
    object->object_is_shared_cache = TRUE;
    lck_mtx_unlock(&shared_region_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    return pager;
}
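/*
 * Note: a pager returned by shared_region_pager_create() starts out with
 * ref_count == 2: the existence reference taken for the pager cache plus
 * one reference for the caller, which the caller is expected to drop
 * through shared_region_pager_deallocate() when done with the pager.
 */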
/*
 * shared_region_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
memory_object_t
shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info)
{
    shared_region_pager_t pager;

    /* create new pager */
    pager = shared_region_pager_create(
        backing_object,
        backing_offset,
        slide_info);
    if (pager == SHARED_REGION_PAGER_NULL) {
        /* could not create a new pager */
        return MEMORY_OBJECT_NULL;
    }

    /* wait for the pager to be ready before using it */
    lck_mtx_lock(&shared_region_pager_lock);
    while (!pager->is_ready) {
        lck_mtx_sleep(&shared_region_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    return (memory_object_t) pager;
}
void
shared_region_pager_trim(void)
{
    shared_region_pager_t pager, prev_pager;
    queue_head_t trim_queue;
    int num_trim;
    int count_unmapped;

    lck_mtx_lock(&shared_region_pager_lock);

    /*
     * We have too many pagers, try and trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (shared_region_pager_t)
        queue_last(&shared_region_pager_queue);
        !queue_end(&shared_region_pager_queue,
        (queue_entry_t) pager);
        pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (shared_region_pager_t)
            queue_prev(&pager->pager_queue);

        if (pager->ref_count == 2 &&
            pager->is_ready &&
            !pager->is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            shared_region_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                pager,
                shared_region_pager_t,
                pager_queue);

            count_unmapped = (shared_region_pager_count -
                shared_region_pager_count_mapped);
            if (count_unmapped <= shared_region_pager_cache_limit) {
                /* we've trimmed enough pagers */
                break;
            }
        }
    }
    if (num_trim > shared_region_pager_num_trim_max) {
        shared_region_pager_num_trim_max = num_trim;
    }
    shared_region_pager_num_trim_total += num_trim;

    lck_mtx_unlock(&shared_region_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
            pager,
            shared_region_pager_t,
            pager_queue);
        pager->pager_queue.next = NULL;
        pager->pager_queue.prev = NULL;
        assert(pager->ref_count == 2);
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to remove
         * a reference.
         */
        pager->ref_count--;
        shared_region_pager_terminate_internal(pager);
    }
}