/*
 * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_shared_region.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
extern boolean_t diversify_user_jop;
#endif /* __has_feature(ptrauth_calls) */

/*
 * SHARED REGION MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of a dyld shared cache
 * in shared regions, applying any necessary modifications (sliding,
 * pointer signing, ...).
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the original data from its backing VM object, itself backed by
 * the dyld shared cache file, modifying it if needed and providing it to VM.
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of shared cache files,
 * so the data structures are very simple (simple linked list) for now.
 */

/* forward declarations */
void shared_region_pager_reference(memory_object_t mem_obj);
void shared_region_pager_deallocate(memory_object_t mem_obj);
kern_return_t shared_region_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t shared_region_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops shared_region_pager_ops = {
    .memory_object_reference = shared_region_pager_reference,
    .memory_object_deallocate = shared_region_pager_deallocate,
    .memory_object_init = shared_region_pager_init,
    .memory_object_terminate = shared_region_pager_terminate,
    .memory_object_data_request = shared_region_pager_data_request,
    .memory_object_data_return = shared_region_pager_data_return,
    .memory_object_data_initialize = shared_region_pager_data_initialize,
    .memory_object_data_unlock = shared_region_pager_data_unlock,
    .memory_object_synchronize = shared_region_pager_synchronize,
    .memory_object_map = shared_region_pager_map,
    .memory_object_last_unmap = shared_region_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_pager_name = "shared_region"
};

#if __has_feature(ptrauth_calls)
/*
 * Track mappings between shared_region_id and the key used to sign
 * authenticated pointers.
 */
typedef struct shared_region_jop_key_map {
    queue_chain_t srk_queue;
    char          *srk_shared_region_id;
    uint64_t      srk_jop_key;
    os_refcnt_t   srk_ref_count;  /* count of tasks active with this shared_region_id */
} *shared_region_jop_key_map_t;

os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);

/*
 * The list is protected by the "shared_region_jop_key_lock" lock.
 */
int shared_region_key_count = 0;  /* number of active shared_region_id keys */
queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);

/*
 * Find the pointer signing key for the given shared_region_id.
 */
uint64_t
shared_region_find_key(char *shared_region_id)
{
    shared_region_jop_key_map_t region;
    uint64_t key;

    lck_mtx_lock(&shared_region_jop_key_lock);
    queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
        if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
            goto found;
        }
    }
    panic("shared_region_find_key() no key for region '%s'", shared_region_id);

found:
    key = region->srk_jop_key;
    lck_mtx_unlock(&shared_region_jop_key_lock);
    return key;
}

/*
 * Return an authentication key to use for the given shared_region_id.
 * If inherit is TRUE, then the key must match inherited_key.
 * Creates an additional reference when successful.
 */
void
shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
{
    shared_region_jop_key_map_t region;
    shared_region_jop_key_map_t new = NULL;

    assert(shared_region_id != NULL);
again:
    lck_mtx_lock(&shared_region_jop_key_lock);
    queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
        if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
            os_ref_retain_locked(&region->srk_ref_count);
            goto done;
        }
    }

    /*
     * The ID was not found. On the first pass, drop the lock, allocate a
     * new entry and redo the lookup, since another thread may have added
     * the same ID while the lock was dropped.
     */
    if (new == NULL) {
        lck_mtx_unlock(&shared_region_jop_key_lock);
        new = kalloc(sizeof *new);
        uint_t len = strlen(shared_region_id) + 1;
        new->srk_shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK);
        strlcpy(new->srk_shared_region_id, shared_region_id, len);
        os_ref_init(&new->srk_ref_count, &srk_refgrp);

        if (diversify_user_jop && inherit) {
            new->srk_jop_key = inherited_key;
        } else if (diversify_user_jop && strlen(shared_region_id) > 0) {
            new->srk_jop_key = generate_jop_key();
        } else {
            new->srk_jop_key = ml_default_jop_pid();
        }

        goto again;
    }

    /*
     * Use the newly allocated entry.
     */
    ++shared_region_key_count;
    queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
    region = new;
    new = NULL;

done:
    if (inherit && inherited_key != region->srk_jop_key) {
        panic("shared_region_key_alloc() inherited key mismatch");
    }
    lck_mtx_unlock(&shared_region_jop_key_lock);

    /*
     * Free any unused new entry.
     */
    if (new != NULL) {
        kheap_free(KHEAP_DATA_BUFFERS, new->srk_shared_region_id, strlen(new->srk_shared_region_id) + 1);
        kfree(new, sizeof *new);
    }
}

/*
 * Mark the end of using a shared_region_id's key.
 */
extern void
shared_region_key_dealloc(char *shared_region_id)
{
    shared_region_jop_key_map_t region;

    assert(shared_region_id != NULL);
    lck_mtx_lock(&shared_region_jop_key_lock);
    queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
        if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
            goto done;
        }
    }
    panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);

done:
    if (os_ref_release_locked(&region->srk_ref_count) == 0) {
        queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
        --shared_region_key_count;
    } else {
        region = NULL;
    }
    lck_mtx_unlock(&shared_region_jop_key_lock);

    if (region != NULL) {
        kheap_free(KHEAP_DATA_BUFFERS, region->srk_shared_region_id, strlen(region->srk_shared_region_id) + 1);
        kfree(region, sizeof *region);
    }
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * The "shared_region_pager" describes a memory object backed by
 * the "shared_region" EMM.
 */
typedef struct shared_region_pager {
    struct memory_object          srp_header;          /* mandatory generic header */

    /* pager-specific data */
    queue_chain_t                 srp_queue;           /* next & prev pagers */
    uint32_t                      srp_ref_count;       /* active uses */
    bool                          srp_is_mapped;       /* has active mappings */
    bool                          srp_is_ready;        /* is this pager ready? */
    vm_object_t                   srp_backing_object;  /* VM object for shared cache */
    vm_object_offset_t            srp_backing_offset;
    vm_shared_region_slide_info_t srp_slide_info;
#if __has_feature(ptrauth_calls)
    uint64_t                      srp_jop_key;         /* zero if used for arm64 */
#endif /* __has_feature(ptrauth_calls) */
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "shared_region_pager_lock" lock.
 */
int shared_region_pager_count = 0;          /* number of pagers */
int shared_region_pager_count_mapped = 0;   /* number of mapped pagers */
queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int shared_region_pager_cache_limit = 0;

/*
 * Statistics & counters.
 */
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;

uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;

/* internal prototypes */
shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
    boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
void shared_region_pager_trim(void);


#if DEBUG
int shared_region_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                                           \
    MACRO_BEGIN                                                         \
    if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {              \
        printf A;                                                       \
    }                                                                   \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

/*
 * shared_region_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
shared_region_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    shared_region_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = shared_region_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->srp_header.mo_control = control;

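    /*
     * Use the delayed-copy (copy-on-write) strategy; as noted in the
     * overview comment at the top of this file, the pages provided by this
     * pager are mapped copy-on-write so the originals stay clean and the
     * pager never has to handle page-outs.
     */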
    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("shared_region_pager_init: "
            "memory_object_change_attributes() failed");
    }

#if CONFIG_SECLUDED_MEMORY
    if (secluded_for_filecache) {
#if 00
        /*
         * XXX FBDP do we want this in the secluded pool?
         * Ideally, we'd want the shared region used by Camera to
         * NOT be in the secluded pool, but all other shared regions
         * in the secluded pool...
         */
        memory_object_mark_eligible_for_secluded(control, TRUE);
#endif /* 00 */
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    return KERN_SUCCESS;
}

/*
 * shared_region_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
shared_region_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("shared_region_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("shared_region_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}

/*
 * shared_region_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
#if !DEBUG
    __unused
#endif
    vm_prot_t protection_required,
    memory_object_fault_info_t mo_fault_info)
{
    shared_region_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl;
    unsigned int pl_count;
    vm_object_t src_top_object, src_page_object, dst_object;
    kern_return_t kr, retval;
    vm_offset_t src_vaddr, dst_vaddr;
    vm_offset_t cur_offset;
    vm_offset_t offset_in_page;
    kern_return_t error_code;
    vm_prot_t prot;
    vm_page_t src_page, top_page;
    int interruptible;
    struct vm_object_fault_info fault_info;
    mach_vm_offset_t slide_start_address;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    retval = KERN_SUCCESS;
    src_top_object = VM_OBJECT_NULL;
    src_page_object = VM_OBJECT_NULL;
    upl = NULL;
    upl_pl = NULL;
    fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
    fault_info.stealth = TRUE;
    fault_info.io_sync = FALSE;
    fault_info.mark_zf_absent = FALSE;
    fault_info.batch_pmap_op = FALSE;
    interruptible = fault_info.interruptible;

    pager = shared_region_pager_lookup(mem_obj);
    assert(pager->srp_is_ready);
    assert(pager->srp_ref_count > 1); /* pager is alive */
    assert(pager->srp_is_mapped); /* pager is mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->srp_header.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);

    /*
     * We'll map the original data in the kernel address space from the
     * backing VM object (itself backed by the shared cache file via
     * the vnode pager).
     */
    src_top_object = pager->srp_backing_object;
    assert(src_top_object != VM_OBJECT_NULL);
    vm_object_reference(src_top_object); /* keep the source object alive */

    slide_start_address = pager->srp_slide_info->si_slid_address;

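    /*
     * The fault cluster window (lo_offset/hi_offset) was expressed relative
     * to this pager's memory object; shift it by srp_backing_offset so it
     * bounds the same range within the backing object we fault from below.
     */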
    fault_info.lo_offset += pager->srp_backing_offset;
    fault_info.hi_offset += pager->srp_backing_offset;

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0;
        retval == KERN_SUCCESS && cur_offset < length;
        cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (dyld shared cache) page in the kernel's
         * virtual address space.
         * We already hold a reference on the src_top_object.
         */
retry_src_fault:
        vm_object_lock(src_top_object);
        vm_object_paging_begin(src_top_object);
        error_code = 0;
        prot = VM_PROT_READ;
        src_page = VM_PAGE_NULL;
        kr = vm_fault_page(src_top_object,
            pager->srp_backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE, /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            FALSE,
            &fault_info);
        switch (kr) {
        case VM_FAULT_SUCCESS:
            break;
        case VM_FAULT_RETRY:
            goto retry_src_fault;
        case VM_FAULT_MEMORY_SHORTAGE:
            if (vm_page_wait(interruptible)) {
                goto retry_src_fault;
            }
            OS_FALLTHROUGH;
        case VM_FAULT_INTERRUPTED:
            retval = MACH_SEND_INTERRUPTED;
            goto done;
        case VM_FAULT_SUCCESS_NO_VM_PAGE:
            /* success but no VM page: fail */
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
            OS_FALLTHROUGH;
        case VM_FAULT_MEMORY_ERROR:
            /* the page is not there! */
            if (error_code) {
                retval = error_code;
            } else {
                retval = KERN_MEMORY_ERROR;
            }
            goto done;
        default:
            panic("shared_region_pager_data_request: "
                "vm_fault_page() unexpected error 0x%x\n",
                kr);
        }
        assert(src_page != VM_PAGE_NULL);
        assert(src_page->vmp_busy);

        if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
            vm_page_lockspin_queues();
            if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
                vm_page_speculate(src_page, FALSE);
            }
            vm_page_unlock_queues();
        }

        /*
         * Establish pointers to the source
         * and destination physical pages.
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);

        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
            << PAGE_SHIFT);
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
        src_page_object = VM_PAGE_OBJECT(src_page);

        /*
         * Validate the original page...
         */
        if (src_page_object->code_signed) {
            vm_page_validate_cs_mapped(
                src_page, PAGE_SIZE, 0,
                (const void *) src_vaddr);
        }
        /*
         * ... and transfer the results to the destination page.
         */
        UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_validated);
        UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_tainted);
        UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_nx);

        /*
         * The page provider might access a mapped file, so let's
         * release the object lock for the source page to avoid a
         * potential deadlock.
         * The source page is kept busy and we have a
         * "paging_in_progress" reference on its object, so it's safe
         * to unlock the object here.
         */
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_unlock(src_page_object);

        /*
         * Process the original contents of the source page
         * into the destination page.
         */
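        /*
         * Note: the loop below works in PAGE_SIZE_FOR_SR_SLIDE chunks, which
         * may be smaller than the native PAGE_SIZE (e.g. 4K chunks on a
         * 16K-page configuration), presumably because the shared cache
         * slide info is expressed in those smaller units.
         */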
        for (offset_in_page = 0;
            offset_in_page < PAGE_SIZE;
            offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) {
            vm_object_offset_t chunk_offset;
            vm_object_offset_t offset_in_backing_object;
            vm_object_offset_t offset_in_sliding_range;

            chunk_offset = offset + cur_offset + offset_in_page;

            bcopy((const char *)(src_vaddr +
                offset_in_page),
                (char *)(dst_vaddr + offset_in_page),
                PAGE_SIZE_FOR_SR_SLIDE);

            offset_in_backing_object = (chunk_offset +
                pager->srp_backing_offset);
            if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
                (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
                /* chunk is outside of sliding range: done */
                shared_region_pager_copied++;
                continue;
            }

            offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
            kr = vm_shared_region_slide_page(pager->srp_slide_info,
                dst_vaddr + offset_in_page,
                (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
                (uint32_t) (offset_in_sliding_range / PAGE_SIZE_FOR_SR_SLIDE),
#if __has_feature(ptrauth_calls)
                pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
#else /* __has_feature(ptrauth_calls) */
                0
#endif /* __has_feature(ptrauth_calls) */
                );
            if (shared_region_pager_data_request_debug) {
                printf("shared_region_data_request"
                    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
                    "in sliding range [0x%llx:0x%llx]: "
                    "SLIDE offset 0x%llx="
                    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
                    "[0x%016llx 0x%016llx] "
                    "code_signed=%d "
                    "cs_validated=%d "
                    "cs_tainted=%d "
                    "cs_nx=%d "
                    "kr=0x%x\n",
                    pager,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    chunk_offset,
                    pager->srp_slide_info->si_start,
                    pager->srp_slide_info->si_end,
                    (pager->srp_backing_offset +
                    offset +
                    cur_offset +
                    offset_in_page),
                    pager->srp_backing_offset,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    *(uint64_t *)(dst_vaddr + offset_in_page),
                    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
                    src_page_object->code_signed,
                    src_page->vmp_cs_validated,
                    src_page->vmp_cs_tainted,
                    src_page->vmp_cs_nx,
                    kr);
            }
            if (kr != KERN_SUCCESS) {
                shared_region_pager_slid_error++;
                break;
            }
            shared_region_pager_slid++;
        }

        assert(VM_PAGE_OBJECT(src_page) == src_page_object);
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_lock(src_page_object);

        /*
         * Clean up the result of vm_fault_page() on the source page.
         */
        PAGE_WAKEUP_DONE(src_page);
        src_page = VM_PAGE_NULL;
        vm_object_paging_end(src_page_object);
        vm_object_unlock(src_page_object);

        if (top_page != VM_PAGE_NULL) {
            assert(VM_PAGE_OBJECT(top_page) == src_top_object);
            vm_object_lock(src_top_object);
            VM_PAGE_FREE(top_page);
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        }
    }

done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            boolean_t empty;
            assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
                "upl %p offset 0x%llx size 0x%x\n",
                upl, upl->u_offset, upl->u_size);
            upl_commit_range(upl, 0, upl->u_size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (src_top_object != VM_OBJECT_NULL) {
        vm_object_deallocate(src_top_object);
    }
    return retval;
}

/*
 * shared_region_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
shared_region_pager_reference(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    assert(pager->srp_ref_count > 0);
    pager->srp_ref_count++;
    lck_mtx_unlock(&shared_region_pager_lock);
}


/*
 * shared_region_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "shared_region_pager_lock".
 */
void
shared_region_pager_dequeue(
    shared_region_pager_t pager)
{
    assert(!pager->srp_is_mapped);

    queue_remove(&shared_region_pager_queue,
        pager,
        shared_region_pager_t,
        srp_queue);
    pager->srp_queue.next = NULL;
    pager->srp_queue.prev = NULL;

    shared_region_pager_count--;
}

/*
 * shared_region_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
 * to finish the clean up.
 *
 * "shared_region_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
shared_region_pager_terminate_internal(
    shared_region_pager_t pager)
{
    assert(pager->srp_is_ready);
    assert(!pager->srp_is_mapped);
    assert(pager->srp_ref_count == 1);

    if (pager->srp_backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->srp_backing_object);
        pager->srp_backing_object = VM_OBJECT_NULL;
    }
    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->srp_header.mo_control, 0);
}

/*
 * shared_region_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with shared_region_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
shared_region_pager_deallocate_internal(
    shared_region_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    int count_unmapped;

    if (!locked) {
        lck_mtx_lock(&shared_region_pager_lock);
    }

    /* if we have too many unmapped pagers, trim some */
    count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
    needs_trimming = (count_unmapped > shared_region_pager_cache_limit);

    /* drop a reference on this pager */
    assert(pager->srp_ref_count > 0);
    pager->srp_ref_count--;

    if (pager->srp_ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        shared_region_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&shared_region_pager_lock);
        shared_region_pager_terminate_internal(pager);
    } else if (pager->srp_ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated. Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&shared_region_pager_lock);

        vm_shared_region_slide_info_t si = pager->srp_slide_info;
#if __has_feature(ptrauth_calls)
        /*
         * The slide_info for auth sections lives in the shared region.
         * Just deallocate() on the shared region and clear the field.
         */
        if (si != NULL) {
            if (si->si_shared_region != NULL) {
                assert(si->si_ptrauth);
                vm_shared_region_deallocate(si->si_shared_region);
                pager->srp_slide_info = NULL;
                si = NULL;
            }
        }
#endif /* __has_feature(ptrauth_calls) */
        if (si != NULL) {
            vm_object_deallocate(si->si_slide_object);
            /* free the slide_info_entry */
            kheap_free(KHEAP_DATA_BUFFERS, si->si_slide_info_entry, si->si_slide_info_size);
            kfree(si, sizeof *si);
            pager->srp_slide_info = NULL;
        }

        if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->srp_header.mo_control);
            pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = SHARED_REGION_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&shared_region_pager_lock);
    }

    if (needs_trimming) {
        shared_region_pager_trim();
    }
    /* caution: lock is not held on return... */
}

/*
 * shared_region_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
shared_region_pager_deallocate(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
    pager = shared_region_pager_lookup(mem_obj);
    shared_region_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
shared_region_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
shared_region_pager_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

/*
 * shared_region_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped, but we only take one extra reference the
 * first time it is called.
 */
kern_return_t
shared_region_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    assert(pager->srp_is_ready);
    assert(pager->srp_ref_count > 0); /* pager is alive */
    if (!pager->srp_is_mapped) {
        pager->srp_is_mapped = TRUE;
        pager->srp_ref_count++;
        shared_region_pager_count_mapped++;
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    return KERN_SUCCESS;
}

/*
 * shared_region_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
shared_region_pager_last_unmap(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;
    int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_last_unmap: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    if (pager->srp_is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        shared_region_pager_count_mapped--;
        count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
        if (count_unmapped > shared_region_pager_count_unmapped_max) {
            shared_region_pager_count_unmapped_max = count_unmapped;
        }
        pager->srp_is_mapped = FALSE;
        shared_region_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock! */
    } else {
        lck_mtx_unlock(&shared_region_pager_lock);
    }

    return KERN_SUCCESS;
}


/*
 *
 */
shared_region_pager_t
shared_region_pager_lookup(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
    pager = (shared_region_pager_t)(uintptr_t) mem_obj;
    assert(pager->srp_ref_count > 0);
    return pager;
}
1113
f427ee49
A
1114/*
1115 * Create and return a pager for the given object with the
1116 * given slide information.
1117 */
1118static shared_region_pager_t
d9a64523 1119shared_region_pager_create(
0a7de745
A
1120 vm_object_t backing_object,
1121 vm_object_offset_t backing_offset,
f427ee49
A
1122 struct vm_shared_region_slide_info *slide_info,
1123#if !__has_feature(ptrauth_calls)
1124 __unused
1125#endif /* !__has_feature(ptrauth_calls) */
1126 uint64_t jop_key)
d9a64523 1127{
0a7de745
A
1128 shared_region_pager_t pager;
1129 memory_object_control_t control;
1130 kern_return_t kr;
cb323159 1131 vm_object_t object;
d9a64523 1132
0a7de745 1133 pager = (shared_region_pager_t) kalloc(sizeof(*pager));
d9a64523
A
1134 if (pager == SHARED_REGION_PAGER_NULL) {
1135 return SHARED_REGION_PAGER_NULL;
1136 }
1137
1138 /*
1139 * The vm_map call takes both named entry ports and raw memory
1140 * objects in the same parameter. We need to make sure that
1141 * vm_map does not see this object as a named entry port. So,
1142 * we reserve the first word in the object for a fake ip_kotype
1143 * setting - that will tell vm_map to use it as a memory object.
1144 */
f427ee49
A
1145 pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
1146 pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
1147 pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1148
1149 pager->srp_is_ready = FALSE;/* not ready until it has a "name" */
1150 pager->srp_ref_count = 1; /* existence reference (for the cache) */
1151 pager->srp_ref_count++; /* for the caller */
1152 pager->srp_is_mapped = FALSE;
1153 pager->srp_backing_object = backing_object;
1154 pager->srp_backing_offset = backing_offset;
1155 pager->srp_slide_info = slide_info;
1156#if __has_feature(ptrauth_calls)
1157 pager->srp_jop_key = jop_key;
1158 /*
1159 * If we're getting slide_info from the shared_region,
1160 * take a reference, so it can't disappear from under us.
1161 */
1162 if (slide_info->si_shared_region) {
1163 assert(slide_info->si_ptrauth);
1164 vm_shared_region_reference(slide_info->si_shared_region);
1165 }
1166#endif /* __has_feature(ptrauth_calls) */
d9a64523
A
1167
1168 vm_object_reference(backing_object);
1169
1170 lck_mtx_lock(&shared_region_pager_lock);
1171 /* enter new pager at the head of our list of pagers */
1172 queue_enter_first(&shared_region_pager_queue,
0a7de745
A
1173 pager,
1174 shared_region_pager_t,
f427ee49 1175 srp_queue);
d9a64523
A
1176 shared_region_pager_count++;
1177 if (shared_region_pager_count > shared_region_pager_count_max) {
1178 shared_region_pager_count_max = shared_region_pager_count;
1179 }
1180 lck_mtx_unlock(&shared_region_pager_lock);
1181
1182 kr = memory_object_create_named((memory_object_t) pager,
0a7de745
A
1183 0,
1184 &control);
d9a64523
A
1185 assert(kr == KERN_SUCCESS);
1186
cb323159
A
1187 memory_object_mark_trusted(control);
1188
d9a64523
A
1189 lck_mtx_lock(&shared_region_pager_lock);
1190 /* the new pager is now ready to be used */
f427ee49 1191 pager->srp_is_ready = TRUE;
cb323159
A
1192 object = memory_object_to_vm_object((memory_object_t) pager);
1193 assert(object);
1194 /*
1195 * No one knows about this object and so we get away without the object lock.
1196 * This object is _eventually_ backed by the dyld shared cache and so we want
1197 * to benefit from the lock priority boosting.
1198 */
1199 object->object_is_shared_cache = TRUE;
d9a64523
A
1200 lck_mtx_unlock(&shared_region_pager_lock);
1201
1202 /* wakeup anyone waiting for this pager to be ready */
f427ee49 1203 thread_wakeup(&pager->srp_is_ready);
d9a64523
A
1204
1205 return pager;
1206}

/*
 * shared_region_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
memory_object_t
shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key)
{
    shared_region_pager_t pager;

    /* create new pager */
    pager = shared_region_pager_create(backing_object,
        backing_offset, slide_info, jop_key);
    if (pager == SHARED_REGION_PAGER_NULL) {
        /* could not create a new pager */
        return MEMORY_OBJECT_NULL;
    }

    lck_mtx_lock(&shared_region_pager_lock);
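    /*
     * Wait until shared_region_pager_create() has finished setting up the
     * pager: it sets srp_is_ready and issues the wakeup once the memory
     * object has been given its "name".
     */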
    while (!pager->srp_is_ready) {
        lck_mtx_sleep(&shared_region_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->srp_is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    return (memory_object_t) pager;
}

#if __has_feature(ptrauth_calls)
/*
 * shared_region_pager_match()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object, reusing an existing pager that matches the
 * given backing offset, slide info and jop_key when possible, and creating
 * a new one otherwise.
 */
memory_object_t
shared_region_pager_match(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_shared_region_slide_info_t slide_info,
    uint64_t jop_key)
{
    shared_region_pager_t pager;
    vm_shared_region_slide_info_t si;

    lck_mtx_lock(&shared_region_pager_lock);
    queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
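        /*
         * Pagers are created on the backing object's copy object (see the
         * shared_region_pager_setup() call at the end of this function),
         * so compare against that.
         */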
        if (pager->srp_backing_object != backing_object->copy) {
            continue;
        }
        if (pager->srp_backing_offset != backing_offset) {
            continue;
        }
        si = pager->srp_slide_info;

        /* If there's no AUTH section then it can't match (slide_info is always !NULL) */
        if (!si->si_ptrauth) {
            continue;
        }
        if (pager->srp_jop_key != jop_key) {
            continue;
        }
        if (si->si_slide != slide_info->si_slide) {
            continue;
        }
        if (si->si_start != slide_info->si_start) {
            continue;
        }
        if (si->si_end != slide_info->si_end) {
            continue;
        }
        if (si->si_slide_object != slide_info->si_slide_object) {
            continue;
        }
        if (si->si_slide_info_size != slide_info->si_slide_info_size) {
            continue;
        }
        if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
            continue;
        }
        ++pager->srp_ref_count; /* the caller expects a reference on this */
        lck_mtx_unlock(&shared_region_pager_lock);
        return (memory_object_t)pager;
    }

    /*
     * We didn't find a pre-existing pager, so create one.
     *
     * Note: there is a slight race here since we drop the lock; more than
     * one thread could end up calling setup with the same arguments. That
     * shouldn't break anything, just waste a little memory.
     */
    lck_mtx_unlock(&shared_region_pager_lock);
    return shared_region_pager_setup(backing_object->copy, backing_offset, slide_info, jop_key);
}

void
shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
{
    __unused shared_region_pager_t pager = (shared_region_pager_t)memobj;

    assert(pager->srp_jop_key == task->jop_pid);
}
#endif /* __has_feature(ptrauth_calls) */

void
shared_region_pager_trim(void)
{
    shared_region_pager_t pager, prev_pager;
    queue_head_t trim_queue;
    int num_trim;
    int count_unmapped;

    lck_mtx_lock(&shared_region_pager_lock);

    /*
     * We have too many pagers, try and trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
        !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
        pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);

        if (pager->srp_ref_count == 2 &&
            pager->srp_is_ready &&
            !pager->srp_is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            shared_region_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                pager,
                shared_region_pager_t,
                srp_queue);

            /* have we trimmed enough pagers? */
            count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
            if (count_unmapped <= shared_region_pager_cache_limit) {
                break;
            }
        }
    }
    if (num_trim > shared_region_pager_num_trim_max) {
        shared_region_pager_num_trim_max = num_trim;
    }
    shared_region_pager_num_trim_total += num_trim;

    lck_mtx_unlock(&shared_region_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
            pager,
            shared_region_pager_t,
            srp_queue);
        pager->srp_queue.next = NULL;
        pager->srp_queue.prev = NULL;
        assert(pager->srp_ref_count == 2);
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to remove
         * a reference.
         */
        pager->srp_ref_count--;
        shared_region_pager_terminate_internal(pager);
    }
}