/*
 * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_shared_region.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
extern boolean_t diversify_user_jop;
#endif /* __has_feature(ptrauth_calls) */

/*
 * SHARED REGION MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of a dyld shared cache
 * in shared regions, applying any necessary modifications (sliding,
 * pointer signing, ...).
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the original data from its backing VM object, itself backed by
 * the dyld shared cache file, modifying it if needed and providing it to VM.
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of shared cache files,
 * so the data structures are very simple (simple linked list) for now.
 */
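
/*
 * Typical usage (a rough sketch, not a definitive call chain): the shared
 * region setup code obtains a pager with shared_region_pager_setup() (or
 * shared_region_pager_match() on ptrauth systems) and maps the returned
 * memory object copy-on-write into the shared region; page-in requests then
 * arrive through shared_region_pager_data_request() below.
 */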

/* forward declarations */
void shared_region_pager_reference(memory_object_t mem_obj);
void shared_region_pager_deallocate(memory_object_t mem_obj);
kern_return_t shared_region_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t shared_region_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
boolean_t shared_region_pager_backing_object(
    memory_object_t mem_obj,
    memory_object_offset_t mem_obj_offset,
    vm_object_t *backing_object,
    vm_object_offset_t *backing_offset);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops shared_region_pager_ops = {
    .memory_object_reference = shared_region_pager_reference,
    .memory_object_deallocate = shared_region_pager_deallocate,
    .memory_object_init = shared_region_pager_init,
    .memory_object_terminate = shared_region_pager_terminate,
    .memory_object_data_request = shared_region_pager_data_request,
    .memory_object_data_return = shared_region_pager_data_return,
    .memory_object_data_initialize = shared_region_pager_data_initialize,
    .memory_object_data_unlock = shared_region_pager_data_unlock,
    .memory_object_synchronize = shared_region_pager_synchronize,
    .memory_object_map = shared_region_pager_map,
    .memory_object_last_unmap = shared_region_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_backing_object = shared_region_pager_backing_object,
    .memory_object_pager_name = "shared_region"
};

#if __has_feature(ptrauth_calls)
/*
 * Track mappings between shared_region_id and the key used to sign
 * authenticated pointers.
 */
typedef struct shared_region_jop_key_map {
    queue_chain_t srk_queue;
    char          *srk_shared_region_id;
    uint64_t      srk_jop_key;
    os_refcnt_t   srk_ref_count;   /* count of tasks active with this shared_region_id */
} *shared_region_jop_key_map_t;

os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);

/*
 * The list is protected by the "shared_region_key_map" lock.
 */
int shared_region_key_count = 0;   /* number of active shared_region_id keys */
queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);

/*
 * Find the pointer signing key for the given shared_region_id.
 */
uint64_t
shared_region_find_key(char *shared_region_id)
{
    shared_region_jop_key_map_t region;
    uint64_t key;

    lck_mtx_lock(&shared_region_jop_key_lock);
    queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
        if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
            goto found;
        }
    }
    panic("shared_region_find_key() no key for region '%s'", shared_region_id);

found:
    key = region->srk_jop_key;
    lck_mtx_unlock(&shared_region_jop_key_lock);
    return key;
}

/*
 * Set up the authentication key to use for the given shared_region_id.
 * If inherit is TRUE, then the key must match inherited_key.
 * Creates an additional reference when successful.
 */
void
shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
{
    shared_region_jop_key_map_t region;
    shared_region_jop_key_map_t new = NULL;

    assert(shared_region_id != NULL);
again:
    lck_mtx_lock(&shared_region_jop_key_lock);
    queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
        if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
            os_ref_retain_locked(&region->srk_ref_count);
            goto done;
        }
    }

    /*
     * ID was not found, if first time, allocate a new one and redo the lookup.
     */
    if (new == NULL) {
        lck_mtx_unlock(&shared_region_jop_key_lock);
        new = kalloc(sizeof *new);
        uint_t len = strlen(shared_region_id) + 1;
        new->srk_shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK);
        strlcpy(new->srk_shared_region_id, shared_region_id, len);
        os_ref_init(&new->srk_ref_count, &srk_refgrp);

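        /*
         * Pick the signing key for this ID: inherit the parent's key,
         * generate a fresh diversified key, or fall back to the default
         * JOP pid when diversification is disabled or the ID is empty.
         */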
        if (diversify_user_jop && inherit) {
            new->srk_jop_key = inherited_key;
        } else if (diversify_user_jop && strlen(shared_region_id) > 0) {
            new->srk_jop_key = generate_jop_key();
        } else {
            new->srk_jop_key = ml_default_jop_pid();
        }

        goto again;
    }

    /*
     * Use the newly allocated entry
     */
    ++shared_region_key_count;
    queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
    region = new;
    new = NULL;

done:
    if (inherit && inherited_key != region->srk_jop_key) {
        panic("shared_region_key_alloc() inherited key mismatch");
    }
    lck_mtx_unlock(&shared_region_jop_key_lock);

    /*
     * free any unused new entry
     */
    if (new != NULL) {
        kheap_free(KHEAP_DATA_BUFFERS, new->srk_shared_region_id, strlen(new->srk_shared_region_id) + 1);
        kfree(new, sizeof *new);
    }
}
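
/*
 * Note: each successful shared_region_key_alloc() is expected to be balanced
 * by a matching shared_region_key_dealloc() for the same shared_region_id,
 * which drops the reference taken above.
 */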

/*
 * Mark the end of using a shared_region_id's key
 */
extern void
shared_region_key_dealloc(char *shared_region_id)
{
    shared_region_jop_key_map_t region;

    assert(shared_region_id != NULL);
    lck_mtx_lock(&shared_region_jop_key_lock);
    queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
        if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
            goto done;
        }
    }
    panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);

done:
    if (os_ref_release_locked(&region->srk_ref_count) == 0) {
        queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
        --shared_region_key_count;
    } else {
        region = NULL;
    }
    lck_mtx_unlock(&shared_region_jop_key_lock);

    if (region != NULL) {
        kheap_free(KHEAP_DATA_BUFFERS, region->srk_shared_region_id, strlen(region->srk_shared_region_id) + 1);
        kfree(region, sizeof *region);
    }
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * The "shared_region_pager" describes a memory object backed by
 * the "shared_region" EMM.
 */
typedef struct shared_region_pager {
    struct memory_object    srp_header;         /* mandatory generic header */

    /* pager-specific data */
    queue_chain_t           srp_queue;          /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define srp_ref_count       srp_header.mo_ref
#else
    os_ref_atomic_t         srp_ref_count;      /* active uses */
#endif
    bool                    srp_is_mapped;      /* has active mappings */
    bool                    srp_is_ready;       /* is this pager ready? */
    vm_object_t             srp_backing_object; /* VM object for shared cache */
    vm_object_offset_t      srp_backing_offset;
    vm_shared_region_slide_info_t srp_slide_info;
#if __has_feature(ptrauth_calls)
    uint64_t                srp_jop_key;        /* zero if used for arm64 */
#endif /* __has_feature(ptrauth_calls) */
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "shared_region_pager_lock" lock.
 */
int shared_region_pager_count = 0;              /* number of pagers */
int shared_region_pager_count_mapped = 0;       /* number of mapped pagers */
queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int shared_region_pager_cache_limit = 0;

/*
 * Statistics & counters.
 */
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;

uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;

/* internal prototypes */
shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
    boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
void shared_region_pager_trim(void);


#if DEBUG
int shared_region_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

#define PAGER_DEBUG(LEVEL, A)                                           \
    MACRO_BEGIN                                                         \
    if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {              \
        printf A;                                                       \
    }                                                                   \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
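
/*
 * On DEBUG kernels, page-in tracing can be enabled by setting
 * shared_region_pagerdebug (e.g. from a kernel debugger) to a mask of the
 * PAGER_* bits above; PAGER_DEBUG() is compiled out otherwise.
 */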

/*
 * shared_region_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
shared_region_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    shared_region_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = shared_region_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->srp_header.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("shared_region_pager_init: "
            "memory_object_change_attributes() failed");
    }

#if CONFIG_SECLUDED_MEMORY
    if (secluded_for_filecache) {
#if 00
        /*
         * XXX FBDP do we want this in the secluded pool?
         * Ideally, we'd want the shared region used by Camera to
         * NOT be in the secluded pool, but all other shared regions
         * in the secluded pool...
         */
        memory_object_mark_eligible_for_secluded(control, TRUE);
#endif /* 00 */
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    return KERN_SUCCESS;
}

/*
 * shared_region_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
shared_region_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("shared_region_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("shared_region_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}

/*
 * shared_region_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
#if !DEBUG
    __unused
#endif
    vm_prot_t protection_required,
    memory_object_fault_info_t mo_fault_info)
{
    shared_region_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl;
    unsigned int pl_count;
    vm_object_t src_top_object, src_page_object, dst_object;
    kern_return_t kr, retval;
    vm_offset_t src_vaddr, dst_vaddr;
    vm_offset_t cur_offset;
    vm_offset_t offset_in_page;
    kern_return_t error_code;
    vm_prot_t prot;
    vm_page_t src_page, top_page;
    int interruptible;
    struct vm_object_fault_info fault_info;
    mach_vm_offset_t slide_start_address;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    retval = KERN_SUCCESS;
    src_top_object = VM_OBJECT_NULL;
    src_page_object = VM_OBJECT_NULL;
    upl = NULL;
    upl_pl = NULL;
    fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
    fault_info.stealth = TRUE;
    fault_info.io_sync = FALSE;
    fault_info.mark_zf_absent = FALSE;
    fault_info.batch_pmap_op = FALSE;
    interruptible = fault_info.interruptible;

    pager = shared_region_pager_lookup(mem_obj);
    assert(pager->srp_is_ready);
    assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */
    assert(pager->srp_is_mapped); /* pager is mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->srp_header.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE |    /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = memory_object_control_to_vm_object(mo_control);
    assert(dst_object != VM_OBJECT_NULL);

    /*
     * We'll map the original data in the kernel address space from the
     * backing VM object (itself backed by the shared cache file via
     * the vnode pager).
     */
    src_top_object = pager->srp_backing_object;
    assert(src_top_object != VM_OBJECT_NULL);
    vm_object_reference(src_top_object); /* keep the source object alive */

    slide_start_address = pager->srp_slide_info->si_slid_address;

    fault_info.lo_offset += pager->srp_backing_offset;
    fault_info.hi_offset += pager->srp_backing_offset;

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0;
        retval == KERN_SUCCESS && cur_offset < length;
        cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (dyld shared cache) page in the kernel's
         * virtual address space.
         * We already hold a reference on the src_top_object.
         */
retry_src_fault:
        vm_object_lock(src_top_object);
        vm_object_paging_begin(src_top_object);
        error_code = 0;
        prot = VM_PROT_READ;
        src_page = VM_PAGE_NULL;
        kr = vm_fault_page(src_top_object,
            pager->srp_backing_offset + offset + cur_offset,
            VM_PROT_READ,
            FALSE,
            FALSE,              /* src_page not looked up */
            &prot,
            &src_page,
            &top_page,
            NULL,
            &error_code,
            FALSE,
            FALSE,
            &fault_info);
        switch (kr) {
        case VM_FAULT_SUCCESS:
            break;
        case VM_FAULT_RETRY:
            goto retry_src_fault;
        case VM_FAULT_MEMORY_SHORTAGE:
            if (vm_page_wait(interruptible)) {
                goto retry_src_fault;
            }
            OS_FALLTHROUGH;
        case VM_FAULT_INTERRUPTED:
            retval = MACH_SEND_INTERRUPTED;
            goto done;
        case VM_FAULT_SUCCESS_NO_VM_PAGE:
            /* success but no VM page: fail */
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
            OS_FALLTHROUGH;
        case VM_FAULT_MEMORY_ERROR:
            /* the page is not there! */
            if (error_code) {
                retval = error_code;
            } else {
                retval = KERN_MEMORY_ERROR;
            }
            goto done;
        default:
            panic("shared_region_pager_data_request: "
                "vm_fault_page() unexpected error 0x%x\n",
                kr);
        }
        assert(src_page != VM_PAGE_NULL);
        assert(src_page->vmp_busy);

        if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
            vm_page_lockspin_queues();
            if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
                vm_page_speculate(src_page, FALSE);
            }
            vm_page_unlock_queues();
        }

        /*
         * Establish pointers to the source
         * and destination physical pages.
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);

        src_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
            << PAGE_SHIFT);
        dst_vaddr = (vm_map_offset_t)
            phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
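        /*
         * Note: phystokv() yields the pages' physical-aperture addresses,
         * so no temporary kernel mapping has to be created or torn down here.
         */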
        src_page_object = VM_PAGE_OBJECT(src_page);

        /*
         * Validate the original page...
         */
        if (src_page_object->code_signed) {
            vm_page_validate_cs_mapped(
                src_page, PAGE_SIZE, 0,
                (const void *) src_vaddr);
        }
        /*
         * ... and transfer the results to the destination page.
         */
        UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_validated);
        UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_tainted);
        UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
            src_page->vmp_cs_nx);

        /*
         * The page provider might access a mapped file, so let's
         * release the object lock for the source page to avoid a
         * potential deadlock.
         * The source page is kept busy and we have a
         * "paging_in_progress" reference on its object, so it's safe
         * to unlock the object here.
         */
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_unlock(src_page_object);

        /*
         * Process the original contents of the source page
         * into the destination page.
         */
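        /*
         * The slide info describes the cache in PAGE_SIZE_FOR_SR_SLIDE (4K)
         * sub-page units, which can be smaller than the kernel PAGE_SIZE,
         * so each destination page is copied and slid chunk by chunk.
         */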
        for (offset_in_page = 0;
            offset_in_page < PAGE_SIZE;
            offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) {
            vm_object_offset_t chunk_offset;
            vm_object_offset_t offset_in_backing_object;
            vm_object_offset_t offset_in_sliding_range;

            chunk_offset = offset + cur_offset + offset_in_page;

            bcopy((const char *)(src_vaddr +
                offset_in_page),
                (char *)(dst_vaddr + offset_in_page),
                PAGE_SIZE_FOR_SR_SLIDE);

            offset_in_backing_object = (chunk_offset +
                pager->srp_backing_offset);
            if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
                (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
                /* chunk is outside of sliding range: done */
                shared_region_pager_copied++;
                continue;
            }

            offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
            kr = vm_shared_region_slide_page(pager->srp_slide_info,
                dst_vaddr + offset_in_page,
                (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
                (uint32_t) (offset_in_sliding_range / PAGE_SIZE_FOR_SR_SLIDE),
#if __has_feature(ptrauth_calls)
                pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
#else /* __has_feature(ptrauth_calls) */
                0
#endif /* __has_feature(ptrauth_calls) */
                );
            if (shared_region_pager_data_request_debug) {
                printf("shared_region_data_request"
                    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
                    "in sliding range [0x%llx:0x%llx]: "
                    "SLIDE offset 0x%llx="
                    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
                    "[0x%016llx 0x%016llx] "
                    "code_signed=%d "
                    "cs_validated=%d "
                    "cs_tainted=%d "
                    "cs_nx=%d "
                    "kr=0x%x\n",
                    pager,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    chunk_offset,
                    pager->srp_slide_info->si_start,
                    pager->srp_slide_info->si_end,
                    (pager->srp_backing_offset +
                    offset +
                    cur_offset +
                    offset_in_page),
                    pager->srp_backing_offset,
                    offset,
                    (uint64_t) cur_offset,
                    (uint64_t) offset_in_page,
                    *(uint64_t *)(dst_vaddr + offset_in_page),
                    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
                    src_page_object->code_signed,
                    src_page->vmp_cs_validated,
                    src_page->vmp_cs_tainted,
                    src_page->vmp_cs_nx,
                    kr);
            }
            if (kr != KERN_SUCCESS) {
                shared_region_pager_slid_error++;
                break;
            }
            shared_region_pager_slid++;
        }

        assert(VM_PAGE_OBJECT(src_page) == src_page_object);
        assert(src_page->vmp_busy);
        assert(src_page_object->paging_in_progress > 0);
        vm_object_lock(src_page_object);

        /*
         * Cleanup the result of vm_fault_page() of the source page.
         */
        PAGE_WAKEUP_DONE(src_page);
        src_page = VM_PAGE_NULL;
        vm_object_paging_end(src_page_object);
        vm_object_unlock(src_page_object);

        if (top_page != VM_PAGE_NULL) {
            assert(VM_PAGE_OBJECT(top_page) == src_top_object);
            vm_object_lock(src_top_object);
            VM_PAGE_FREE(top_page);
            vm_object_paging_end(src_top_object);
            vm_object_unlock(src_top_object);
        }
    }

done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            boolean_t empty;
            assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
                "upl %p offset 0x%llx size 0x%x\n",
                upl, upl->u_offset, upl->u_size);
            upl_commit_range(upl, 0, upl->u_size,
                UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (src_top_object != VM_OBJECT_NULL) {
        vm_object_deallocate(src_top_object);
    }
    return retval;
}

/*
 * shared_region_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
shared_region_pager_reference(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
    lck_mtx_unlock(&shared_region_pager_lock);
}


/*
 * shared_region_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "shared_region_pager_lock".
 */
void
shared_region_pager_dequeue(
    shared_region_pager_t pager)
{
    assert(!pager->srp_is_mapped);

    queue_remove(&shared_region_pager_queue,
        pager,
        shared_region_pager_t,
        srp_queue);
    pager->srp_queue.next = NULL;
    pager->srp_queue.prev = NULL;

    shared_region_pager_count--;
}

/*
 * shared_region_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
 * to finish the clean up.
 *
 * "shared_region_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
shared_region_pager_terminate_internal(
    shared_region_pager_t pager)
{
    assert(pager->srp_is_ready);
    assert(!pager->srp_is_mapped);
    assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);

    if (pager->srp_backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->srp_backing_object);
        pager->srp_backing_object = VM_OBJECT_NULL;
    }
    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->srp_header.mo_control, 0);
}

/*
 * shared_region_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with shared_region_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
shared_region_pager_deallocate_internal(
    shared_region_pager_t pager,
    boolean_t locked)
{
    boolean_t needs_trimming;
    int count_unmapped;
    os_ref_count_t ref_count;

    if (!locked) {
        lck_mtx_lock(&shared_region_pager_lock);
    }

    /* if we have too many unmapped pagers, trim some */
    count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
    needs_trimming = (count_unmapped > shared_region_pager_cache_limit);

    /* drop a reference on this pager */
    ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL);

    if (ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        shared_region_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&shared_region_pager_lock);
        shared_region_pager_terminate_internal(pager);
    } else if (ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated. Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&shared_region_pager_lock);

        vm_shared_region_slide_info_t si = pager->srp_slide_info;
#if __has_feature(ptrauth_calls)
        /*
         * The slide_info for auth sections lives in the shared region.
         * Just deallocate() on the shared region and clear the field.
         */
        if (si != NULL) {
            if (si->si_shared_region != NULL) {
                assert(si->si_ptrauth);
                vm_shared_region_deallocate(si->si_shared_region);
                pager->srp_slide_info = NULL;
                si = NULL;
            }
        }
#endif /* __has_feature(ptrauth_calls) */
        if (si != NULL) {
            vm_object_deallocate(si->si_slide_object);
            /* free the slide_info_entry */
            kheap_free(KHEAP_DATA_BUFFERS, si->si_slide_info_entry, si->si_slide_info_size);
            kfree(si, sizeof *si);
            pager->srp_slide_info = NULL;
        }

        if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->srp_header.mo_control);
            pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = SHARED_REGION_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&shared_region_pager_lock);
    }

    if (needs_trimming) {
        shared_region_pager_trim();
    }
    /* caution: lock is not held on return... */
}

/*
 * shared_region_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
shared_region_pager_deallocate(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
    pager = shared_region_pager_lookup(mem_obj);
    shared_region_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
shared_region_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
shared_region_pager_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

/*
 * shared_region_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped, but we only take one extra reference the
 * first time it is called.
 */
kern_return_t
shared_region_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    assert(pager->srp_is_ready);
    assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */
    if (!pager->srp_is_mapped) {
        pager->srp_is_mapped = TRUE;
        os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
        shared_region_pager_count_mapped++;
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    return KERN_SUCCESS;
}

/*
 * shared_region_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
shared_region_pager_last_unmap(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;
    int count_unmapped;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_last_unmap: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    lck_mtx_lock(&shared_region_pager_lock);
    if (pager->srp_is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        shared_region_pager_count_mapped--;
        count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
        if (count_unmapped > shared_region_pager_count_unmapped_max) {
            shared_region_pager_count_unmapped_max = count_unmapped;
        }
        pager->srp_is_mapped = FALSE;
        shared_region_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock! */
    } else {
        lck_mtx_unlock(&shared_region_pager_lock);
    }

    return KERN_SUCCESS;
}

boolean_t
shared_region_pager_backing_object(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    vm_object_t *backing_object,
    vm_object_offset_t *backing_offset)
{
    shared_region_pager_t pager;

    PAGER_DEBUG(PAGER_ALL,
        ("shared_region_pager_backing_object: %p\n", mem_obj));

    pager = shared_region_pager_lookup(mem_obj);

    *backing_object = pager->srp_backing_object;
    *backing_offset = pager->srp_backing_offset + offset;

    return TRUE;
}


/*
 *
 */
shared_region_pager_t
shared_region_pager_lookup(
    memory_object_t mem_obj)
{
    shared_region_pager_t pager;

    assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
    pager = (shared_region_pager_t)(uintptr_t) mem_obj;
    assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0);
    return pager;
}

/*
 * Create and return a pager for the given object with the
 * given slide information.
 */
static shared_region_pager_t
shared_region_pager_create(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
#if !__has_feature(ptrauth_calls)
    __unused
#endif /* !__has_feature(ptrauth_calls) */
    uint64_t jop_key)
{
    shared_region_pager_t pager;
    memory_object_control_t control;
    kern_return_t kr;
    vm_object_t object;

    pager = (shared_region_pager_t) kalloc(sizeof(*pager));
    if (pager == SHARED_REGION_PAGER_NULL) {
        return SHARED_REGION_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter. We need to make sure that
     * vm_map does not see this object as a named entry port. So,
     * we reserve the first word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
    pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->srp_is_ready = FALSE; /* not ready until it has a "name" */
    /* existence reference (for the cache) + 1 for the caller */
    os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2);
    pager->srp_is_mapped = FALSE;
    pager->srp_backing_object = backing_object;
    pager->srp_backing_offset = backing_offset;
    pager->srp_slide_info = slide_info;
#if __has_feature(ptrauth_calls)
    pager->srp_jop_key = jop_key;
    /*
     * If we're getting slide_info from the shared_region,
     * take a reference, so it can't disappear from under us.
     */
    if (slide_info->si_shared_region) {
        assert(slide_info->si_ptrauth);
        vm_shared_region_reference(slide_info->si_shared_region);
    }
#endif /* __has_feature(ptrauth_calls) */

    vm_object_reference(backing_object);

    lck_mtx_lock(&shared_region_pager_lock);
    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&shared_region_pager_queue,
        pager,
        shared_region_pager_t,
        srp_queue);
    shared_region_pager_count++;
    if (shared_region_pager_count > shared_region_pager_count_max) {
        shared_region_pager_count_max = shared_region_pager_count;
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    memory_object_mark_trusted(control);

    lck_mtx_lock(&shared_region_pager_lock);
    /* the new pager is now ready to be used */
    pager->srp_is_ready = TRUE;
    object = memory_object_to_vm_object((memory_object_t) pager);
    assert(object);
    /*
     * No one knows about this object and so we get away without the object lock.
     * This object is _eventually_ backed by the dyld shared cache and so we want
     * to benefit from the lock priority boosting.
     */
    object->object_is_shared_cache = TRUE;
    lck_mtx_unlock(&shared_region_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->srp_is_ready);

    return pager;
}

/*
 * shared_region_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
memory_object_t
shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key)
{
    shared_region_pager_t pager;

    /* create new pager */
    pager = shared_region_pager_create(backing_object,
        backing_offset, slide_info, jop_key);
    if (pager == SHARED_REGION_PAGER_NULL) {
        /* could not create a new pager */
        return MEMORY_OBJECT_NULL;
    }

    lck_mtx_lock(&shared_region_pager_lock);
    while (!pager->srp_is_ready) {
        lck_mtx_sleep(&shared_region_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->srp_is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&shared_region_pager_lock);

    return (memory_object_t) pager;
}

#if __has_feature(ptrauth_calls)
/*
 * shared_region_pager_match()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
memory_object_t
shared_region_pager_match(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_shared_region_slide_info_t slide_info,
    uint64_t jop_key)
{
    shared_region_pager_t pager;
    vm_shared_region_slide_info_t si;

    lck_mtx_lock(&shared_region_pager_lock);
    queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
        if (pager->srp_backing_object != backing_object->copy) {
            continue;
        }
        if (pager->srp_backing_offset != backing_offset) {
            continue;
        }
        si = pager->srp_slide_info;

        /* If there's no AUTH section then it can't match (slide_info is always !NULL) */
        if (!si->si_ptrauth) {
            continue;
        }
        if (pager->srp_jop_key != jop_key) {
            continue;
        }
        if (si->si_slide != slide_info->si_slide) {
            continue;
        }
        if (si->si_start != slide_info->si_start) {
            continue;
        }
        if (si->si_end != slide_info->si_end) {
            continue;
        }
        if (si->si_slide_object != slide_info->si_slide_object) {
            continue;
        }
        if (si->si_slide_info_size != slide_info->si_slide_info_size) {
            continue;
        }
        if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
            continue;
        }
        /* the caller expects a reference on this */
        os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
        lck_mtx_unlock(&shared_region_pager_lock);
        return (memory_object_t)pager;
    }

    /*
     * We didn't find a pre-existing pager, so create one.
     *
     * Note slight race condition here since we drop the lock. This could lead to more than one
     * thread calling setup with the same arguments here. That shouldn't break anything, just
     * waste a little memory.
     */
    lck_mtx_unlock(&shared_region_pager_lock);
    return shared_region_pager_setup(backing_object->copy, backing_offset, slide_info, jop_key);
}

void
shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
{
    __unused shared_region_pager_t pager = (shared_region_pager_t)memobj;

    assert(pager->srp_jop_key == task->jop_pid);
}
#endif /* __has_feature(ptrauth_calls) */

void
shared_region_pager_trim(void)
{
    shared_region_pager_t pager, prev_pager;
    queue_head_t trim_queue;
    int num_trim;
    int count_unmapped;

    lck_mtx_lock(&shared_region_pager_lock);

    /*
     * We have too many pagers, try and trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
        !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
        pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);

        if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
            pager->srp_is_ready &&
            !pager->srp_is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            shared_region_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                pager,
                shared_region_pager_t,
                srp_queue);

            /* do we have enough pagers to trim? */
            count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
            if (count_unmapped <= shared_region_pager_cache_limit) {
                break;
            }
        }
    }
    if (num_trim > shared_region_pager_num_trim_max) {
        shared_region_pager_num_trim_max = num_trim;
    }
    shared_region_pager_num_trim_total += num_trim;

    lck_mtx_unlock(&shared_region_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
            pager,
            shared_region_pager_t,
            srp_queue);
        pager->srp_queue.next = NULL;
        pager->srp_queue.prev = NULL;
        assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2);
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to remove
         * a reference.
         */
        (void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
        shared_region_pager_terminate_internal(pager);
    }
}