/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/page_decrypt.h>
#include <kern/queue.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>


/*
 * APPLE PROTECT MEMORY PAGER
 *
 * This external memory manager (EMM) handles memory from the encrypted
 * sections of some executables protected by the DSMOS kernel extension.
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the encrypted data from its backing VM object, itself backed by
 * the encrypted file, decrypting it and providing it to VM.
 *
 * The decrypted pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of apple-protected
 * binaries, so the data structures are very simple (simple linked list)
 * for now.
 */

/* forward declarations */
void apple_protect_pager_reference(memory_object_t mem_obj);
void apple_protect_pager_deallocate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_init(memory_object_t mem_obj,
                                       memory_object_control_t control,
                                       vm_size_t pg_size);
kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj);
kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj,
                                               memory_object_offset_t offset,
                                               vm_size_t length,
                                               vm_prot_t protection_required);
kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj,
                                              memory_object_offset_t offset,
                                              vm_size_t data_cnt,
                                              memory_object_offset_t *resid_offset,
                                              int *io_error,
                                              boolean_t dirty,
                                              boolean_t kernel_copy,
                                              int upl_flags);
kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj,
                                                  memory_object_offset_t offset,
                                                  vm_size_t data_cnt);
kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj,
                                              memory_object_offset_t offset,
                                              vm_size_t size,
                                              vm_prot_t desired_access);
kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
                                              memory_object_offset_t offset,
                                              vm_size_t length,
                                              vm_sync_t sync_flags);
kern_return_t apple_protect_pager_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops apple_protect_pager_ops = {
    apple_protect_pager_reference,
    apple_protect_pager_deallocate,
    apple_protect_pager_init,
    apple_protect_pager_terminate,
    apple_protect_pager_data_request,
    apple_protect_pager_data_return,
    apple_protect_pager_data_initialize,
    apple_protect_pager_data_unlock,
    apple_protect_pager_synchronize,
    apple_protect_pager_unmap,
    "apple protect pager"
};

/*
 * The "apple_protect_pager" describes a memory object backed by
 * the "apple protect" EMM.
 */
typedef struct apple_protect_pager {
    memory_object_pager_ops_t pager_ops;      /* == &apple_protect_pager_ops */
    unsigned int              pager_ikot;     /* JMM: fake ip_kotype() */
    queue_chain_t             pager_queue;    /* next & prev pagers */
    unsigned int              ref_count;      /* reference count */
    boolean_t                 is_ready;       /* is this pager ready ? */
    boolean_t                 is_mapped;      /* is this mem_obj mapped ? */
    memory_object_control_t   pager_control;  /* mem object control handle */
    vm_object_t               backing_object; /* VM obj w/ encrypted data */
} *apple_protect_pager_t;
#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "apple_protect_pager_lock" lock.
 */
int apple_protect_pager_count = 0;        /* number of pagers */
int apple_protect_pager_count_mapped = 0; /* number of mapped pagers */
queue_head_t apple_protect_pager_queue;
decl_mutex_data(,apple_protect_pager_lock)

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int apple_protect_pager_cache_limit = 10;

/*
 * Statistics & counters.
 */
int apple_protect_pager_count_max = 0;
int apple_protect_pager_count_unmapped_max = 0;
int apple_protect_pager_num_trim_max = 0;
int apple_protect_pager_num_trim_total = 0;

/* internal prototypes */
apple_protect_pager_t apple_protect_pager_create(vm_object_t backing_object);
apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj);
void apple_protect_pager_dequeue(apple_protect_pager_t pager);
void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager,
                                             boolean_t locked);
void apple_protect_pager_terminate_internal(apple_protect_pager_t pager);
void apple_protect_pager_trim(void);


#if DEBUG
int apple_protect_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                                   \
    MACRO_BEGIN                                                 \
    if ((apple_protect_pagerdebug & LEVEL) == LEVEL) {          \
        printf A;                                               \
    }                                                           \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


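/*
 * apple_protect_pager_bootstrap()
 *
 * Initialize the global state of this EMM: the list of pagers and the
 * lock that protects it.
 */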
void
apple_protect_pager_bootstrap(void)
{
    mutex_init(&apple_protect_pager_lock, 0);
    queue_init(&apple_protect_pager_queue);
}

/*
 * apple_protect_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
apple_protect_pager_init(
    memory_object_t         mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    vm_size_t               pg_size)
{
    apple_protect_pager_t          pager;
    kern_return_t                  kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
                ("apple_protect_pager_init: %p, %p, %x\n",
                 mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    pager = apple_protect_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->pager_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS)
        panic("apple_protect_pager_init: "
              "memory_object_change_attributes() failed");

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
apple_protect_pager_data_return(
    __unused memory_object_t         mem_obj,
    __unused memory_object_offset_t  offset,
    __unused vm_size_t               data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int                    *io_error,
    __unused boolean_t               dirty,
    __unused boolean_t               kernel_copy,
    __unused int                     upl_flags)
{
    panic("apple_protect_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_initialize(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused vm_size_t              data_cnt)
{
    panic("apple_protect_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
apple_protect_pager_data_unlock(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused vm_size_t              size,
    __unused vm_prot_t              desired_access)
{
    return KERN_FAILURE;
}

/*
 * apple_protect_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
apple_protect_pager_data_request(
    memory_object_t        mem_obj,
    memory_object_offset_t offset,
    vm_size_t              length,
#if !DEBUG
    __unused
#endif
    vm_prot_t              protection_required)
{
    apple_protect_pager_t   pager;
    memory_object_control_t mo_control;
    upl_t                   upl = NULL;
    int                     upl_flags;
    upl_size_t              upl_size;
    upl_page_info_t        *upl_pl;
    vm_object_t             src_object, dst_object;
    kern_return_t           kr, retval;
    vm_map_offset_t         src_mapping = 0, dst_mapping = 0;
    vm_offset_t             src_vaddr, dst_vaddr;
    vm_offset_t             cur_offset;
    boolean_t               src_map_page_by_page;
    vm_map_entry_t          map_entry;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %x, %llx, %llx, %x\n", mem_obj, offset, length, protection_required));

    pager = apple_protect_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(pager->ref_count > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %x, %llx, %llx, %x, pager %x\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Map the encrypted data in the kernel address space from the
     * backing VM object (itself backed by the encrypted file via
     * the vnode pager).
     */
    src_object = pager->backing_object;
    assert(src_object != VM_OBJECT_NULL);
    vm_object_reference(src_object); /* ref. for the mapping */
    src_mapping = 0;
    kr = vm_map_enter(kernel_map,
                      &src_mapping,
                      length,
                      0,
                      VM_FLAGS_ANYWHERE,
                      src_object,
                      offset,
                      FALSE,
                      VM_PROT_READ,
                      VM_PROT_READ,
                      VM_INHERIT_NONE);
    switch (kr) {
    case KERN_SUCCESS:
        /* wire the memory to make sure it is available */
        kr = vm_map_wire(kernel_map,
                         src_mapping,
                         src_mapping + length,
                         VM_PROT_READ,
                         FALSE);
        if (kr != KERN_SUCCESS) {
            /*
             * Wiring failed, so unmap source and fall back
             * to page by page mapping of the source.
             */
            kr = vm_map_remove(kernel_map,
                               src_mapping,
                               src_mapping + length,
                               VM_MAP_NO_FLAGS);
            assert(kr == KERN_SUCCESS);
            src_mapping = 0;
            src_vaddr = 0;
            src_map_page_by_page = TRUE;
            break;
        }
        /* source region is now fully mapped and wired */
        src_map_page_by_page = FALSE;
        src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
        break;
    case KERN_NO_SPACE:
        /* we couldn't map the entire source, so map it page by page */
        src_map_page_by_page = TRUE;
        /* release the reference for the failed mapping */
        vm_object_deallocate(src_object);
        break;
    default:
        vm_object_deallocate(src_object);
        retval = kr;
        goto done;
    }


    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->pager_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    kr = memory_object_upl_request(mo_control,
                                   offset, upl_size,
                                   &upl, NULL, NULL, upl_flags);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }

    /*
     * Reserve a virtual page in the kernel address space to map each
     * destination physical page when it's its turn to be filled.
     */
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);
    dst_mapping = 0;
    vm_object_reference(kernel_object); /* ref. for mapping */
    kr = vm_map_find_space(kernel_map,
                           &dst_mapping,
                           PAGE_SIZE_64,
                           0,
                           0,
                           &map_entry);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(kernel_object);
        retval = kr;
        goto done;
    }
    map_entry->object.vm_object = kernel_object;
    map_entry->offset = dst_mapping - VM_MIN_KERNEL_ADDRESS;
    vm_map_unlock(kernel_map);
    dst_vaddr = CAST_DOWN(vm_offset_t, dst_mapping);

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, cur_offset / PAGE_SIZE)) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Map the source (encrypted) page in the kernel's
         * virtual address space.
         */
        if (src_map_page_by_page) {
            vm_object_reference(src_object); /* ref. for mapping */
            kr = vm_map_enter(kernel_map,
                              &src_mapping,
                              PAGE_SIZE_64,
                              0,
                              VM_FLAGS_ANYWHERE,
                              src_object,
                              offset + cur_offset,
                              FALSE,
                              VM_PROT_READ,
                              VM_PROT_READ,
                              VM_INHERIT_NONE);
            if (kr != KERN_SUCCESS) {
                vm_object_deallocate(src_object);
                retval = kr;
                goto done;
            }
            kr = vm_map_wire(kernel_map,
                             src_mapping,
                             src_mapping + PAGE_SIZE_64,
                             VM_PROT_READ,
                             FALSE);
            if (kr != KERN_SUCCESS) {
                retval = kr;
                kr = vm_map_remove(kernel_map,
                                   src_mapping,
                                   src_mapping + PAGE_SIZE_64,
                                   VM_MAP_NO_FLAGS);
                assert(kr == KERN_SUCCESS);
                src_mapping = 0;
                src_vaddr = 0;
                printf("apple_protect_pager_data_request: "
                       "failed to resolve page fault for src "
                       "object %p offset 0x%llx "
                       "preempt %d error 0x%x\n",
                       src_object, offset + cur_offset,
                       get_preemption_level(), retval);
                goto done;
            }
            src_vaddr = CAST_DOWN(vm_offset_t, src_mapping);
        } else {
            src_vaddr = src_mapping + cur_offset;
        }

        /*
         * Establish an explicit pmap mapping of the destination
         * physical page.
         * We can't do a regular VM mapping because the VM page
         * is "busy".
         */
        dst_pnum = (addr64_t)
            upl_phys_page(upl_pl, cur_offset / PAGE_SIZE);
        assert(dst_pnum != 0);
        pmap_enter(kernel_pmap, dst_mapping, dst_pnum,
                   VM_PROT_READ | VM_PROT_WRITE,
                   dst_object->wimg_bits & VM_WIMG_MASK,
                   FALSE);

        /*
         * Decrypt the encrypted contents of the source page
         * into the destination page.
         */
        dsmos_page_transform((const void *) src_vaddr,
                             (void *) dst_vaddr);

        /*
         * Remove the pmap mapping of the destination page
         * in the kernel.
         */
        pmap_remove(kernel_pmap,
                    (addr64_t) dst_mapping,
                    (addr64_t) (dst_mapping + PAGE_SIZE_64));

        if (src_map_page_by_page) {
            /*
             * Remove the wired kernel mapping of the source page.
             * This releases the extra reference we took on
             * src_object.
             */
            kr = vm_map_remove(kernel_map,
                               src_mapping,
                               src_mapping + PAGE_SIZE_64,
                               VM_MAP_REMOVE_KUNWIRE);
            assert(kr == KERN_SUCCESS);
            src_mapping = 0;
            src_vaddr = 0;
        }
    }

    retval = KERN_SUCCESS;
done:
    if (src_mapping != 0) {
        /* remove the wired mapping of the source pages */
        kr = vm_map_remove(kernel_map,
                           src_mapping,
                           src_mapping + length,
                           VM_MAP_REMOVE_KUNWIRE);
        assert(kr == KERN_SUCCESS);
        src_mapping = 0;
        src_vaddr = 0;
    }
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            upl_commit(upl, NULL, 0);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (dst_mapping != 0) {
        /* clean up the mapping of the destination pages */
        kr = vm_map_remove(kernel_map,
                           dst_mapping,
                           dst_mapping + PAGE_SIZE_64,
                           VM_MAP_NO_FLAGS);
        assert(kr == KERN_SUCCESS);
        dst_mapping = 0;
        dst_vaddr = 0;
    }

    return retval;
}

/*
 * apple_protect_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
apple_protect_pager_reference(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    pager = apple_protect_pager_lookup(mem_obj);

    mutex_lock(&apple_protect_pager_lock);
    assert(pager->ref_count > 0);
    pager->ref_count++;
    mutex_unlock(&apple_protect_pager_lock);
}


/*
 * apple_protect_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "apple_protect_pager_lock".
 */
void
apple_protect_pager_dequeue(
    apple_protect_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&apple_protect_pager_queue,
                 pager,
                 apple_protect_pager_t,
                 pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    apple_protect_pager_count--;
}

/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
    apple_protect_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->backing_object != VM_OBJECT_NULL) {
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
    }

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->pager_control, 0);
}

/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
    apple_protect_pager_t pager,
    boolean_t             locked)
{
    boolean_t needs_trimming;
    int       count_unmapped;

    if (! locked) {
        mutex_lock(&apple_protect_pager_lock);
    }

    count_unmapped = (apple_protect_pager_count -
                      apple_protect_pager_count_mapped);
    if (count_unmapped > apple_protect_pager_cache_limit) {
        /* we have too many unmapped pagers: trim some */
        needs_trimming = TRUE;
    } else {
        needs_trimming = FALSE;
    }

    /* drop a reference on this pager */
    pager->ref_count--;

    if (pager->ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        apple_protect_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        mutex_unlock(&apple_protect_pager_lock);
        apple_protect_pager_terminate_internal(pager);
    } else if (pager->ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated. Do some final cleanup and release the
         * pager structure.
         */
        mutex_unlock(&apple_protect_pager_lock);
        if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->pager_control);
            pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof (*pager));
        pager = APPLE_PROTECT_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        mutex_unlock(&apple_protect_pager_lock);
    }

    if (needs_trimming) {
        apple_protect_pager_trim();
    }
    /* caution: lock is not held on return... */
}

/*
 * apple_protect_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
apple_protect_pager_deallocate(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %x\n", mem_obj));
    pager = apple_protect_pager_lookup(mem_obj);
    apple_protect_pager_deallocate_internal(pager, FALSE);
}

/*
 * apple_protect_pager_terminate()
 *
 * Nothing to do here; the actual cleanup happens when the last reference
 * on the pager is released (see apple_protect_pager_deallocate_internal()).
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %x\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_synchronize()
 *
 * Nothing to flush here; simply report the synchronization as completed.
 */
kern_return_t
apple_protect_pager_synchronize(
    memory_object_t        mem_obj,
    memory_object_offset_t offset,
    vm_size_t              length,
    __unused vm_sync_t     sync_flags)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_synchronize: %x\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    memory_object_synchronize_completed(pager->pager_control,
                                        offset, length);

    return KERN_SUCCESS;
}

/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. VM calls this only the first time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
void
apple_protect_pager_map(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %x\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    mutex_lock(&apple_protect_pager_lock);
    assert(pager->is_ready);
    assert(pager->ref_count > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        pager->ref_count++;
        apple_protect_pager_count_mapped++;
    }
    mutex_unlock(&apple_protect_pager_lock);
}

/*
 * apple_protect_pager_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
apple_protect_pager_unmap(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;
    int                   count_unmapped;

    PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_unmap: %x\n", mem_obj));

    pager = apple_protect_pager_lookup(mem_obj);

    mutex_lock(&apple_protect_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        apple_protect_pager_count_mapped--;
        count_unmapped = (apple_protect_pager_count -
                          apple_protect_pager_count_mapped);
        if (count_unmapped > apple_protect_pager_count_unmapped_max) {
            apple_protect_pager_count_unmapped_max = count_unmapped;
        }
        pager->is_mapped = FALSE;
        apple_protect_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock ! */
    } else {
        mutex_unlock(&apple_protect_pager_lock);
    }

    return KERN_SUCCESS;
}


/*
 * apple_protect_pager_lookup()
 *
 * Convert from a memory object to the "apple protect" pager that backs it,
 * with some sanity checks.
 */
apple_protect_pager_t
apple_protect_pager_lookup(
    memory_object_t mem_obj)
{
    apple_protect_pager_t pager;

    pager = (apple_protect_pager_t) mem_obj;
    assert(pager->pager_ops == &apple_protect_pager_ops);
    assert(pager->ref_count > 0);
    return pager;
}

apple_protect_pager_t
apple_protect_pager_create(
    vm_object_t backing_object)
{
    apple_protect_pager_t   pager, pager2;
    memory_object_control_t control;
    kern_return_t           kr;

    pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
    if (pager == APPLE_PROTECT_PAGER_NULL) {
        return APPLE_PROTECT_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter. We need to make sure that
     * vm_map does not see this object as a named entry port. So,
     * we reserve the second word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->pager_ops = &apple_protect_pager_ops;
    pager->pager_ikot = IKOT_MEMORY_OBJECT;
    pager->is_ready = FALSE; /* not ready until it has a "name" */
    pager->ref_count = 2;    /* existence + setup reference */
    pager->is_mapped = FALSE;
    pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
    pager->backing_object = backing_object;
    vm_object_reference(backing_object);

    mutex_lock(&apple_protect_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&apple_protect_pager_queue,
                  pager2,
                  apple_protect_pager_t,
                  pager_queue) {
        if (pager2->backing_object == backing_object) {
            break;
        }
    }
    if (! queue_end(&apple_protect_pager_queue,
                    (queue_entry_t) pager2)) {
        /* while we hold the lock, transfer our setup ref to winner */
        pager2->ref_count++;
        /* we lost the race, down with the loser... */
        mutex_unlock(&apple_protect_pager_lock);
        vm_object_deallocate(pager->backing_object);
        pager->backing_object = VM_OBJECT_NULL;
        kfree(pager, sizeof (*pager));
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }

    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&apple_protect_pager_queue,
                      pager,
                      apple_protect_pager_t,
                      pager_queue);
    apple_protect_pager_count++;
    if (apple_protect_pager_count > apple_protect_pager_count_max) {
        apple_protect_pager_count_max = apple_protect_pager_count;
    }
    mutex_unlock(&apple_protect_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
                                    0,
                                    &control);
    assert(kr == KERN_SUCCESS);

    mutex_lock(&apple_protect_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    mutex_unlock(&apple_protect_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    return pager;
}

/*
 * apple_protect_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object. If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
apple_protect_pager_setup(
    vm_object_t backing_object)
{
    apple_protect_pager_t pager;

    mutex_lock(&apple_protect_pager_lock);

    queue_iterate(&apple_protect_pager_queue,
                  pager,
                  apple_protect_pager_t,
                  pager_queue) {
        if (pager->backing_object == backing_object) {
            break;
        }
    }
    if (queue_end(&apple_protect_pager_queue,
                  (queue_entry_t) pager)) {
        /* no existing pager for this backing object */
        pager = APPLE_PROTECT_PAGER_NULL;
    } else {
        /* make sure pager doesn't disappear */
        pager->ref_count++;
    }

    mutex_unlock(&apple_protect_pager_lock);

    if (pager == APPLE_PROTECT_PAGER_NULL) {
        pager = apple_protect_pager_create(backing_object);
        if (pager == APPLE_PROTECT_PAGER_NULL) {
            return MEMORY_OBJECT_NULL;
        }
    }

    mutex_lock(&apple_protect_pager_lock);
    while (!pager->is_ready) {
        thread_sleep_mutex(&pager->is_ready,
                           &apple_protect_pager_lock,
                           THREAD_UNINT);
    }
    mutex_unlock(&apple_protect_pager_lock);

    return (memory_object_t) pager;
}

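/*
 * apple_protect_pager_trim()
 *
 * Called from apple_protect_pager_deallocate_internal() when the number of
 * unmapped pagers exceeds "apple_protect_pager_cache_limit": terminate
 * enough unused pagers to get back under the limit.
 */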
void
apple_protect_pager_trim(void)
{
    apple_protect_pager_t pager, prev_pager;
    queue_head_t          trim_queue;
    int                   num_trim;
    int                   count_unmapped;

    mutex_lock(&apple_protect_pager_lock);

    /*
     * We have too many pagers, so try to trim some unused ones,
     * starting with the oldest pager at the end of the queue.
     */
    queue_init(&trim_queue);
    num_trim = 0;

    for (pager = (apple_protect_pager_t)
             queue_last(&apple_protect_pager_queue);
         !queue_end(&apple_protect_pager_queue,
                    (queue_entry_t) pager);
         pager = prev_pager) {
        /* get prev elt before we dequeue */
        prev_pager = (apple_protect_pager_t)
            queue_prev(&pager->pager_queue);

        if (pager->ref_count == 2 &&
            pager->is_ready &&
            !pager->is_mapped) {
            /* this pager can be trimmed */
            num_trim++;
            /* remove this pager from the main list ... */
            apple_protect_pager_dequeue(pager);
            /* ... and add it to our trim queue */
            queue_enter_first(&trim_queue,
                              pager,
                              apple_protect_pager_t,
                              pager_queue);

            count_unmapped = (apple_protect_pager_count -
                              apple_protect_pager_count_mapped);
            if (count_unmapped <= apple_protect_pager_cache_limit) {
                /* we've trimmed enough pagers; stop here */
                break;
            }
        }
    }
    if (num_trim > apple_protect_pager_num_trim_max) {
        apple_protect_pager_num_trim_max = num_trim;
    }
    apple_protect_pager_num_trim_total += num_trim;

    mutex_unlock(&apple_protect_pager_lock);

    /* terminate the trimmed pagers */
    while (!queue_empty(&trim_queue)) {
        queue_remove_first(&trim_queue,
                           pager,
                           apple_protect_pager_t,
                           pager_queue);
        pager->pager_queue.next = NULL;
        pager->pager_queue.prev = NULL;
        assert(pager->ref_count == 2);
        /*
         * We can't call deallocate_internal() because the pager
         * has already been dequeued, but we still need to remove
         * a reference.
         */
        pager->ref_count--;
        apple_protect_pager_terminate_internal(pager);
    }
}