]>
Commit | Line | Data |
---|---|---|
0c530ab8 A |
1 | /* |
2 | * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. | |
3 | * | |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
0c530ab8 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
0c530ab8 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
0c530ab8 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
0c530ab8 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
0c530ab8 A |
27 | */ |
28 | ||
29 | #include <sys/errno.h> | |
30 | ||
31 | #include <mach/mach_types.h> | |
32 | #include <mach/mach_traps.h> | |
33 | #include <mach/host_priv.h> | |
34 | #include <mach/kern_return.h> | |
35 | #include <mach/memory_object_control.h> | |
36 | #include <mach/memory_object_types.h> | |
37 | #include <mach/port.h> | |
38 | #include <mach/policy.h> | |
39 | #include <mach/upl.h> | |
40 | #include <mach/thread_act.h> | |
41 | #include <mach/mach_vm.h> | |
42 | ||
43 | #include <kern/host.h> | |
44 | #include <kern/kalloc.h> | |
45 | #include <kern/page_decrypt.h> | |
46 | #include <kern/queue.h> | |
47 | #include <kern/thread.h> | |
39037602 | 48 | #include <kern/ipc_kobject.h> |
0c530ab8 A |
49 | |
50 | #include <ipc/ipc_port.h> | |
51 | #include <ipc/ipc_space.h> | |
52 | ||
2d21ac55 | 53 | #include <vm/vm_fault.h> |
0c530ab8 A |
54 | #include <vm/vm_map.h> |
55 | #include <vm/vm_pageout.h> | |
56 | #include <vm/memory_object.h> | |
57 | #include <vm/vm_pageout.h> | |
58 | #include <vm/vm_protos.h> | |
39037602 | 59 | #include <vm/vm_kern.h> |
0c530ab8 A |
60 | |
61 | ||
62 | /* | |
63 | * APPLE PROTECT MEMORY PAGER | |
64 | * | |
65 | * This external memory manager (EMM) handles memory from the encrypted | |
66 | * sections of some executables protected by the DSMOS kernel extension. | |
67 | * | |
68 | * It mostly handles page-in requests (from memory_object_data_request()) by | |
69 | * getting the encrypted data from its backing VM object, itself backed by | |
70 | * the encrypted file, decrypting it and providing it to VM. | |
71 | * | |
72 | * The decrypted pages will never be dirtied, so the memory manager doesn't | |
73 | * need to handle page-out requests (from memory_object_data_return()). The | |
74 | * pages need to be mapped copy-on-write, so that the originals stay clean. | |
75 | * | |
76 | * We don't expect to have to handle a large number of apple-protected | |
77 | * binaries, so the data structures are very simple (simple linked list) | |
78 | * for now. | |
79 | */ | |
80 | ||
81 | /* forward declarations */ | |
82 | void apple_protect_pager_reference(memory_object_t mem_obj); | |
83 | void apple_protect_pager_deallocate(memory_object_t mem_obj); | |
84 | kern_return_t apple_protect_pager_init(memory_object_t mem_obj, | |
85 | memory_object_control_t control, | |
b0d623f7 | 86 | memory_object_cluster_size_t pg_size); |
0c530ab8 A |
87 | kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj); |
88 | kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj, | |
89 | memory_object_offset_t offset, | |
b0d623f7 | 90 | memory_object_cluster_size_t length, |
2d21ac55 A |
91 | vm_prot_t protection_required, |
92 | memory_object_fault_info_t fault_info); | |
0c530ab8 A |
93 | kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj, |
94 | memory_object_offset_t offset, | |
b0d623f7 | 95 | memory_object_cluster_size_t data_cnt, |
0c530ab8 A |
96 | memory_object_offset_t *resid_offset, |
97 | int *io_error, | |
98 | boolean_t dirty, | |
99 | boolean_t kernel_copy, | |
100 | int upl_flags); | |
101 | kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj, | |
102 | memory_object_offset_t offset, | |
b0d623f7 | 103 | memory_object_cluster_size_t data_cnt); |
0c530ab8 A |
104 | kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj, |
105 | memory_object_offset_t offset, | |
b0d623f7 | 106 | memory_object_size_t size, |
0c530ab8 A |
107 | vm_prot_t desired_access); |
108 | kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj, | |
109 | memory_object_offset_t offset, | |
b0d623f7 | 110 | memory_object_size_t length, |
0c530ab8 | 111 | vm_sync_t sync_flags); |
593a1d5f A |
112 | kern_return_t apple_protect_pager_map(memory_object_t mem_obj, |
113 | vm_prot_t prot); | |
114 | kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj); | |
0c530ab8 | 115 | |
3e170ce0 A |
116 | #define CRYPT_INFO_DEBUG 0 |
117 | void crypt_info_reference(struct pager_crypt_info *crypt_info); | |
118 | void crypt_info_deallocate(struct pager_crypt_info *crypt_info); | |
119 | ||
0c530ab8 A |
120 | /* |
121 | * Vector of VM operations for this EMM. | |
122 | * These routines are invoked by VM via the memory_object_*() interfaces. | |
123 | */ | |
124 | const struct memory_object_pager_ops apple_protect_pager_ops = { | |
125 | apple_protect_pager_reference, | |
126 | apple_protect_pager_deallocate, | |
127 | apple_protect_pager_init, | |
128 | apple_protect_pager_terminate, | |
129 | apple_protect_pager_data_request, | |
130 | apple_protect_pager_data_return, | |
131 | apple_protect_pager_data_initialize, | |
132 | apple_protect_pager_data_unlock, | |
133 | apple_protect_pager_synchronize, | |
593a1d5f A |
134 | apple_protect_pager_map, |
135 | apple_protect_pager_last_unmap, | |
6d2010ae | 136 | NULL, /* data_reclaim */ |
3e170ce0 | 137 | "apple_protect" |
0c530ab8 A |
138 | }; |
139 | ||
140 | /* | |
141 | * The "apple_protect_pager" describes a memory object backed by | |
142 | * the "apple protect" EMM. | |
143 | */ | |
144 | typedef struct apple_protect_pager { | |
5ba3f43e A |
145 | /* mandatory generic header */ |
146 | struct memory_object ap_pgr_hdr; | |
147 | ||
148 | /* pager-specific data */ | |
0c530ab8 A |
149 | queue_chain_t pager_queue; /* next & prev pagers */ |
150 | unsigned int ref_count; /* reference count */ | |
151 | boolean_t is_ready; /* is this pager ready ? */ | |
152 | boolean_t is_mapped; /* is this mem_obj mapped ? */ | |
0c530ab8 | 153 | vm_object_t backing_object; /* VM obj w/ encrypted data */ |
3e170ce0 A |
154 | vm_object_offset_t backing_offset; |
155 | vm_object_offset_t crypto_backing_offset; /* for key... */ | |
156 | vm_object_offset_t crypto_start; | |
157 | vm_object_offset_t crypto_end; | |
158 | struct pager_crypt_info *crypt_info; | |
0c530ab8 A |
159 | } *apple_protect_pager_t; |
160 | #define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL) | |
161 | ||
162 | /* | |
163 | * List of memory objects managed by this EMM. | |
164 | * The list is protected by the "apple_protect_pager_lock" lock. | |
165 | */ | |
166 | int apple_protect_pager_count = 0; /* number of pagers */ | |
167 | int apple_protect_pager_count_mapped = 0; /* number of unmapped pagers */ | |
168 | queue_head_t apple_protect_pager_queue; | |
b0d623f7 | 169 | decl_lck_mtx_data(,apple_protect_pager_lock) |
0c530ab8 A |
170 | |
171 | /* | |
172 | * Maximum number of unmapped pagers we're willing to keep around. | |
173 | */ | |
490019cf | 174 | int apple_protect_pager_cache_limit = 20; |
0c530ab8 A |
175 | |
176 | /* | |
177 | * Statistics & counters. | |
178 | */ | |
179 | int apple_protect_pager_count_max = 0; | |
180 | int apple_protect_pager_count_unmapped_max = 0; | |
181 | int apple_protect_pager_num_trim_max = 0; | |
182 | int apple_protect_pager_num_trim_total = 0; | |
183 | ||
b0d623f7 A |
184 | |
185 | lck_grp_t apple_protect_pager_lck_grp; | |
186 | lck_grp_attr_t apple_protect_pager_lck_grp_attr; | |
187 | lck_attr_t apple_protect_pager_lck_attr; | |
188 | ||
189 | ||
0c530ab8 | 190 | /* internal prototypes */ |
3e170ce0 A |
191 | apple_protect_pager_t apple_protect_pager_create( |
192 | vm_object_t backing_object, | |
193 | vm_object_offset_t backing_offset, | |
194 | vm_object_offset_t crypto_backing_offset, | |
195 | struct pager_crypt_info *crypt_info, | |
196 | vm_object_offset_t crypto_start, | |
197 | vm_object_offset_t crypto_end); | |
0c530ab8 A |
198 | apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj); |
199 | void apple_protect_pager_dequeue(apple_protect_pager_t pager); | |
200 | void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager, | |
201 | boolean_t locked); | |
202 | void apple_protect_pager_terminate_internal(apple_protect_pager_t pager); | |
203 | void apple_protect_pager_trim(void); | |
204 | ||
205 | ||
206 | #if DEBUG | |
207 | int apple_protect_pagerdebug = 0; | |
208 | #define PAGER_ALL 0xffffffff | |
209 | #define PAGER_INIT 0x00000001 | |
210 | #define PAGER_PAGEIN 0x00000002 | |
211 | ||
212 | #define PAGER_DEBUG(LEVEL, A) \ | |
213 | MACRO_BEGIN \ | |
214 | if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \ | |
215 | printf A; \ | |
216 | } \ | |
217 | MACRO_END | |
218 | #else | |
219 | #define PAGER_DEBUG(LEVEL, A) | |
220 | #endif | |
221 | ||
222 | ||
223 | void | |
224 | apple_protect_pager_bootstrap(void) | |
225 | { | |
b0d623f7 A |
226 | lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr); |
227 | lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr); | |
228 | lck_attr_setdefault(&apple_protect_pager_lck_attr); | |
229 | lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr); | |
0c530ab8 A |
230 | queue_init(&apple_protect_pager_queue); |
231 | } | |
232 | ||
233 | /* | |
234 | * apple_protect_pager_init() | |
235 | * | |
236 | * Initialize the memory object and makes it ready to be used and mapped. | |
237 | */ | |
238 | kern_return_t | |
239 | apple_protect_pager_init( | |
240 | memory_object_t mem_obj, | |
241 | memory_object_control_t control, | |
242 | #if !DEBUG | |
243 | __unused | |
244 | #endif | |
b0d623f7 | 245 | memory_object_cluster_size_t pg_size) |
0c530ab8 A |
246 | { |
247 | apple_protect_pager_t pager; | |
248 | kern_return_t kr; | |
249 | memory_object_attr_info_data_t attributes; | |
250 | ||
251 | PAGER_DEBUG(PAGER_ALL, | |
252 | ("apple_protect_pager_init: %p, %p, %x\n", | |
253 | mem_obj, control, pg_size)); | |
254 | ||
255 | if (control == MEMORY_OBJECT_CONTROL_NULL) | |
256 | return KERN_INVALID_ARGUMENT; | |
257 | ||
258 | pager = apple_protect_pager_lookup(mem_obj); | |
259 | ||
260 | memory_object_control_reference(control); | |
261 | ||
5ba3f43e | 262 | pager->ap_pgr_hdr.mo_control = control; |
0c530ab8 A |
263 | |
264 | attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
265 | /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ | |
266 | attributes.cluster_size = (1 << (PAGE_SHIFT)); | |
267 | attributes.may_cache_object = FALSE; | |
268 | attributes.temporary = TRUE; | |
269 | ||
270 | kr = memory_object_change_attributes( | |
271 | control, | |
272 | MEMORY_OBJECT_ATTRIBUTE_INFO, | |
273 | (memory_object_info_t) &attributes, | |
274 | MEMORY_OBJECT_ATTR_INFO_COUNT); | |
275 | if (kr != KERN_SUCCESS) | |
276 | panic("apple_protect_pager_init: " | |
277 | "memory_object_change_attributes() failed"); | |
278 | ||
39037602 A |
279 | #if CONFIG_SECLUDED_MEMORY |
280 | if (secluded_for_filecache) { | |
281 | memory_object_mark_eligible_for_secluded(control, TRUE); | |
282 | } | |
283 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
284 | ||
0c530ab8 A |
285 | return KERN_SUCCESS; |
286 | } | |
287 | ||
288 | /* | |
289 | * apple_protect_data_return() | |
290 | * | |
291 | * Handles page-out requests from VM. This should never happen since | |
292 | * the pages provided by this EMM are not supposed to be dirty or dirtied | |
293 | * and VM should simply discard the contents and reclaim the pages if it | |
294 | * needs to. | |
295 | */ | |
296 | kern_return_t | |
297 | apple_protect_pager_data_return( | |
298 | __unused memory_object_t mem_obj, | |
299 | __unused memory_object_offset_t offset, | |
b0d623f7 | 300 | __unused memory_object_cluster_size_t data_cnt, |
0c530ab8 A |
301 | __unused memory_object_offset_t *resid_offset, |
302 | __unused int *io_error, | |
303 | __unused boolean_t dirty, | |
304 | __unused boolean_t kernel_copy, | |
305 | __unused int upl_flags) | |
306 | { | |
307 | panic("apple_protect_pager_data_return: should never get called"); | |
308 | return KERN_FAILURE; | |
309 | } | |
310 | ||
311 | kern_return_t | |
312 | apple_protect_pager_data_initialize( | |
313 | __unused memory_object_t mem_obj, | |
314 | __unused memory_object_offset_t offset, | |
b0d623f7 | 315 | __unused memory_object_cluster_size_t data_cnt) |
0c530ab8 A |
316 | { |
317 | panic("apple_protect_pager_data_initialize: should never get called"); | |
318 | return KERN_FAILURE; | |
319 | } | |
320 | ||
321 | kern_return_t | |
322 | apple_protect_pager_data_unlock( | |
323 | __unused memory_object_t mem_obj, | |
324 | __unused memory_object_offset_t offset, | |
b0d623f7 | 325 | __unused memory_object_size_t size, |
0c530ab8 A |
326 | __unused vm_prot_t desired_access) |
327 | { | |
328 | return KERN_FAILURE; | |
329 | } | |
330 | ||
331 | /* | |
332 | * apple_protect_pager_data_request() | |
333 | * | |
334 | * Handles page-in requests from VM. | |
335 | */ | |
3e170ce0 | 336 | int apple_protect_pager_data_request_debug = 0; |
0c530ab8 A |
337 | kern_return_t |
338 | apple_protect_pager_data_request( | |
339 | memory_object_t mem_obj, | |
340 | memory_object_offset_t offset, | |
b0d623f7 | 341 | memory_object_cluster_size_t length, |
0c530ab8 A |
342 | #if !DEBUG |
343 | __unused | |
344 | #endif | |
2d21ac55 A |
345 | vm_prot_t protection_required, |
346 | memory_object_fault_info_t mo_fault_info) | |
0c530ab8 A |
347 | { |
348 | apple_protect_pager_t pager; | |
349 | memory_object_control_t mo_control; | |
2d21ac55 | 350 | upl_t upl; |
0c530ab8 A |
351 | int upl_flags; |
352 | upl_size_t upl_size; | |
b0d623f7 | 353 | upl_page_info_t *upl_pl; |
593a1d5f | 354 | unsigned int pl_count; |
39037602 | 355 | vm_object_t src_top_object, src_page_object, dst_object; |
0c530ab8 | 356 | kern_return_t kr, retval; |
0c530ab8 A |
357 | vm_offset_t src_vaddr, dst_vaddr; |
358 | vm_offset_t cur_offset; | |
fe8ab488 | 359 | vm_offset_t offset_in_page; |
2d21ac55 A |
360 | kern_return_t error_code; |
361 | vm_prot_t prot; | |
362 | vm_page_t src_page, top_page; | |
363 | int interruptible; | |
b0d623f7 A |
364 | struct vm_object_fault_info fault_info; |
365 | int ret; | |
2d21ac55 A |
366 | |
367 | PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required)); | |
0c530ab8 | 368 | |
b0d623f7 | 369 | retval = KERN_SUCCESS; |
39037602 A |
370 | src_top_object = VM_OBJECT_NULL; |
371 | src_page_object = VM_OBJECT_NULL; | |
2d21ac55 | 372 | upl = NULL; |
593a1d5f | 373 | upl_pl = NULL; |
d9a64523 | 374 | fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info); |
b0d623f7 | 375 | fault_info.stealth = TRUE; |
6d2010ae | 376 | fault_info.io_sync = FALSE; |
0b4c1975 | 377 | fault_info.mark_zf_absent = FALSE; |
316670eb | 378 | fault_info.batch_pmap_op = FALSE; |
b0d623f7 | 379 | interruptible = fault_info.interruptible; |
0c530ab8 A |
380 | |
381 | pager = apple_protect_pager_lookup(mem_obj); | |
382 | assert(pager->is_ready); | |
383 | assert(pager->ref_count > 1); /* pager is alive and mapped */ | |
384 | ||
2d21ac55 | 385 | PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager)); |
0c530ab8 | 386 | |
d9a64523 A |
387 | fault_info.lo_offset += pager->backing_offset; |
388 | fault_info.hi_offset += pager->backing_offset; | |
389 | ||
0c530ab8 A |
390 | /* |
391 | * Gather in a UPL all the VM pages requested by VM. | |
392 | */ | |
5ba3f43e | 393 | mo_control = pager->ap_pgr_hdr.mo_control; |
0c530ab8 A |
394 | |
395 | upl_size = length; | |
396 | upl_flags = | |
397 | UPL_RET_ONLY_ABSENT | | |
398 | UPL_SET_LITE | | |
399 | UPL_NO_SYNC | | |
400 | UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ | |
401 | UPL_SET_INTERNAL; | |
593a1d5f | 402 | pl_count = 0; |
0c530ab8 A |
403 | kr = memory_object_upl_request(mo_control, |
404 | offset, upl_size, | |
5ba3f43e | 405 | &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY); |
0c530ab8 A |
406 | if (kr != KERN_SUCCESS) { |
407 | retval = kr; | |
408 | goto done; | |
409 | } | |
2d21ac55 A |
410 | dst_object = mo_control->moc_object; |
411 | assert(dst_object != VM_OBJECT_NULL); | |
412 | ||
2d21ac55 A |
413 | /* |
414 | * We'll map the encrypted data in the kernel address space from the | |
415 | * backing VM object (itself backed by the encrypted file via | |
416 | * the vnode pager). | |
417 | */ | |
39037602 A |
418 | src_top_object = pager->backing_object; |
419 | assert(src_top_object != VM_OBJECT_NULL); | |
420 | vm_object_reference(src_top_object); /* keep the source object alive */ | |
0c530ab8 A |
421 | |
422 | /* | |
423 | * Fill in the contents of the pages requested by VM. | |
424 | */ | |
425 | upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); | |
593a1d5f | 426 | pl_count = length / PAGE_SIZE; |
b0d623f7 A |
427 | for (cur_offset = 0; |
428 | retval == KERN_SUCCESS && cur_offset < length; | |
429 | cur_offset += PAGE_SIZE) { | |
0c530ab8 A |
430 | ppnum_t dst_pnum; |
431 | ||
b0d623f7 | 432 | if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) { |
0c530ab8 A |
433 | /* this page is not in the UPL: skip it */ |
434 | continue; | |
435 | } | |
436 | ||
437 | /* | |
438 | * Map the source (encrypted) page in the kernel's | |
439 | * virtual address space. | |
39037602 | 440 | * We already hold a reference on the src_top_object. |
0c530ab8 | 441 | */ |
2d21ac55 | 442 | retry_src_fault: |
39037602 A |
443 | vm_object_lock(src_top_object); |
444 | vm_object_paging_begin(src_top_object); | |
2d21ac55 A |
445 | error_code = 0; |
446 | prot = VM_PROT_READ; | |
39236c6e | 447 | src_page = VM_PAGE_NULL; |
39037602 | 448 | kr = vm_fault_page(src_top_object, |
3e170ce0 | 449 | pager->backing_offset + offset + cur_offset, |
2d21ac55 A |
450 | VM_PROT_READ, |
451 | FALSE, | |
39236c6e | 452 | FALSE, /* src_page not looked up */ |
2d21ac55 A |
453 | &prot, |
454 | &src_page, | |
455 | &top_page, | |
b0d623f7 | 456 | NULL, |
2d21ac55 A |
457 | &error_code, |
458 | FALSE, | |
459 | FALSE, | |
b0d623f7 | 460 | &fault_info); |
2d21ac55 A |
461 | switch (kr) { |
462 | case VM_FAULT_SUCCESS: | |
463 | break; | |
464 | case VM_FAULT_RETRY: | |
465 | goto retry_src_fault; | |
466 | case VM_FAULT_MEMORY_SHORTAGE: | |
467 | if (vm_page_wait(interruptible)) { | |
468 | goto retry_src_fault; | |
0c530ab8 | 469 | } |
2d21ac55 A |
470 | /* fall thru */ |
471 | case VM_FAULT_INTERRUPTED: | |
472 | retval = MACH_SEND_INTERRUPTED; | |
473 | goto done; | |
b0d623f7 A |
474 | case VM_FAULT_SUCCESS_NO_VM_PAGE: |
475 | /* success but no VM page: fail */ | |
39037602 A |
476 | vm_object_paging_end(src_top_object); |
477 | vm_object_unlock(src_top_object); | |
b0d623f7 | 478 | /*FALLTHROUGH*/ |
2d21ac55 A |
479 | case VM_FAULT_MEMORY_ERROR: |
480 | /* the page is not there ! */ | |
481 | if (error_code) { | |
482 | retval = error_code; | |
483 | } else { | |
484 | retval = KERN_MEMORY_ERROR; | |
0c530ab8 | 485 | } |
2d21ac55 A |
486 | goto done; |
487 | default: | |
b0d623f7 A |
488 | panic("apple_protect_pager_data_request: " |
489 | "vm_fault_page() unexpected error 0x%x\n", | |
490 | kr); | |
0c530ab8 | 491 | } |
2d21ac55 | 492 | assert(src_page != VM_PAGE_NULL); |
d9a64523 | 493 | assert(src_page->vmp_busy); |
b0d623f7 | 494 | |
d9a64523 | 495 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
39037602 | 496 | |
b0d623f7 | 497 | vm_page_lockspin_queues(); |
39037602 | 498 | |
d9a64523 A |
499 | if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { |
500 | vm_page_speculate(src_page, FALSE); | |
b0d623f7 A |
501 | } |
502 | vm_page_unlock_queues(); | |
503 | } | |
3e170ce0 | 504 | |
2d21ac55 | 505 | /* |
d9a64523 A |
506 | * Establish pointers to the source |
507 | * and destination physical pages. | |
2d21ac55 | 508 | */ |
d9a64523 A |
509 | dst_pnum = (ppnum_t) |
510 | upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); | |
511 | assert(dst_pnum != 0); | |
3e170ce0 A |
512 | #if __x86_64__ |
513 | src_vaddr = (vm_map_offset_t) | |
39037602 | 514 | PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) |
3e170ce0 | 515 | << PAGE_SHIFT); |
d9a64523 A |
516 | dst_vaddr = (vm_map_offset_t) |
517 | PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); | |
518 | ||
5ba3f43e A |
519 | #elif __arm__ || __arm64__ |
520 | src_vaddr = (vm_map_offset_t) | |
521 | phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) | |
522 | << PAGE_SHIFT); | |
5ba3f43e A |
523 | dst_vaddr = (vm_map_offset_t) |
524 | phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); | |
3e170ce0 | 525 | #else |
d9a64523 A |
526 | #error "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..." |
527 | src_vaddr = 0; | |
528 | dst_vaddr = 0; | |
3e170ce0 | 529 | #endif |
39037602 | 530 | src_page_object = VM_PAGE_OBJECT(src_page); |
3e170ce0 A |
531 | |
532 | /* | |
533 | * Validate the original page... | |
534 | */ | |
39037602 | 535 | if (src_page_object->code_signed) { |
3e170ce0 A |
536 | vm_page_validate_cs_mapped( |
537 | src_page, | |
538 | (const void *) src_vaddr); | |
539 | } | |
540 | /* | |
541 | * ... and transfer the results to the destination page. | |
542 | */ | |
543 | UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, | |
d9a64523 | 544 | src_page->vmp_cs_validated); |
3e170ce0 | 545 | UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, |
d9a64523 | 546 | src_page->vmp_cs_tainted); |
3e170ce0 | 547 | UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, |
d9a64523 | 548 | src_page->vmp_cs_nx); |
3e170ce0 A |
549 | |
550 | /* | |
551 | * page_decrypt() might access a mapped file, so let's release | |
552 | * the object lock for the source page to avoid a potential | |
553 | * deadlock. The source page is kept busy and we have a | |
554 | * "paging_in_progress" reference on its object, so it's safe | |
555 | * to unlock the object here. | |
556 | */ | |
d9a64523 | 557 | assert(src_page->vmp_busy); |
39037602 A |
558 | assert(src_page_object->paging_in_progress > 0); |
559 | vm_object_unlock(src_page_object); | |
0c530ab8 A |
560 | |
561 | /* | |
562 | * Decrypt the encrypted contents of the source page | |
563 | * into the destination page. | |
564 | */ | |
fe8ab488 A |
565 | for (offset_in_page = 0; |
566 | offset_in_page < PAGE_SIZE; | |
567 | offset_in_page += 4096) { | |
3e170ce0 A |
568 | if (offset + cur_offset + offset_in_page < |
569 | pager->crypto_start || | |
570 | offset + cur_offset + offset_in_page >= | |
571 | pager->crypto_end) { | |
572 | /* not encrypted: just copy */ | |
573 | bcopy((const char *)(src_vaddr + | |
574 | offset_in_page), | |
575 | (char *)(dst_vaddr + offset_in_page), | |
576 | 4096); | |
d9a64523 | 577 | |
3e170ce0 A |
578 | if (apple_protect_pager_data_request_debug) { |
579 | printf("apple_protect_data_request" | |
580 | "(%p,0x%llx+0x%llx+0x%04llx): " | |
581 | "out of crypto range " | |
582 | "[0x%llx:0x%llx]: " | |
583 | "COPY [0x%016llx 0x%016llx] " | |
584 | "code_signed=%d " | |
585 | "cs_validated=%d " | |
586 | "cs_tainted=%d " | |
587 | "cs_nx=%d\n", | |
588 | pager, | |
589 | offset, | |
590 | (uint64_t) cur_offset, | |
591 | (uint64_t) offset_in_page, | |
592 | pager->crypto_start, | |
593 | pager->crypto_end, | |
594 | *(uint64_t *)(dst_vaddr+ | |
595 | offset_in_page), | |
596 | *(uint64_t *)(dst_vaddr+ | |
597 | offset_in_page+8), | |
39037602 | 598 | src_page_object->code_signed, |
d9a64523 A |
599 | src_page->vmp_cs_validated, |
600 | src_page->vmp_cs_tainted, | |
601 | src_page->vmp_cs_nx); | |
3e170ce0 A |
602 | } |
603 | ret = 0; | |
604 | continue; | |
605 | } | |
606 | ret = pager->crypt_info->page_decrypt( | |
607 | (const void *)(src_vaddr + offset_in_page), | |
608 | (void *)(dst_vaddr + offset_in_page), | |
609 | ((pager->crypto_backing_offset - | |
610 | pager->crypto_start) + /* XXX ? */ | |
611 | offset + | |
612 | cur_offset + | |
613 | offset_in_page), | |
614 | pager->crypt_info->crypt_ops); | |
d9a64523 | 615 | |
3e170ce0 A |
616 | if (apple_protect_pager_data_request_debug) { |
617 | printf("apple_protect_data_request" | |
618 | "(%p,0x%llx+0x%llx+0x%04llx): " | |
619 | "in crypto range [0x%llx:0x%llx]: " | |
620 | "DECRYPT offset 0x%llx=" | |
621 | "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)" | |
622 | "[0x%016llx 0x%016llx] " | |
623 | "code_signed=%d " | |
624 | "cs_validated=%d " | |
625 | "cs_tainted=%d " | |
626 | "cs_nx=%d " | |
627 | "ret=0x%x\n", | |
628 | pager, | |
629 | offset, | |
630 | (uint64_t) cur_offset, | |
631 | (uint64_t) offset_in_page, | |
632 | pager->crypto_start, pager->crypto_end, | |
633 | ((pager->crypto_backing_offset - | |
634 | pager->crypto_start) + | |
635 | offset + | |
636 | cur_offset + | |
637 | offset_in_page), | |
638 | pager->crypto_backing_offset, | |
639 | pager->crypto_start, | |
640 | offset, | |
641 | (uint64_t) cur_offset, | |
642 | (uint64_t) offset_in_page, | |
643 | *(uint64_t *)(dst_vaddr+offset_in_page), | |
644 | *(uint64_t *)(dst_vaddr+offset_in_page+8), | |
39037602 | 645 | src_page_object->code_signed, |
d9a64523 A |
646 | src_page->vmp_cs_validated, |
647 | src_page->vmp_cs_tainted, | |
648 | src_page->vmp_cs_nx, | |
3e170ce0 A |
649 | ret); |
650 | } | |
fe8ab488 A |
651 | if (ret) { |
652 | break; | |
653 | } | |
654 | } | |
b0d623f7 A |
655 | if (ret) { |
656 | /* | |
657 | * Decryption failed. Abort the fault. | |
658 | */ | |
659 | retval = KERN_ABORTED; | |
b0d623f7 | 660 | } |
3e170ce0 | 661 | |
39037602 | 662 | assert(VM_PAGE_OBJECT(src_page) == src_page_object); |
d9a64523 | 663 | assert(src_page->vmp_busy); |
39037602 A |
664 | assert(src_page_object->paging_in_progress > 0); |
665 | vm_object_lock(src_page_object); | |
3e170ce0 | 666 | |
2d21ac55 A |
667 | /* |
668 | * Cleanup the result of vm_fault_page() of the source page. | |
669 | */ | |
d9a64523 | 670 | PAGE_WAKEUP_DONE(src_page); |
39037602 A |
671 | src_page = VM_PAGE_NULL; |
672 | vm_object_paging_end(src_page_object); | |
673 | vm_object_unlock(src_page_object); | |
d9a64523 | 674 | |
2d21ac55 | 675 | if (top_page != VM_PAGE_NULL) { |
39037602 A |
676 | assert(VM_PAGE_OBJECT(top_page) == src_top_object); |
677 | vm_object_lock(src_top_object); | |
2d21ac55 | 678 | VM_PAGE_FREE(top_page); |
39037602 A |
679 | vm_object_paging_end(src_top_object); |
680 | vm_object_unlock(src_top_object); | |
0c530ab8 A |
681 | } |
682 | } | |
683 | ||
0c530ab8 | 684 | done: |
0c530ab8 A |
685 | if (upl != NULL) { |
686 | /* clean up the UPL */ | |
687 | ||
688 | /* | |
689 | * The pages are currently dirty because we've just been | |
690 | * writing on them, but as far as we're concerned, they're | |
691 | * clean since they contain their "original" contents as | |
692 | * provided by us, the pager. | |
693 | * Tell the UPL to mark them "clean". | |
694 | */ | |
695 | upl_clear_dirty(upl, TRUE); | |
696 | ||
697 | /* abort or commit the UPL */ | |
698 | if (retval != KERN_SUCCESS) { | |
699 | upl_abort(upl, 0); | |
b0d623f7 A |
700 | if (retval == KERN_ABORTED) { |
701 | wait_result_t wait_result; | |
702 | ||
703 | /* | |
704 | * We aborted the fault and did not provide | |
705 | * any contents for the requested pages but | |
706 | * the pages themselves are not invalid, so | |
707 | * let's return success and let the caller | |
708 | * retry the fault, in case it might succeed | |
709 | * later (when the decryption code is up and | |
710 | * running in the kernel, for example). | |
711 | */ | |
712 | retval = KERN_SUCCESS; | |
713 | /* | |
714 | * Wait a little bit first to avoid using | |
715 | * too much CPU time retrying and failing | |
716 | * the same fault over and over again. | |
717 | */ | |
718 | wait_result = assert_wait_timeout( | |
719 | (event_t) apple_protect_pager_data_request, | |
720 | THREAD_UNINT, | |
721 | 10000, /* 10ms */ | |
722 | NSEC_PER_USEC); | |
723 | assert(wait_result == THREAD_WAITING); | |
724 | wait_result = thread_block(THREAD_CONTINUE_NULL); | |
725 | assert(wait_result == THREAD_TIMED_OUT); | |
726 | } | |
0c530ab8 | 727 | } else { |
593a1d5f A |
728 | boolean_t empty; |
729 | upl_commit_range(upl, 0, upl->size, | |
15129b1c | 730 | UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, |
593a1d5f | 731 | upl_pl, pl_count, &empty); |
0c530ab8 A |
732 | } |
733 | ||
734 | /* and deallocate the UPL */ | |
735 | upl_deallocate(upl); | |
736 | upl = NULL; | |
737 | } | |
39037602 A |
738 | if (src_top_object != VM_OBJECT_NULL) { |
739 | vm_object_deallocate(src_top_object); | |
2d21ac55 | 740 | } |
0c530ab8 A |
741 | return retval; |
742 | } | |
743 | ||
744 | /* | |
745 | * apple_protect_pager_reference() | |
746 | * | |
747 | * Get a reference on this memory object. | |
748 | * For external usage only. Assumes that the initial reference count is not 0, | |
749 | * i.e one should not "revive" a dead pager this way. | |
750 | */ | |
751 | void | |
752 | apple_protect_pager_reference( | |
753 | memory_object_t mem_obj) | |
754 | { | |
755 | apple_protect_pager_t pager; | |
756 | ||
757 | pager = apple_protect_pager_lookup(mem_obj); | |
758 | ||
b0d623f7 | 759 | lck_mtx_lock(&apple_protect_pager_lock); |
0c530ab8 A |
760 | assert(pager->ref_count > 0); |
761 | pager->ref_count++; | |
b0d623f7 | 762 | lck_mtx_unlock(&apple_protect_pager_lock); |
0c530ab8 A |
763 | } |
764 | ||
765 | ||
766 | /* | |
767 | * apple_protect_pager_dequeue: | |
768 | * | |
769 | * Removes a pager from the list of pagers. | |
770 | * | |
771 | * The caller must hold "apple_protect_pager_lock". | |
772 | */ | |
773 | void | |
774 | apple_protect_pager_dequeue( | |
775 | apple_protect_pager_t pager) | |
776 | { | |
777 | assert(!pager->is_mapped); | |
778 | ||
779 | queue_remove(&apple_protect_pager_queue, | |
780 | pager, | |
781 | apple_protect_pager_t, | |
782 | pager_queue); | |
783 | pager->pager_queue.next = NULL; | |
784 | pager->pager_queue.prev = NULL; | |
785 | ||
786 | apple_protect_pager_count--; | |
787 | } | |
788 | ||
/*
 * apple_protect_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. apple_protect_pager_deallocate())
 * to finish the clean up.
 *
 * "apple_protect_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
apple_protect_pager_terminate_internal(
	apple_protect_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	/* release our reference on the backing VM object */
	if (pager->backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
	}

	/* one less pager using this "pager_crypt_info" */
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: deallocate %p ref %d\n",
	       __FUNCTION__,
	       pager->crypt_info,
	       pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
	crypt_info_deallocate(pager->crypt_info);
	pager->crypt_info = NULL;

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->ap_pgr_hdr.mo_control, 0);
}
827 | ||
/*
 * apple_protect_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with apple_protect_pager_lock held or not but always returns
 * with it unlocked.
 */
void
apple_protect_pager_deallocate_internal(
	apple_protect_pager_t	pager,
	boolean_t		locked)
{
	boolean_t	needs_trimming;
	int		count_unmapped;

	if (! locked) {
		lck_mtx_lock(&apple_protect_pager_lock);
	}

	/* decide about trimming now, while we still hold the lock */
	count_unmapped = (apple_protect_pager_count -
			  apple_protect_pager_count_mapped);
	if (count_unmapped > apple_protect_pager_cache_limit) {
		/* we have too many unmapped pagers: trim some */
		needs_trimming = TRUE;
	} else {
		needs_trimming = FALSE;
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		apple_protect_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&apple_protect_pager_lock);
		apple_protect_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&apple_protect_pager_lock);
		if (pager->ap_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control);
			pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = APPLE_PROTECT_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	if (needs_trimming) {
		apple_protect_pager_trim();
	}
	/* caution: lock is not held on return... */
}
893 | ||
894 | /* | |
895 | * apple_protect_pager_deallocate() | |
896 | * | |
897 | * Release a reference on this pager and free it when the last | |
898 | * reference goes away. | |
899 | */ | |
900 | void | |
901 | apple_protect_pager_deallocate( | |
902 | memory_object_t mem_obj) | |
903 | { | |
904 | apple_protect_pager_t pager; | |
905 | ||
2d21ac55 | 906 | PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj)); |
0c530ab8 A |
907 | pager = apple_protect_pager_lookup(mem_obj); |
908 | apple_protect_pager_deallocate_internal(pager, FALSE); | |
909 | } | |
910 | ||
/*
 * apple_protect_pager_terminate()
 *
 * Memory-object termination entry point.  Nothing to do here: the
 * actual cleanup is performed by apple_protect_pager_deallocate()
 * when the last reference on the pager is released.
 */
kern_return_t
apple_protect_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}
925 | ||
/*
 * apple_protect_pager_synchronize()
 *
 * memory_object_synchronize() is no longer supported, so this entry
 * point should never be reached; panic if it is.
 */
kern_return_t
apple_protect_pager_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		sync_flags)
{
	panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n");
	/* not reached; keeps the prototype's return contract */
	return KERN_FAILURE;
}
939 | ||
/*
 * apple_protect_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
apple_protect_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	apple_protect_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
		apple_protect_pager_count_mapped++;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	return KERN_SUCCESS;
}
976 | ||
/*
 * apple_protect_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 * Drops the extra "mapped" reference taken by apple_protect_pager_map().
 */
kern_return_t
apple_protect_pager_last_unmap(
	memory_object_t mem_obj)
{
	apple_protect_pager_t	pager;
	int			count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
		    ("apple_protect_pager_last_unmap: %p\n", mem_obj));

	pager = apple_protect_pager_lookup(mem_obj);

	lck_mtx_lock(&apple_protect_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		apple_protect_pager_count_mapped--;
		/* track the high-water mark of unmapped (cached) pagers */
		count_unmapped = (apple_protect_pager_count -
				  apple_protect_pager_count_mapped);
		if (count_unmapped > apple_protect_pager_count_unmapped_max) {
			apple_protect_pager_count_unmapped_max = count_unmapped;
		}
		pager->is_mapped = FALSE;
		/* TRUE: the lock is held; callee unlocks on all paths */
		apple_protect_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&apple_protect_pager_lock);
	}

	return KERN_SUCCESS;
}
1015 | ||
1016 | ||
1017 | /* | |
1018 | * | |
1019 | */ | |
1020 | apple_protect_pager_t | |
1021 | apple_protect_pager_lookup( | |
1022 | memory_object_t mem_obj) | |
1023 | { | |
1024 | apple_protect_pager_t pager; | |
1025 | ||
5ba3f43e | 1026 | assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops); |
d9a64523 | 1027 | pager = (apple_protect_pager_t)(uintptr_t) mem_obj; |
0c530ab8 A |
1028 | assert(pager->ref_count > 0); |
1029 | return pager; | |
1030 | } | |
1031 | ||
/*
 * apple_protect_pager_create()
 *
 * Allocate and initialize a new pager for the given backing object and
 * crypto range.  If another thread raced us and already registered an
 * equivalent pager, discard ours and return the winner's instead.
 * Returns with one extra reference held for the caller.
 * Takes ownership of "crypt_info" (allocated by the caller).
 */
apple_protect_pager_t
apple_protect_pager_create(
	vm_object_t		backing_object,
	vm_object_offset_t	backing_offset,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t	crypto_start,
	vm_object_offset_t	crypto_end)
{
	apple_protect_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;
	struct pager_crypt_info	*old_crypt_info;

	pager = (apple_protect_pager_t) kalloc(sizeof (*pager));
	if (pager == APPLE_PROTECT_PAGER_NULL) {
		return APPLE_PROTECT_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->ap_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->ap_pgr_hdr.mo_pager_ops = &apple_protect_pager_ops;
	pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;/* not ready until it has a "name" */
	pager->ref_count = 1;	/* existence reference (for the cache) */
	pager->ref_count++;	/* for the caller */
	pager->is_mapped = FALSE;
	pager->backing_object = backing_object;
	pager->backing_offset = backing_offset;
	pager->crypto_backing_offset = crypto_backing_offset;
	pager->crypto_start = crypto_start;
	pager->crypto_end = crypto_end;
	pager->crypt_info = crypt_info; /* allocated by caller */

#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n",
	       __FUNCTION__,
	       crypt_info,
	       crypt_info->page_decrypt,
	       crypt_info->crypt_end,
	       crypt_info->crypt_ops,
	       crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */

	vm_object_reference(backing_object);

	old_crypt_info = NULL;

	lck_mtx_lock(&apple_protect_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&apple_protect_pager_queue,
		      pager2,
		      apple_protect_pager_t,
		      pager_queue) {
		if ((pager2->crypt_info->page_decrypt !=
		     crypt_info->page_decrypt) ||
		    (pager2->crypt_info->crypt_end !=
		     crypt_info->crypt_end) ||
		    (pager2->crypt_info->crypt_ops !=
		     crypt_info->crypt_ops)) {
			/* crypt_info contents do not match: next pager */
			continue;
		}

		/* found a match for crypt_info ... */
		if (old_crypt_info) {
			/* ... already switched to that crypt_info */
			assert(old_crypt_info == pager2->crypt_info);
		} else if (pager2->crypt_info != crypt_info) {
			/* ... switch to that pager's crypt_info */
#if CRYPT_INFO_DEBUG
			printf("CRYPT_INFO %s: reference %p ref %d "
			       "(create match)\n",
			       __FUNCTION__,
			       pager2->crypt_info,
			       pager2->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
			old_crypt_info = pager2->crypt_info;
			crypt_info_reference(old_crypt_info);
			pager->crypt_info = old_crypt_info;
		}

		if (pager2->backing_object == backing_object &&
		    pager2->backing_offset == backing_offset &&
		    pager2->crypto_backing_offset == crypto_backing_offset &&
		    pager2->crypto_start == crypto_start &&
		    pager2->crypto_end == crypto_end) {
			/* full match: use that pager */
			break;
		}
	}
	if (! queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager2)) {
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&apple_protect_pager_lock);
		vm_object_deallocate(pager->backing_object);
		pager->backing_object = VM_OBJECT_NULL;
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: %p ref %d (create pager match)\n",
		       __FUNCTION__,
		       pager->crypt_info,
		       pager->crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(pager->crypt_info);
		pager->crypt_info = NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&apple_protect_pager_queue,
			  pager,
			  apple_protect_pager_t,
			  pager_queue);
	apple_protect_pager_count++;
	if (apple_protect_pager_count > apple_protect_pager_count_max) {
		apple_protect_pager_count_max = apple_protect_pager_count;
	}
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* give the pager its memory object "name" (drop the lock: may block) */
	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	lck_mtx_lock(&apple_protect_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&apple_protect_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	if (old_crypt_info != NULL &&
	    old_crypt_info != crypt_info) {
		/* we re-used an old crypt_info instead of using our new one */
#if CRYPT_INFO_DEBUG
		printf("CRYPT_INFO %s: deallocate %p ref %d "
		       "(create used old)\n",
		       __FUNCTION__,
		       crypt_info,
		       crypt_info->crypt_refcnt);
#endif /* CRYPT_INFO_DEBUG */
		crypt_info_deallocate(crypt_info);
		crypt_info = NULL;
	}

	return pager;
}
1191 | ||
1192 | /* | |
1193 | * apple_protect_pager_setup() | |
1194 | * | |
1195 | * Provide the caller with a memory object backed by the provided | |
1196 | * "backing_object" VM object. If such a memory object already exists, | |
1197 | * re-use it, otherwise create a new memory object. | |
1198 | */ | |
1199 | memory_object_t | |
1200 | apple_protect_pager_setup( | |
3e170ce0 A |
1201 | vm_object_t backing_object, |
1202 | vm_object_offset_t backing_offset, | |
1203 | vm_object_offset_t crypto_backing_offset, | |
1204 | struct pager_crypt_info *crypt_info, | |
1205 | vm_object_offset_t crypto_start, | |
1206 | vm_object_offset_t crypto_end) | |
0c530ab8 A |
1207 | { |
1208 | apple_protect_pager_t pager; | |
3e170ce0 A |
1209 | struct pager_crypt_info *old_crypt_info, *new_crypt_info; |
1210 | ||
1211 | #if CRYPT_INFO_DEBUG | |
1212 | printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n", | |
1213 | __FUNCTION__, | |
1214 | crypt_info, | |
1215 | crypt_info->page_decrypt, | |
1216 | crypt_info->crypt_end, | |
1217 | crypt_info->crypt_ops, | |
1218 | crypt_info->crypt_refcnt); | |
1219 | #endif /* CRYPT_INFO_DEBUG */ | |
1220 | ||
1221 | old_crypt_info = NULL; | |
0c530ab8 | 1222 | |
b0d623f7 | 1223 | lck_mtx_lock(&apple_protect_pager_lock); |
0c530ab8 A |
1224 | |
1225 | queue_iterate(&apple_protect_pager_queue, | |
1226 | pager, | |
1227 | apple_protect_pager_t, | |
1228 | pager_queue) { | |
3e170ce0 A |
1229 | if ((pager->crypt_info->page_decrypt != |
1230 | crypt_info->page_decrypt) || | |
1231 | (pager->crypt_info->crypt_end != | |
1232 | crypt_info->crypt_end) || | |
1233 | (pager->crypt_info->crypt_ops != | |
1234 | crypt_info->crypt_ops)) { | |
1235 | /* no match for "crypt_info": next pager */ | |
1236 | continue; | |
1237 | } | |
1238 | /* found a match for crypt_info ... */ | |
1239 | if (old_crypt_info) { | |
1240 | /* ... already switched to that crypt_info */ | |
1241 | assert(old_crypt_info == pager->crypt_info); | |
1242 | } else { | |
1243 | /* ... switch to that pager's crypt_info */ | |
1244 | old_crypt_info = pager->crypt_info; | |
1245 | #if CRYPT_INFO_DEBUG | |
1246 | printf("CRYPT_INFO %s: " | |
1247 | "switching crypt_info from %p [%p,%p,%p,%d] " | |
1248 | "to %p [%p,%p,%p,%d] from pager %p\n", | |
1249 | __FUNCTION__, | |
1250 | crypt_info, | |
1251 | crypt_info->page_decrypt, | |
1252 | crypt_info->crypt_end, | |
1253 | crypt_info->crypt_ops, | |
1254 | crypt_info->crypt_refcnt, | |
1255 | old_crypt_info, | |
1256 | old_crypt_info->page_decrypt, | |
1257 | old_crypt_info->crypt_end, | |
1258 | old_crypt_info->crypt_ops, | |
1259 | old_crypt_info->crypt_refcnt, | |
1260 | pager); | |
1261 | printf("CRYPT_INFO %s: %p ref %d (setup match)\n", | |
1262 | __FUNCTION__, | |
1263 | pager->crypt_info, | |
1264 | pager->crypt_info->crypt_refcnt); | |
1265 | #endif /* CRYPT_INFO_DEBUG */ | |
1266 | crypt_info_reference(pager->crypt_info); | |
1267 | } | |
1268 | ||
1269 | if (pager->backing_object == backing_object && | |
1270 | pager->backing_offset == backing_offset && | |
1271 | pager->crypto_backing_offset == crypto_backing_offset && | |
1272 | pager->crypto_start == crypto_start && | |
1273 | pager->crypto_end == crypto_end) { | |
1274 | /* full match: use that pager! */ | |
1275 | assert(old_crypt_info == pager->crypt_info); | |
1276 | assert(old_crypt_info->crypt_refcnt > 1); | |
1277 | #if CRYPT_INFO_DEBUG | |
1278 | printf("CRYPT_INFO %s: " | |
1279 | "pager match with %p crypt_info %p\n", | |
1280 | __FUNCTION__, | |
1281 | pager, | |
1282 | pager->crypt_info); | |
1283 | printf("CRYPT_INFO %s: deallocate %p ref %d " | |
1284 | "(pager match)\n", | |
1285 | __FUNCTION__, | |
1286 | old_crypt_info, | |
1287 | old_crypt_info->crypt_refcnt); | |
1288 | #endif /* CRYPT_INFO_DEBUG */ | |
1289 | /* release the extra ref on crypt_info we got above */ | |
1290 | crypt_info_deallocate(old_crypt_info); | |
1291 | assert(old_crypt_info->crypt_refcnt > 0); | |
1292 | /* give extra reference on pager to the caller */ | |
1293 | assert(pager->ref_count > 0); | |
1294 | pager->ref_count++; | |
0c530ab8 A |
1295 | break; |
1296 | } | |
1297 | } | |
1298 | if (queue_end(&apple_protect_pager_queue, | |
1299 | (queue_entry_t) pager)) { | |
3e170ce0 | 1300 | lck_mtx_unlock(&apple_protect_pager_lock); |
0c530ab8 A |
1301 | /* no existing pager for this backing object */ |
1302 | pager = APPLE_PROTECT_PAGER_NULL; | |
3e170ce0 A |
1303 | if (old_crypt_info) { |
1304 | /* use this old crypt_info for new pager */ | |
1305 | new_crypt_info = old_crypt_info; | |
1306 | #if CRYPT_INFO_DEBUG | |
1307 | printf("CRYPT_INFO %s: " | |
1308 | "will use old_crypt_info %p for new pager\n", | |
1309 | __FUNCTION__, | |
1310 | old_crypt_info); | |
1311 | #endif /* CRYPT_INFO_DEBUG */ | |
1312 | } else { | |
1313 | /* allocate a new crypt_info for new pager */ | |
1314 | new_crypt_info = kalloc(sizeof (*new_crypt_info)); | |
1315 | *new_crypt_info = *crypt_info; | |
1316 | new_crypt_info->crypt_refcnt = 1; | |
1317 | #if CRYPT_INFO_DEBUG | |
1318 | printf("CRYPT_INFO %s: " | |
1319 | "will use new_crypt_info %p for new pager\n", | |
1320 | __FUNCTION__, | |
1321 | new_crypt_info); | |
1322 | #endif /* CRYPT_INFO_DEBUG */ | |
1323 | } | |
1324 | if (new_crypt_info == NULL) { | |
1325 | /* can't create new pager without a crypt_info */ | |
1326 | } else { | |
1327 | /* create new pager */ | |
1328 | pager = apple_protect_pager_create( | |
1329 | backing_object, | |
1330 | backing_offset, | |
1331 | crypto_backing_offset, | |
1332 | new_crypt_info, | |
1333 | crypto_start, | |
1334 | crypto_end); | |
1335 | } | |
0c530ab8 | 1336 | if (pager == APPLE_PROTECT_PAGER_NULL) { |
3e170ce0 A |
1337 | /* could not create a new pager */ |
1338 | if (new_crypt_info == old_crypt_info) { | |
1339 | /* release extra reference on old_crypt_info */ | |
1340 | #if CRYPT_INFO_DEBUG | |
1341 | printf("CRYPT_INFO %s: deallocate %p ref %d " | |
1342 | "(create fail old_crypt_info)\n", | |
1343 | __FUNCTION__, | |
1344 | old_crypt_info, | |
1345 | old_crypt_info->crypt_refcnt); | |
1346 | #endif /* CRYPT_INFO_DEBUG */ | |
1347 | crypt_info_deallocate(old_crypt_info); | |
1348 | old_crypt_info = NULL; | |
1349 | } else { | |
1350 | /* release unused new_crypt_info */ | |
1351 | assert(new_crypt_info->crypt_refcnt == 1); | |
1352 | #if CRYPT_INFO_DEBUG | |
1353 | printf("CRYPT_INFO %s: deallocate %p ref %d " | |
1354 | "(create fail new_crypt_info)\n", | |
1355 | __FUNCTION__, | |
1356 | new_crypt_info, | |
1357 | new_crypt_info->crypt_refcnt); | |
1358 | #endif /* CRYPT_INFO_DEBUG */ | |
1359 | crypt_info_deallocate(new_crypt_info); | |
1360 | new_crypt_info = NULL; | |
1361 | } | |
0c530ab8 A |
1362 | return MEMORY_OBJECT_NULL; |
1363 | } | |
3e170ce0 A |
1364 | lck_mtx_lock(&apple_protect_pager_lock); |
1365 | } else { | |
1366 | assert(old_crypt_info == pager->crypt_info); | |
0c530ab8 A |
1367 | } |
1368 | ||
0c530ab8 | 1369 | while (!pager->is_ready) { |
b0d623f7 A |
1370 | lck_mtx_sleep(&apple_protect_pager_lock, |
1371 | LCK_SLEEP_DEFAULT, | |
1372 | &pager->is_ready, | |
1373 | THREAD_UNINT); | |
0c530ab8 | 1374 | } |
b0d623f7 | 1375 | lck_mtx_unlock(&apple_protect_pager_lock); |
0c530ab8 A |
1376 | |
1377 | return (memory_object_t) pager; | |
1378 | } | |
1379 | ||
/*
 * apple_protect_pager_trim()
 *
 * The cache of unmapped pagers has grown past
 * "apple_protect_pager_cache_limit": terminate the oldest unused
 * pagers (ref_count == 2: existence + cache, not mapped) until the
 * count is back under the limit.
 * Must be called without "apple_protect_pager_lock" held.
 */
void
apple_protect_pager_trim(void)
{
	apple_protect_pager_t	pager, prev_pager;
	queue_head_t		trim_queue;
	int			num_trim;
	int			count_unmapped;

	lck_mtx_lock(&apple_protect_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (apple_protect_pager_t)
		     queue_last(&apple_protect_pager_queue);
	     !queue_end(&apple_protect_pager_queue,
			(queue_entry_t) pager);
	     pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (apple_protect_pager_t)
			queue_prev(&pager->pager_queue);

		if (pager->ref_count == 2 &&
		    pager->is_ready &&
		    !pager->is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			apple_protect_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
					  pager,
					  apple_protect_pager_t,
					  pager_queue);

			count_unmapped = (apple_protect_pager_count -
					  apple_protect_pager_count_mapped);
			if (count_unmapped <= apple_protect_pager_cache_limit) {
				/* we have enough pagers to trim */
				break;
			}
		}
	}
	if (num_trim > apple_protect_pager_num_trim_max) {
		apple_protect_pager_num_trim_max = num_trim;
	}
	apple_protect_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&apple_protect_pager_lock);

	/* terminate the trimmed pagers, now that the lock is dropped */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
				   pager,
				   apple_protect_pager_t,
				   pager_queue);
		pager->pager_queue.next = NULL;
		pager->pager_queue.prev = NULL;
		assert(pager->ref_count == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		pager->ref_count--;
		apple_protect_pager_terminate_internal(pager);
	}
}
3e170ce0 A |
1452 | |
1453 | ||
/*
 * crypt_info_reference()
 *
 * Take an extra reference on a "pager_crypt_info".
 * The caller must already hold a reference: a refcnt of 0 means the
 * structure is being (or has been) torn down and must not be revived.
 */
void
crypt_info_reference(
	struct pager_crypt_info *crypt_info)
{
	assert(crypt_info->crypt_refcnt != 0);
#if CRYPT_INFO_DEBUG
	printf("CRYPT_INFO %s: %p ref %d -> %d\n",
	       __FUNCTION__,
	       crypt_info,
	       crypt_info->crypt_refcnt,
	       crypt_info->crypt_refcnt + 1);
#endif /* CRYPT_INFO_DEBUG */
	OSAddAtomic(+1, &crypt_info->crypt_refcnt);
}
1468 | ||
1469 | void | |
1470 | crypt_info_deallocate( | |
1471 | struct pager_crypt_info *crypt_info) | |
1472 | { | |
1473 | #if CRYPT_INFO_DEBUG | |
1474 | printf("CRYPT_INFO %s: %p ref %d -> %d\n", | |
1475 | __FUNCTION__, | |
1476 | crypt_info, | |
1477 | crypt_info->crypt_refcnt, | |
1478 | crypt_info->crypt_refcnt - 1); | |
1479 | #endif /* CRYPT_INFO_DEBUG */ | |
1480 | OSAddAtomic(-1, &crypt_info->crypt_refcnt); | |
1481 | if (crypt_info->crypt_refcnt == 0) { | |
1482 | /* deallocate any crypt module data */ | |
1483 | if (crypt_info->crypt_end) { | |
1484 | crypt_info->crypt_end(crypt_info->crypt_ops); | |
1485 | crypt_info->crypt_end = NULL; | |
1486 | } | |
1487 | #if CRYPT_INFO_DEBUG | |
1488 | printf("CRYPT_INFO %s: freeing %p\n", | |
1489 | __FUNCTION__, | |
1490 | crypt_info); | |
1491 | #endif /* CRYPT_INFO_DEBUG */ | |
1492 | kfree(crypt_info, sizeof (*crypt_info)); | |
1493 | crypt_info = NULL; | |
1494 | } | |
1495 | } |