/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if CONFIG_FREEZE

#include "default_freezer.h"

/*
 * Indicates that a page has been faulted back in.
 */
#define FREEZER_OFFSET_ABSENT ((vm_object_offset_t)(-1))

/*
 * Create the mapping table that will
 * tell us the object/offset pair that
 * corresponds to the page being sent
 * out or being brought back in.
 */
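/*
 * Layout note: struct default_freezer_mapping_table (see default_freezer.h)
 * holds up to MAX_FREEZE_TABLE_ENTRIES entries and chains to the next table
 * through ->next.  Entry i of a table describes the page at compact-object
 * offset (table->offset + i * PAGE_SIZE) and records the
 * (memory_object, offset) pair that page was originally taken from.
 */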
void*
default_freezer_mapping_create(vm_object_t object, vm_offset_t offset)
{
    default_freezer_mapping_table_t table;

    table = kalloc(sizeof(struct default_freezer_mapping_table));
    if (table) {
        memset(table, 0, sizeof(*table));
    } else {
        panic("Could not allocate mapping table\n");
    }

    table->object = object;
    table->offset = offset;

    return (void*)table;
}

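/*
 * Free mapping table(s).  With 'all' set, every table reachable through the
 * ->next chain starting at *table is released; otherwise only the table
 * pointed to is freed (default_freezer_unpack() below uses the latter form
 * as it consumes the chain one table at a time).
 */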
void
default_freezer_mapping_free(void **table, boolean_t all)
{
    default_freezer_mapping_table_t freezer_table = *((default_freezer_mapping_table_t *)table);
    assert(freezer_table);

    if (all) {
        do {
            default_freezer_mapping_table_t next = freezer_table->next;
            kfree(freezer_table, sizeof(*freezer_table));
            freezer_table = next;
        } while (freezer_table);
    } else {
        kfree(freezer_table, sizeof(*freezer_table));
    }
}

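/*
 * Append an entry recording that the page frozen at compact-object offset
 * 'table_offset' came from (memory_object, offset).  If the current table is
 * full, a new table based at 'table_offset' is allocated and chained on, and
 * *table is updated to point at it; this is why the table argument is
 * passed by reference.
 */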
kern_return_t
default_freezer_mapping_store(
    default_freezer_mapping_table_t *table,
    memory_object_offset_t table_offset,
    memory_object_t memory_object,
    memory_object_offset_t offset)
{
    default_freezer_mapping_table_entry_t entry;
    uint32_t index;

    assert(*table);

    if ((*table)->index >= MAX_FREEZE_TABLE_ENTRIES) {
        vm_object_t compact_object = (*table)->object;
        default_freezer_mapping_table_t next;

        next = default_freezer_mapping_create(compact_object, table_offset);
        if (!next) {
            return KERN_FAILURE;
        }

        (*table)->next = next;
        *table = next;
    }

    index = (*table)->index++;
    entry = &(*table)->entry[index];

    entry->memory_object = memory_object;
    entry->offset = offset;

    return KERN_SUCCESS;
}

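/*
 * Find the entry for (memory_object, offset) and, if table_offset is
 * supplied, return the offset of the corresponding page within the compact
 * object.  The search walks the chained tables in order, advancing the
 * candidate compact offset by PAGE_SIZE per entry examined.  With
 * remove_entry set, the matching entry is marked FREEZER_OFFSET_ABSENT to
 * note that the page has been faulted back in.
 */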
kern_return_t
default_freezer_mapping_update(
    default_freezer_mapping_table_t table,
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_offset_t *table_offset, /* OUT: contains the offset into the compact object */
    boolean_t remove_entry)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_object_offset_t compact_offset;
    default_freezer_mapping_table_entry_t entry;
    uint32_t index = 0;

    if (table == NULL) {
        return KERN_FAILURE;
    }

    compact_offset = table->offset;

    while (1) {
        if (index >= table->index) {
            if (table->next) {
                table = table->next;
                index = 0;
            } else {
                /* End of tables and we didn't find our candidate entry */
                kr = KERN_FAILURE;
                break;
            }
        }

        entry = &table->entry[index];

        if (memory_object == entry->memory_object && offset == entry->offset) {
            if (remove_entry == TRUE) {
                /*
                 * Mark the page absent whilst retaining the object
                 * for cleanup during thaw.
                 */
                entry->offset = FREEZER_OFFSET_ABSENT;
            }
            if (table_offset != NULL) {
                *table_offset = compact_offset;
            }
            break;
        }

        index++;
        compact_offset += PAGE_SIZE;
    }
    return kr;
}

/*
 * Create a freezer memory object for this
 * vm object.
 */
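/*
 * The freezer object is installed as the object's pager (and the object is
 * marked pager_ready/pager_trusted), so later faults on frozen pages are
 * routed to df_memory_object_data_request() below.
 */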
void
default_freezer_memory_object_create(
    vm_object_t object,
    vm_object_t compact_object,
    default_freezer_mapping_table_t table)
{
    default_freezer_memory_object_t fo = NULL;

    fo = kalloc(sizeof(struct default_freezer_memory_object));

    if (fo) {
        memory_object_control_t control = NULL;

        memset(fo, 0, sizeof(*fo));

        control = memory_object_control_allocate(object);
        assert(control != MEMORY_OBJECT_CONTROL_NULL);

        df_memory_object_init((memory_object_t)fo, control, 0);
        fo->fo_compact_object = compact_object;
        fo->fo_table = table;

        object->pager = (memory_object_t)fo;
        object->pager_created = TRUE;
        object->pager_initialized = TRUE;
        object->pager_ready = TRUE;
        object->pager_trusted = TRUE;
        object->pager_control = control;
    } else {
        panic(" Could not allocate freezer object\n");
    }
}

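/*
 * Freeze one page: note in the mapping table where the page came from, then
 * move it out of its original object and into the compact object at
 * 'offset'.  If the original object has no pager yet, a freezer memory
 * object is created for it first so the page can be brought back on fault.
 */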
void
default_freezer_pack_page(
    vm_page_t p,
    vm_object_t compact_object,
    vm_object_offset_t offset,
    void **table)
{
    default_freezer_mapping_table_t *freeze_table = (default_freezer_mapping_table_t *)table;
    memory_object_t memory_object = p->object->pager;

    if (memory_object == NULL) {
        default_freezer_memory_object_create(p->object, compact_object, *freeze_table);
        memory_object = p->object->pager;
    } else {
        default_freezer_memory_object_t fo = (default_freezer_memory_object_t)memory_object;
        if (fo->fo_compact_object == VM_OBJECT_NULL) {
            fo->fo_compact_object = compact_object;
            fo->fo_table = *freeze_table;
        }
    }

    default_freezer_mapping_store(freeze_table, offset, memory_object, p->offset + p->object->paging_offset);

    /* Remove from the original and insert into the compact destination object */
    vm_page_rename(p, compact_object, offset, FALSE);
}

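/*
 * Thaw: walk the mapping tables in compact-offset order, moving every page
 * still resident in the compact object ('object' here) back to the
 * object/offset it was frozen from, and free each table once it has been
 * consumed.  Entries marked FREEZER_OFFSET_ABSENT were already faulted back
 * in, and entries with a NULL memory object belong to objects that have
 * since been deallocated; both are skipped.
 */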
void
default_freezer_unpack(
    vm_object_t object,
    void **table)
{
    vm_page_t p = VM_PAGE_NULL;
    uint32_t index = 0;
    vm_object_t src_object = VM_OBJECT_NULL;
    memory_object_t src_mem_object = MEMORY_OBJECT_NULL;
    memory_object_offset_t src_offset = 0;
    vm_object_offset_t compact_offset = 0;
    default_freezer_memory_object_t fo = NULL;
    default_freezer_memory_object_t last_memory_object_thawed = NULL;
    default_freezer_mapping_table_t freeze_table = *(default_freezer_mapping_table_t *)table;

    assert(freeze_table);

    vm_object_lock(object);

    for (index = 0, compact_offset = 0; ; index++, compact_offset += PAGE_SIZE) {
        if (index >= freeze_table->index) {
            default_freezer_mapping_table_t table_next;

            table_next = freeze_table->next;

            /* Free the tables as we go along */
            default_freezer_mapping_free((void**)&freeze_table, FALSE);

            if (table_next == NULL) {
                break;
            }

            freeze_table = table_next;
            index = 0;
        }

        /*
         * Skip slots that represent deallocated memory objects.
         */
        src_mem_object = freeze_table->entry[index].memory_object;
        if (src_mem_object == MEMORY_OBJECT_NULL)
            continue;

        /*
         * Skip slots that represent faulted pages.
         */
        src_offset = freeze_table->entry[index].offset;
        if (src_offset != FREEZER_OFFSET_ABSENT) {
            p = vm_page_lookup(object, compact_offset);
            assert(p);

            fo = (default_freezer_memory_object_t)src_mem_object;

            src_object = memory_object_control_to_vm_object(fo->fo_pager_control);

            /* Move back over from the freeze object to the original */
            vm_object_lock(src_object);
            vm_page_rename(p, src_object, src_offset - src_object->paging_offset, FALSE);
            vm_object_unlock(src_object);
        }

        if (src_mem_object != ((memory_object_t)last_memory_object_thawed)) {
            if (last_memory_object_thawed != NULL) {
                last_memory_object_thawed->fo_compact_object = VM_OBJECT_NULL;
                last_memory_object_thawed->fo_table = NULL;
            }
            last_memory_object_thawed = (default_freezer_memory_object_t)src_mem_object;
        }
    }

    if (last_memory_object_thawed != NULL) {
        last_memory_object_thawed->fo_compact_object = VM_OBJECT_NULL;
        last_memory_object_thawed->fo_table = NULL;
    }

    vm_object_unlock(object);
}

vm_object_t
default_freezer_get_compact_vm_object(void** table)
{
    default_freezer_mapping_table_t freeze_table = *((default_freezer_mapping_table_t *)table);
    assert(freeze_table);
    return ((vm_object_t)(freeze_table->object));
}

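/*
 * What follows is the memory_object interface for the freezer pager: the
 * entry points behind the default_freezer_ops pager-ops structure that
 * df_memory_object_init() points each freezer object at.
 */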
void
df_memory_object_reference(__unused memory_object_t mem_obj)
{
    /* No-op */
}

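/*
 * Final release of a freezer object.  Any mapping-table entries that still
 * name this memory object are cleared so a later walk of the table cannot
 * follow a stale pointer; the compact object and its pages are not touched
 * here.
 */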
void
df_memory_object_deallocate(memory_object_t mem_obj)
{
    default_freezer_memory_object_t fo = (default_freezer_memory_object_t)mem_obj;
    vm_object_t compact_object;

    assert(fo);
    compact_object = fo->fo_compact_object;

    if (compact_object != VM_OBJECT_NULL) {
        default_freezer_mapping_table_t fo_table = fo->fo_table;
        default_freezer_mapping_table_entry_t entry;
        boolean_t found = FALSE;
        uint32_t index = 0;

        vm_object_lock(compact_object);

        /* Remove from table */
        while (1) {
            if (index >= fo_table->index) {
                if (fo_table->next) {
                    fo_table = fo_table->next;
                    index = 0;
                } else {
                    /* End of tables */
                    break;
                }
            }

            entry = &fo_table->entry[index];
            if (mem_obj == entry->memory_object) {
                /* It matches, so clear the entry */
                if (!found) {
                    found = TRUE;
                }
                entry->memory_object = MEMORY_OBJECT_NULL;
                entry->offset = 0;
            } else if (MEMORY_OBJECT_NULL != entry->memory_object) {
                /* We have a different valid object; we're done */
                if (found) {
                    break;
                }
            }

            index++;
        }

        vm_object_unlock(compact_object);
    }

    kfree(fo, sizeof(*fo));
}

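/*
 * Minimal pager set-up: point the freezer object at the freezer's pager ops,
 * stamp the kobject type and remember the control port.  The cluster size
 * argument is ignored; the freezer always works in PAGE_SIZE units.
 */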
kern_return_t
df_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    __unused memory_object_cluster_size_t pager_page_size)
{
    default_freezer_memory_object_t fo = (default_freezer_memory_object_t)mem_obj;
    assert(fo);

    fo->fo_pager_ops = &default_freezer_ops;
    fo->fo_pager_header.io_bits = IKOT_MEMORY_OBJECT;
    fo->fo_pager_control = control;

    return KERN_SUCCESS;
}

kern_return_t
df_memory_object_terminate(memory_object_t mem_obj)
{
    default_freezer_memory_object_t fo = (default_freezer_memory_object_t)mem_obj;
    assert(fo);
    memory_object_control_deallocate(fo->fo_pager_control);
    return KERN_SUCCESS;
}

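/*
 * Page-in path for a frozen page.  The fault comes in against the original
 * object (whose pager is now the freezer object); the mapping table
 * translates the fault offset into a compact-object offset.  If no mapping
 * exists (the page was never frozen, or has already been brought back), the
 * UPL is aborted as unavailable so the fault layer treats the page as absent
 * from this pager.  Otherwise the default pager is asked to page the compact
 * page in synchronously, the page is renamed back into the original object,
 * and the table entry is marked absent.
 */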
kern_return_t
df_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info)
{
    vm_object_t src_object = VM_OBJECT_NULL, compact_object = VM_OBJECT_NULL;
    memory_object_offset_t compact_offset = 0;
    memory_object_t pager = NULL;
    kern_return_t kr = KERN_SUCCESS;

    default_freezer_memory_object_t fo = (default_freezer_memory_object_t)mem_obj;

    src_object = memory_object_control_to_vm_object(fo->fo_pager_control);
    compact_object = fo->fo_compact_object;

    if (compact_object != VM_OBJECT_NULL) {
        vm_object_lock(compact_object);

        kr = default_freezer_mapping_update(fo->fo_table,
                                            mem_obj,
                                            offset,
                                            &compact_offset,
                                            FALSE);

        vm_object_unlock(compact_object);
    } else {
        kr = KERN_FAILURE;
    }

    if (length == 0) {
        /* Caller is just querying to see if we have the page */
        return kr;
    }

    if (kr != KERN_SUCCESS) {
        unsigned int request_flags;
        upl_t upl;
        unsigned int page_list_count = 0;

        request_flags = UPL_NO_SYNC | UPL_RET_ONLY_ABSENT | UPL_SET_LITE;
        /*
         * Should we decide to activate USE_PRECIOUS (from default_pager_internal.h)
         * here, then the request_flags will need to add these to the ones above:
         *
         * request_flags |= UPL_PRECIOUS | UPL_CLEAN_IN_PLACE
         */
        request_flags |= UPL_REQUEST_SET_DIRTY;

        memory_object_super_upl_request(fo->fo_pager_control,
                                        (memory_object_offset_t)offset,
                                        PAGE_SIZE, PAGE_SIZE,
                                        &upl, NULL, &page_list_count,
                                        request_flags);

        upl_abort(upl, UPL_ABORT_UNAVAILABLE);
        upl_deallocate(upl);

        return KERN_SUCCESS;
    }

    vm_object_lock(compact_object);

    pager = (memory_object_t)compact_object->pager;

    if (!compact_object->pager_ready || pager == MEMORY_OBJECT_NULL) {
        vm_object_unlock(compact_object);
        return KERN_FAILURE;
    }

    vm_object_paging_wait(compact_object, THREAD_UNINT);
    vm_object_paging_begin(compact_object);

    compact_object->blocked_access = TRUE;
    vm_object_unlock(compact_object);

    ((vm_object_fault_info_t)fault_info)->io_sync = TRUE;

    kr = dp_memory_object_data_request(pager,
                                       compact_offset,
                                       length,
                                       protection_required,
                                       fault_info);
    if (kr == KERN_SUCCESS) {
        vm_page_t src_page = VM_PAGE_NULL, dst_page = VM_PAGE_NULL;

        vm_object_lock(compact_object);

        compact_object->blocked_access = FALSE;
        vm_object_paging_end(compact_object);

        vm_object_lock(src_object);

        if ((src_page = vm_page_lookup(compact_object, compact_offset)) != VM_PAGE_NULL) {
            dst_page = vm_page_lookup(src_object, offset - src_object->paging_offset);

            VM_PAGE_FREE(dst_page);
            vm_page_rename(src_page, src_object, offset - src_object->paging_offset, FALSE);

            if (default_freezer_mapping_update(fo->fo_table,
                                               mem_obj,
                                               offset,
                                               NULL,
                                               TRUE) != KERN_SUCCESS) {
                printf("Page for object: 0x%lx at offset: 0x%lx not found in table\n", (uintptr_t)src_object, (uintptr_t)offset);
            }

            PAGE_WAKEUP_DONE(src_page);
        } else {
            printf("%d: default_freezer: compact_object doesn't have the page for object 0x%lx at offset 0x%lx \n", kr, (uintptr_t)compact_object, (uintptr_t)compact_offset);
            kr = KERN_FAILURE;
        }
        vm_object_unlock(src_object);
        vm_object_unlock(compact_object);
    } else {
        panic("%d: default_freezer TOC pointed us to default_pager incorrectly\n", kr);
    }
    return kr;
}

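/*
 * The freezer object is only ever paged back *from*: pages enter the compact
 * object directly via default_freezer_pack_page() and leave through
 * df_memory_object_data_request().  The remaining memory_object entry points
 * should therefore never be reached and panic if they are.
 */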
kern_return_t
df_memory_object_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic(" default_freezer: df_memory_object_data_return should not be called\n");
    return KERN_SUCCESS;
}

kern_return_t
df_memory_object_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t size)
{
    panic(" default_freezer: df_memory_object_data_initialize should not be called\n");
    return KERN_SUCCESS;
}

kern_return_t
df_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_prot_t prot)
{
    panic(" default_freezer: df_memory_object_data_unlock should not be called\n");
    return KERN_FAILURE;
}

kern_return_t
df_memory_object_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t flags)
{
    panic(" default_freezer: df_memory_object_synchronize should not be called\n");
    return KERN_FAILURE;
}

kern_return_t
df_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    panic(" default_freezer: df_memory_object_map should not be called\n");
    return KERN_FAILURE;
}

kern_return_t
df_memory_object_last_unmap(__unused memory_object_t mem_obj)
{
    panic(" default_freezer: df_memory_object_last_unmap should not be called\n");
    return KERN_FAILURE;
}

kern_return_t
df_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store)
{
    panic("df_memory_object_data_reclaim\n");
    return KERN_SUCCESS;
}
#endif /* CONFIG_FREEZE */