/* apple/xnu: osfmk/vm/vm_compressor_pager.c */
/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    __unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
    compressor_memory_object_reference,
    compressor_memory_object_deallocate,
    compressor_memory_object_init,
    compressor_memory_object_terminate,
    compressor_memory_object_data_request,
    compressor_memory_object_data_return,
    compressor_memory_object_data_initialize,
    compressor_memory_object_data_unlock,
    compressor_memory_object_synchronize,
    compressor_memory_object_map,
    compressor_memory_object_last_unmap,
    compressor_memory_object_data_reclaim,
    "compressor pager"
};

/* internal data structures */

struct {
    uint64_t data_returns;
    uint64_t data_requests;
    uint64_t put;
    uint64_t get;
    uint64_t state_clr;
    uint64_t state_get;
    uint64_t transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
    /* mandatory generic header */
    struct memory_object cpgr_hdr;

    /* pager-specific data */
    lck_mtx_t cpgr_lock;
    unsigned int cpgr_references;
    unsigned int cpgr_num_slots;
    unsigned int cpgr_num_slots_occupied;
    union {
        compressor_slot_t cpgr_eslots[2]; /* embedded slots */
        compressor_slot_t *cpgr_dslots;   /* direct slots */
        compressor_slot_t **cpgr_islots;  /* indirect slots */
    } cpgr_slots;
} *compressor_pager_t;

#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
    MACRO_BEGIN \
    if (_mem_obj_ == NULL || \
        _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
        _cpgr_ = NULL; \
    } else { \
        _cpgr_ = (compressor_pager_t) _mem_obj_; \
    } \
    MACRO_END

zone_t compressor_pager_zone;

lck_grp_t compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
    lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
    lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
    lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
    lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
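
/*
 * Illustrative sketch, not part of the original source: how the three
 * cpgr_slots representations map onto object sizes.  With 4-byte slots and
 * the 512-byte chunk defined above, one chunk holds 128 slots, so an object
 * of up to 2 pages uses the embedded slots, up to 128 pages a single direct
 * array, and anything larger the indirect array of chunk pointers.  The
 * helper name below is hypothetical.
 */
static __unused const char *
compressor_pager_slots_kind(unsigned int num_slots)
{
    unsigned int num_chunks;

    /* same rounding-up computation used throughout this file */
    num_chunks = (unsigned int)((num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
        COMPRESSOR_SLOTS_PER_CHUNK);
    if (num_chunks > 1) {
        return "indirect (cpgr_islots)";  /* array of chunk pointers */
    } else if (num_slots > 2) {
        return "direct (cpgr_dslots)";    /* one heap-allocated slot array */
    }
    return "embedded (cpgr_eslots)";      /* stored inline in the pager */
}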

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures);
void compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp);

kern_return_t
compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    __unused memory_object_cluster_size_t pager_page_size)
{
    compressor_pager_t pager;

    assert(pager_page_size == PAGE_SIZE);

    memory_object_control_reference(control);

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
        panic("compressor_memory_object_init: bad request");
    }
    pager->cpgr_hdr.mo_control = control;

    compressor_pager_unlock(pager);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t flags)
{
    panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    panic("compressor_memory_object_map");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
    __unused memory_object_t mem_obj)
{
    panic("compressor_memory_object_last_unmap");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store)
{
    panic("compressor_memory_object_data_reclaim");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
    memory_object_t mem_obj)
{
    memory_object_control_t control;
    compressor_pager_t pager;

    /*
     * control port is a receive right, not a send right.
     */

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    /*
     * After memory_object_terminate both memory_object_init
     * and a no-senders notification are possible, so we need
     * to clean up our reference to the memory_object_control
     * to prepare for a new init.
     */

    control = pager->cpgr_hdr.mo_control;
    pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    compressor_pager_unlock(pager);

    /*
     * Now we deallocate our reference on the control.
     */
    memory_object_control_deallocate(control);
    return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return;
    }

    compressor_pager_lock(pager);
    assert(pager->cpgr_references > 0);
    pager->cpgr_references++;
    compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;
    unsigned int num_slots_freed;

    /*
     * Because we don't give out multiple first references
     * for a memory object, there can't be a race
     * between getting a deallocate call and creating
     * a new reference for the object.
     */

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return;
    }

    compressor_pager_lock(pager);
    if (--pager->cpgr_references > 0) {
        compressor_pager_unlock(pager);
        return;
    }

    /*
     * We shouldn't get a deallocation call
     * when the kernel has the object cached.
     */
    if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
        panic("compressor_memory_object_deallocate(): bad request");
    }

    /*
     * Unlock the pager (though there should be no one
     * waiting for it).
     */
    compressor_pager_unlock(pager);

    /* free the compressor slots */
    int num_chunks;
    int i;
    compressor_slot_t *chunk;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        for (i = 0; i < num_chunks; i++) {
            chunk = pager->cpgr_slots.cpgr_islots[i];
            if (chunk != NULL) {
                num_slots_freed =
                    compressor_pager_slots_chunk_free(
                        chunk,
                        COMPRESSOR_SLOTS_PER_CHUNK,
                        0,
                        NULL);
                pager->cpgr_slots.cpgr_islots[i] = NULL;
                kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
            }
        }
        kfree(pager->cpgr_slots.cpgr_islots,
            num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
        pager->cpgr_slots.cpgr_islots = NULL;
    } else if (pager->cpgr_num_slots > 2) {
        chunk = pager->cpgr_slots.cpgr_dslots;
        num_slots_freed =
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                0,
                NULL);
        pager->cpgr_slots.cpgr_dslots = NULL;
        kfree(chunk,
            (pager->cpgr_num_slots *
            sizeof(pager->cpgr_slots.cpgr_dslots[0])));
    } else {
        chunk = &pager->cpgr_slots.cpgr_eslots[0];
        num_slots_freed =
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                0,
                NULL);
    }

    compressor_pager_lock_destroy(pager);
    zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    __unused memory_object_fault_info_t fault_info)
{
    compressor_pager_t pager;
    kern_return_t kr;
    compressor_slot_t *slot_p;

    compressor_pager_stats.data_requests++;

    /*
     * Request must be on a page boundary and a multiple of pages.
     */
    if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
        panic("compressor_memory_object_data_request(): bad alignment");
    }

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return KERN_FAILURE;
    }

    compressor_pager_lookup(mem_obj, pager);

    if (length == 0) {
        /* we're only querying the pager for this page */
    } else {
        panic("compressor: data_request");
    }

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        kr = KERN_FAILURE;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        kr = KERN_FAILURE;
    } else {
        /* compressor does have this page */
        kr = KERN_SUCCESS;
    }
    return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size)
{
    compressor_pager_t pager;
    memory_object_offset_t cur_offset;

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    for (cur_offset = offset;
        cur_offset < offset + size;
        cur_offset += PAGE_SIZE) {
        panic("do a data_return() if slot for this page is empty");
    }

    compressor_pager_unlock(pager);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    panic("compressor_memory_object_data_unlock()");
    return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("compressor: data_return");
    return KERN_FAILURE;
}

/*
 * Routine:     compressor_memory_object_create
 * Purpose:
 *      Handle requests for memory objects from the
 *      kernel.
 * Notes:
 *      Because we only give out the default memory
 *      manager port to the kernel, we don't have to
 *      be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
    memory_object_size_t new_size,
    memory_object_t *new_mem_obj)
{
    compressor_pager_t pager;
    int num_chunks;

    if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
        /* 32-bit overflow for number of pages */
        panic("%s: size 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) new_size);
        return KERN_INVALID_ARGUMENT;
    }

    pager = (compressor_pager_t) zalloc(compressor_pager_zone);
    if (pager == NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    compressor_pager_lock_init(pager);
    pager->cpgr_references = 1;
    pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
    pager->cpgr_num_slots_occupied = 0;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
        bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
    } else if (pager->cpgr_num_slots > 2) {
        pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
        bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
    } else {
        pager->cpgr_slots.cpgr_eslots[0] = 0;
        pager->cpgr_slots.cpgr_eslots[1] = 0;
    }

    /*
     * Set up associations between this memory object
     * and this compressor_pager structure
     */
    pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
    pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    *new_mem_obj = (memory_object_t) pager;
    return KERN_SUCCESS;
}
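
/*
 * Hedged usage sketch, not part of the original source: how a caller might
 * create and release a compressor pager for an 8 MB object.  With 4 KB pages
 * that is 2048 slots, i.e. the indirect-chunk representation.  The function
 * name is hypothetical and error handling is abbreviated.
 */
static __unused kern_return_t
compressor_pager_create_example(void)
{
    memory_object_t mem_obj = MEMORY_OBJECT_NULL;
    kern_return_t kr;

    /* one slot per page: 8 MB / 4 KB = 2048 slots */
    kr = compressor_memory_object_create((memory_object_size_t)8 << 20,
        &mem_obj);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    /* drop the initial reference; this frees the slot arrays and the pager */
    compressor_memory_object_deallocate(mem_obj);
    return KERN_SUCCESS;
}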


unsigned int
compressor_pager_slots_chunk_free(
    compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures)
{
    int i;
    int retval;
    unsigned int num_slots_freed;

    if (failures) {
        *failures = 0;
    }
    num_slots_freed = 0;
    for (i = 0; i < num_slots; i++) {
        if (chunk[i] != 0) {
            retval = vm_compressor_free(&chunk[i], flags);

            if (retval == 0) {
                num_slots_freed++;
            } else {
                if (retval == -2) {
                    assert(flags & C_DONT_BLOCK);
                }

                if (failures) {
                    *failures += 1;
                }
            }
        }
    }
    return num_slots_freed;
}

void
compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp)
{
    int num_chunks;
    uint32_t page_num;
    int chunk_idx;
    int slot_idx;
    compressor_slot_t *chunk;
    compressor_slot_t *t_chunk;

    page_num = (uint32_t)(offset / PAGE_SIZE);
    if (page_num != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        *slot_pp = NULL;
        return;
    }
    if (page_num >= pager->cpgr_num_slots) {
        /* out of range */
        *slot_pp = NULL;
        return;
    }
    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
        chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

        if (chunk == NULL && do_alloc) {
            t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
            bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

            compressor_pager_lock(pager);

            if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
                /*
                 * On some platforms, the memory stores from
                 * the bzero(t_chunk) above might not have been
                 * made visible and another thread might see
                 * the contents of this new chunk before it's
                 * been fully zero-filled.
                 * This memory barrier should take care of this
                 * according to the platform requirements.
                 */
                __c11_atomic_thread_fence(memory_order_release);

                chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
                t_chunk = NULL;
            }
            compressor_pager_unlock(pager);

            if (t_chunk) {
                kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
            }
        }
        if (chunk == NULL) {
            *slot_pp = NULL;
        } else {
            slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
            *slot_pp = &chunk[slot_idx];
        }
    } else if (pager->cpgr_num_slots > 2) {
        slot_idx = page_num;
        *slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
    } else {
        slot_idx = page_num;
        *slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
    }
}
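
/*
 * Worked example, not part of the original source: with 4 KB pages, offset
 * 0x81000 is page 129; at 128 slots per chunk that lands in chunk 1, slot 1.
 * The function name is hypothetical.
 */
static __unused void
compressor_pager_slot_lookup_example(void)
{
    uint32_t page_num = 0x81000 / 4096;                                    /* 129 */
    uint32_t chunk_idx = (uint32_t)(page_num / COMPRESSOR_SLOTS_PER_CHUNK); /* 129 / 128 = 1 */
    uint32_t slot_idx = (uint32_t)(page_num % COMPRESSOR_SLOTS_PER_CHUNK);  /* 129 % 128 = 1 */

    (void) chunk_idx;
    (void) slot_idx;
}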

void
vm_compressor_pager_init(void)
{
    lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
    lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
    lck_attr_setdefault(&compressor_pager_lck_attr);

    compressor_pager_zone = zinit(sizeof(struct compressor_pager),
        10000 * sizeof(struct compressor_pager),
        8192, "compressor_pager");
    zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
    zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

    vm_compressor_init();
}

kern_return_t
vm_compressor_pager_put(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    void **current_chead,
    char *scratch_buf,
    int *compressed_count_delta_p)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    compressor_pager_stats.put++;

    *compressed_count_delta_p = 0;

    /*
     * This routine is called by the pageout thread, which must not be
     * blocked by read activity, so any lock here must be taken on a try
     * rather than a blocking basis.  The code below also relies on this
     * interface being synchronous: should it ever become asynchronous
     * again for some type of pager, the pages would have to be returned
     * through a separate, asynchronous path.
     */

    compressor_pager_lookup(mem_obj, pager);

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return KERN_RESOURCE_SHORTAGE;
    }

    compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

    if (slot_p == NULL) {
        /* out of range ? */
        panic("vm_compressor_pager_put: out of range");
    }
    if (*slot_p != 0) {
        /*
         * Already compressed: forget about the old one.
         *
         * This can happen after a vm_object_do_collapse() when
         * the "backing_object" had some pages paged out and the
         * "object" had an equivalent page resident.
         */
        vm_compressor_free(slot_p, 0);
        *compressed_count_delta_p -= 1;
    }

    /*
     * If the compressor operation succeeds, we presumably don't need to
     * undo any previous WIMG update, as all live mappings should be
     * disconnected.
     */

    if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
        return KERN_RESOURCE_SHORTAGE;
    }
    *compressed_count_delta_p += 1;

    return KERN_SUCCESS;
}


kern_return_t
vm_compressor_pager_get(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    int *my_fault_type,
    int flags,
    int *compressed_count_delta_p)
{
    compressor_pager_t pager;
    kern_return_t kr;
    compressor_slot_t *slot_p;

    compressor_pager_stats.get++;

    *compressed_count_delta_p = 0;

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return KERN_MEMORY_ERROR;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        kr = KERN_MEMORY_FAILURE;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        kr = KERN_MEMORY_ERROR;
    } else {
        /* compressor does have this page */
        kr = KERN_SUCCESS;
    }
    *my_fault_type = DBG_COMPRESSOR_FAULT;

    if (kr == KERN_SUCCESS) {
        int retval;

        /* get the page from the compressor */
        retval = vm_compressor_get(ppnum, slot_p, flags);
        if (retval == -1) {
            kr = KERN_MEMORY_FAILURE;
        } else if (retval == 1) {
            *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
        } else if (retval == -2) {
            assert((flags & C_DONT_BLOCK));
            kr = KERN_FAILURE;
        }
    }

    if (kr == KERN_SUCCESS) {
        assert(slot_p != NULL);
        if (*slot_p != 0) {
            /*
             * We got the page for a copy-on-write fault
             * and we kept the original in place. Slot
             * is still occupied.
             */
        } else {
            *compressed_count_delta_p -= 1;
        }
    }

    return kr;
}
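
/*
 * Hedged sketch, not part of the original source: the round trip a caller
 * might perform with put/get, including the compressed-count bookkeeping via
 * vm_compressor_pager_count().  The function name is hypothetical; the caller
 * is assumed to hold the VM object lock exclusive and to supply the
 * compression state and scratch buffer that vm_compressor_put() expects.
 */
static __unused kern_return_t
compressor_pager_roundtrip_example(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    void **current_chead,   /* caller-maintained compression state */
    char *scratch_buf,      /* caller-provided scratch area */
    vm_object_t object)     /* locked exclusive by the caller */
{
    int delta = 0;
    int fault_type = 0;
    kern_return_t kr;

    /* compress the page at (mem_obj, offset) out of physical page ppnum */
    kr = vm_compressor_pager_put(mem_obj, offset, ppnum, current_chead,
        scratch_buf, &delta);
    if (kr != KERN_SUCCESS) {
        return kr;      /* e.g. KERN_RESOURCE_SHORTAGE */
    }
    /* fold the slot-count change into the pager's occupancy counter */
    vm_compressor_pager_count(mem_obj, delta, FALSE, object);

    /* later, on fault: decompress back into physical page ppnum */
    kr = vm_compressor_pager_get(mem_obj, offset, ppnum, &fault_type,
        0, &delta);
    if (kr == KERN_SUCCESS) {
        vm_compressor_pager_count(mem_obj, delta, FALSE, object);
    }
    return kr;
}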

unsigned int
vm_compressor_pager_state_clr(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;
    unsigned int num_slots_freed;

    assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

    compressor_pager_stats.state_clr++;

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return 0;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    num_slots_freed = 0;
    if (slot_p && *slot_p != 0) {
        vm_compressor_free(slot_p, 0);
        num_slots_freed++;
        assert(*slot_p == 0);
    }

    return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

    compressor_pager_stats.state_get++;

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return VM_EXTERNAL_STATE_ABSENT;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        return VM_EXTERNAL_STATE_ABSENT;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        return VM_EXTERNAL_STATE_ABSENT;
    } else {
        /* compressor does have this page */
        return VM_EXTERNAL_STATE_EXISTS;
    }
}

unsigned int
vm_compressor_pager_reap_pages(
    memory_object_t mem_obj,
    int flags)
{
    compressor_pager_t pager;
    int num_chunks;
    int failures;
    int i;
    compressor_slot_t *chunk;
    unsigned int num_slots_freed;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return 0;
    }

    compressor_pager_lock(pager);

    /* reap the compressor slots */
    num_slots_freed = 0;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        for (i = 0; i < num_chunks; i++) {
            chunk = pager->cpgr_slots.cpgr_islots[i];
            if (chunk != NULL) {
                num_slots_freed +=
                    compressor_pager_slots_chunk_free(
                        chunk,
                        COMPRESSOR_SLOTS_PER_CHUNK,
                        flags,
                        &failures);
                if (failures == 0) {
                    pager->cpgr_slots.cpgr_islots[i] = NULL;
                    kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
                }
            }
        }
    } else if (pager->cpgr_num_slots > 2) {
        chunk = pager->cpgr_slots.cpgr_dslots;
        num_slots_freed +=
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                flags,
                NULL);
    } else {
        chunk = &pager->cpgr_slots.cpgr_eslots[0];
        num_slots_freed +=
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                flags,
                NULL);
    }

    compressor_pager_unlock(pager);

    return num_slots_freed;
}

void
vm_compressor_pager_transfer(
    memory_object_t dst_mem_obj,
    memory_object_offset_t dst_offset,
    memory_object_t src_mem_obj,
    memory_object_offset_t src_offset)
{
    compressor_pager_t src_pager, dst_pager;
    compressor_slot_t *src_slot_p, *dst_slot_p;

    compressor_pager_stats.transfer++;

    /* find the compressor slot for the destination */
    assert((uint32_t) dst_offset == dst_offset);
    compressor_pager_lookup(dst_mem_obj, dst_pager);
    assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
    compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
        &dst_slot_p);
    assert(dst_slot_p != NULL);
    assert(*dst_slot_p == 0);

    /* find the compressor slot for the source */
    assert((uint32_t) src_offset == src_offset);
    compressor_pager_lookup(src_mem_obj, src_pager);
    assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
    compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
        &src_slot_p);
    assert(src_slot_p != NULL);
    assert(*src_slot_p != 0);

    /* transfer the slot from source to destination */
    vm_compressor_transfer(dst_slot_p, src_slot_p);
    OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
    OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}

memory_object_offset_t
vm_compressor_pager_next_compressed(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    uint32_t num_chunks;
    uint32_t page_num;
    uint32_t chunk_idx;
    uint32_t slot_idx;
    compressor_slot_t *chunk;

    compressor_pager_lookup(mem_obj, pager);

    page_num = (uint32_t)(offset / PAGE_SIZE);
    if (page_num != (offset / PAGE_SIZE)) {
        /* overflow */
        return (memory_object_offset_t) -1;
    }
    if (page_num >= pager->cpgr_num_slots) {
        /* out of range */
        return (memory_object_offset_t) -1;
    }

    num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
        COMPRESSOR_SLOTS_PER_CHUNK);

    if (num_chunks == 1) {
        if (pager->cpgr_num_slots > 2) {
            chunk = pager->cpgr_slots.cpgr_dslots;
        } else {
            chunk = &pager->cpgr_slots.cpgr_eslots[0];
        }
        for (slot_idx = page_num;
            slot_idx < pager->cpgr_num_slots;
            slot_idx++) {
            if (chunk[slot_idx] != 0) {
                /* found a non-NULL slot in this chunk */
                /* widen before multiplying to avoid 32-bit overflow */
                return (memory_object_offset_t) slot_idx * PAGE_SIZE;
            }
        }
        return (memory_object_offset_t) -1;
    }

    /* we have an array of chunks; find the next non-NULL chunk */
    chunk = NULL;
    for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
        slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
        chunk_idx < num_chunks;
        chunk_idx++,
        slot_idx = 0) {
        chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
        if (chunk == NULL) {
            /* no chunk here: try the next one */
            continue;
        }
        /* search for an occupied slot in this chunk */
        for (;
            slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
            slot_idx++) {
            if (chunk[slot_idx] != 0) {
                /* found an occupied slot in this chunk */
                uint32_t next_slot;

                next_slot = ((chunk_idx *
                    COMPRESSOR_SLOTS_PER_CHUNK) +
                    slot_idx);
                if (next_slot >= pager->cpgr_num_slots) {
                    /* went beyond end of object */
                    return (memory_object_offset_t) -1;
                }
                /* widen before multiplying to avoid 32-bit overflow */
                return (memory_object_offset_t) next_slot * PAGE_SIZE;
            }
        }
    }
    return (memory_object_offset_t) -1;
}
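
/*
 * Hedged sketch, not part of the original source: walking every compressed
 * page of an object by repeatedly asking for the next occupied slot.
 * (memory_object_offset_t)-1 is the "no more" sentinel used above.  The
 * function name is hypothetical.
 */
static __unused void
compressor_pager_walk_example(memory_object_t mem_obj)
{
    memory_object_offset_t offset;

    for (offset = vm_compressor_pager_next_compressed(mem_obj, 0);
        offset != (memory_object_offset_t) -1;
        offset = vm_compressor_pager_next_compressed(mem_obj,
        offset + PAGE_SIZE)) {
        /* "offset" holds compressed data in this pager */
    }
}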

unsigned int
vm_compressor_pager_get_count(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return 0;
    }

    /*
     * The caller should have the VM object locked and one
     * needs that lock to do a page-in or page-out, so no
     * need to lock the pager here.
     */
    assert(pager->cpgr_num_slots_occupied >= 0);

    return pager->cpgr_num_slots_occupied;
}

void
vm_compressor_pager_count(
    memory_object_t mem_obj,
    int compressed_count_delta,
    boolean_t shared_lock,
    vm_object_t object __unused)
{
    compressor_pager_t pager;

    if (compressed_count_delta == 0) {
        return;
    }

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return;
    }

    if (compressed_count_delta < 0) {
        assert(pager->cpgr_num_slots_occupied >=
            (unsigned int) -compressed_count_delta);
    }

    /*
     * The caller should have the VM object locked,
     * shared or exclusive.
     */
    if (shared_lock) {
        vm_object_lock_assert_shared(object);
        OSAddAtomic(compressed_count_delta,
            &pager->cpgr_num_slots_occupied);
    } else {
        vm_object_lock_assert_exclusive(object);
        pager->cpgr_num_slots_occupied += compressed_count_delta;
    }
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    void **current_chead)
{
    /*
     * Has the page at this offset been compressed?
     */

    compressor_slot_t *slot_p;
    compressor_pager_t dst_pager;

    assert(mem_obj);

    compressor_pager_lookup(mem_obj, dst_pager);
    if (dst_pager == NULL) {
        return KERN_FAILURE;
    }

    compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
    return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */