[apple/xnu.git] osfmk/vm/vm_compressor_pager.c
/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	memory_object_fault_info_t	fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t	desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t	prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t	reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
	"compressor pager"
};

/* internal data structures */

struct {
	uint64_t	data_returns;
	uint64_t	data_requests;
	uint64_t	put;
	uint64_t	get;
	uint64_t	state_clr;
	uint64_t	state_get;
	uint64_t	transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t		cpgr_lock;
	unsigned int		cpgr_references;
	unsigned int		cpgr_num_slots;
	unsigned int		cpgr_num_slots_occupied;
	union {
		compressor_slot_t	cpgr_eslots[2];	/* embedded slots */
		compressor_slot_t	*cpgr_dslots;	/* direct slots */
		compressor_slot_t	**cpgr_islots;	/* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;

#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {		\
		_cpgr_ = NULL;						\
	} else {							\
		_cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END

zone_t compressor_pager_zone;

lck_grp_t	compressor_pager_lck_grp;
lck_grp_attr_t	compressor_pager_lck_grp_attr;
lck_attr_t	compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

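/*
 * Example (sketch only, assuming 4KB pages and 4-byte slots, so
 * COMPRESSOR_SLOTS_PER_CHUNK == 128): how an object's size picks one of
 * the three cpgr_slots representations, mirroring the logic used by
 * compressor_memory_object_create() and compressor_pager_slot_lookup()
 * below.  An 8KB object (2 slots) fits in the embedded slots, a 64KB
 * object (16 slots) gets one direct array, and a 1MB object (256 slots)
 * gets 2 lazily-filled indirect chunks.
 */
#if 0 /* illustrative example only */
static const char *
compressor_pager_slots_kind(unsigned int num_slots)
{
	unsigned int num_chunks;

	num_chunks = (num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1)
		return "indirect (cpgr_islots)";  /* array of chunk pointers */
	if (num_slots > 2)
		return "direct (cpgr_dslots)";    /* one kalloc'd slot array */
	return "embedded (cpgr_eslots)";          /* no allocation needed */
}
#endif
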
/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
					       int num_slots,
					       int flags,
					       int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp);

kern_return_t
compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t	pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	compressor_pager_t	pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;
	unsigned int		num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						0,
						NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page,
 * and write it if we do not.  The implementation is far from optimized,
 * and it also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been  */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages  */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface.        */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size)
{
	compressor_pager_t	pager;
	memory_object_offset_t	cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t	new_size,
	memory_object_t		*new_mem_obj)
{
	compressor_pager_t	pager;
	int			num_chunks;

	if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}

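/*
 * Usage sketch (example only; "mem_obj" and "kr" are hypothetical
 * locals): creating a pager big enough to back a 1MB VM object.  The
 * new pager starts with one reference and no control association; the
 * kernel pairs it with a memory_object_control_t later, through
 * compressor_memory_object_init().
 */
#if 0 /* illustrative example only */
	memory_object_t	mem_obj;
	kern_return_t	kr;

	kr = compressor_memory_object_create((memory_object_size_t)(1024 * 1024),
					     &mem_obj);
	if (kr == KERN_SUCCESS) {
		/* 256 slots, i.e. 2 indirect chunks of 128 slots each */
	}
#endif
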
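/*
 * Free every occupied slot in "chunk" through vm_compressor_free().
 * A slot that could not be freed is counted in *failures; a -2 return
 * from vm_compressor_free() means the free would have blocked and is
 * only expected when C_DONT_BLOCK was passed (see the assert below).
 * Returns the number of slots actually freed.
 */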
unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t	*chunk,
	int			num_slots,
	int			flags,
	int			*failures)
{
	int i;
	int retval;
	unsigned int num_slots_freed;

	if (failures)
		*failures = 0;
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0)
				num_slots_freed++;
			else {
				if (retval == -2)
					assert(flags & C_DONT_BLOCK);

				if (failures)
					*failures += 1;
			}
		}
	}
	return num_slots_freed;
}

void
compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp)
{
	int			num_chunks;
	uint32_t		page_num;
	int			chunk_idx;
	int			slot_idx;
	compressor_slot_t	*chunk;
	compressor_slot_t	*t_chunk;

	page_num = (uint32_t)(offset/PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				__c11_atomic_thread_fence(memory_order_release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk)
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}

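/*
 * Typical probe pattern (sketch only), as used by
 * vm_compressor_pager_get() and vm_compressor_pager_state_get() below:
 * look the slot up without allocating; a NULL slot pointer or a zero
 * slot value both mean the compressor holds nothing at that offset.
 */
#if 0 /* illustrative example only */
	compressor_slot_t *slot_p;

	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (slot_p == NULL || *slot_p == 0) {
		/* nothing compressed at "offset" */
	}
#endif
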
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
				      10000 * sizeof (struct compressor_pager),
				      8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}

kern_return_t
vm_compressor_pager_put(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	void			**current_chead,
	char			*scratch_buf,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/*
	 * This routine is called by the pageout thread.  The pageout
	 * thread cannot be blocked by read activities, so the grant of
	 * the vs lock must be done on a try versus a blocking basis.
	 * The code below relies on the fact that the interface is
	 * synchronous.  Should this interface ever become asynchronous
	 * again for some type of pager, the pages will have to be
	 * returned through a separate, asynchronous path.
	 */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
		return KERN_RESOURCE_SHORTAGE;
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}

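/*
 * Decompress the page at "offset" into physical page "ppnum".
 * Returns KERN_SUCCESS when the page was delivered (the slot is freed
 * unless it must be kept for a copy-on-write fault), KERN_MEMORY_ERROR
 * when the compressor holds nothing at this offset, KERN_MEMORY_FAILURE
 * when the offset is out of range or decompression fails, and
 * KERN_FAILURE when C_DONT_BLOCK was set and the operation would have
 * blocked.
 */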
kern_return_t
vm_compressor_pager_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	int			*my_fault_type,
	int			flags,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int	retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}

unsigned int
vm_compressor_pager_state_clr(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;
	unsigned int		num_slots_freed;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}

unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t		mem_obj,
	int			flags)
{
	compressor_pager_t	pager;
	int			num_chunks;
	int			failures;
	int			i;
	compressor_slot_t	*chunk;
	unsigned int		num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						flags,
						&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

void
vm_compressor_pager_transfer(
	memory_object_t		dst_mem_obj,
	memory_object_offset_t	dst_offset,
	memory_object_t		src_mem_obj,
	memory_object_offset_t	src_offset)
{
	compressor_pager_t	src_pager, dst_pager;
	compressor_slot_t	*src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
				     &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
				     &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}

memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	uint32_t		num_chunks;
	uint32_t		page_num;
	uint32_t		chunk_idx;
	uint32_t		slot_idx;
	compressor_slot_t	*chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		      COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		     slot_idx < pager->cpgr_num_slots;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				/* widen before multiplying to avoid 32-bit overflow */
				return (memory_object_offset_t) slot_idx *
					PAGE_SIZE;
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	     slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	     chunk_idx < num_chunks;
	     chunk_idx++, slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		     slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
					      COMPRESSOR_SLOTS_PER_CHUNK) +
					     slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				/* widen before multiplying to avoid 32-bit overflow */
				return (memory_object_offset_t) next_slot *
					PAGE_SIZE;
			}
		}
	}
	return (memory_object_offset_t) -1;
}

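/*
 * Iteration sketch (example only): visiting every compressed page of an
 * object, assuming the caller keeps the pager alive throughout.  A
 * return value of (memory_object_offset_t) -1 is the "no more
 * compressed pages" sentinel.
 */
#if 0 /* illustrative example only */
	memory_object_offset_t off;

	for (off = vm_compressor_pager_next_compressed(mem_obj, 0);
	     off != (memory_object_offset_t) -1;
	     off = vm_compressor_pager_next_compressed(mem_obj,
						       off + PAGE_SIZE)) {
		/* the page at "off" is held by the compressor */
	}
#endif
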
unsigned int
vm_compressor_pager_get_count(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}

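/*
 * Adjust the pager's occupancy count by "compressed_count_delta".
 * With the VM object lock held shared, several threads may update the
 * count concurrently, so the update must be atomic; with the lock held
 * exclusive, the pager is effectively single-writer and a plain add
 * suffices.
 */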
void
vm_compressor_pager_count(
	memory_object_t	mem_obj,
	int		compressed_count_delta,
	boolean_t	shared_lock,
	vm_object_t	object __unused)
{
	compressor_pager_t	pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		       (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
			    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	void			**current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t	*slot_p;
	compressor_pager_t	dst_pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL)
		return KERN_FAILURE;

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */