/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t			mem_obj,
	memory_object_control_t		control,
	memory_object_cluster_size_t	pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t		protection_required,
	memory_object_fault_info_t	fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_size_t		length,
	__unused vm_sync_t		flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
	"compressor pager"
};

/* internal data structures */

struct {
	uint64_t	data_returns;
	uint64_t	data_requests;
	uint64_t	put;
	uint64_t	get;
	uint64_t	state_clr;
	uint64_t	state_get;
	uint64_t	transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

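/*
 * A compressor pager tracks one compressor slot per page of the memory
 * object it backs.  The slot array is stored in one of three ways,
 * depending on the size of the object:
 *   - at most 2 slots:            embedded directly in the pager
 *                                 (cpgr_eslots);
 *   - at most one chunk's worth:  a single kalloc'd array (cpgr_dslots);
 *   - anything larger:            an array of pointers to fixed-size
 *                                 chunks, each chunk allocated lazily on
 *                                 first use (cpgr_islots).
 */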
typedef struct compressor_pager {
	struct ipc_object_header	cpgr_pager_header; /* fake ip_kotype */
	memory_object_pager_ops_t	cpgr_pager_ops;	/* == &compressor_pager_ops */
	memory_object_control_t		cpgr_control;
	lck_mtx_t			cpgr_lock;

	unsigned int			cpgr_references;
	unsigned int			cpgr_num_slots;
	unsigned int			cpgr_num_slots_occupied;
	union {
		compressor_slot_t	cpgr_eslots[2];	/* embedded slots */
		compressor_slot_t	*cpgr_dslots;	/* direct slots */
		compressor_slot_t	**cpgr_islots;	/* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;

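/*
 * Convert a memory_object_t into a compressor_pager_t after checking that
 * the object really is one of ours: if its mo_pager_ops vector does not
 * point at compressor_pager_ops, the lookup yields NULL instead of a
 * bogus downcast.
 */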
#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {		\
		_cpgr_ = NULL;						\
	} else {							\
		_cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END

zone_t compressor_pager_zone;

lck_grp_t	compressor_pager_lck_grp;
lck_grp_attr_t	compressor_pager_lck_grp_attr;
lck_attr_t	compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
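/*
 * With 512-byte chunks and 4-byte (int) slots, each chunk covers
 * 512 / 4 = 128 slots, i.e. 128 pages of the object (512KB of address
 * space with the common 4KB page size).
 */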

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
					       int num_slots,
					       int flags,
					       int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp);

kern_return_t
compressor_memory_object_init(
	memory_object_t			mem_obj,
	memory_object_control_t		control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t	pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);

	memory_object_synchronize_completed(pager->cpgr_control, offset, length);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	compressor_pager_t	pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_control;
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;
	unsigned int		num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						0,
						NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t		protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	size)
{
	compressor_pager_t	pager;
	memory_object_offset_t	cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t		mem_obj,
	__unused memory_object_offset_t		offset,
	__unused memory_object_cluster_size_t	size,
	__unused memory_object_offset_t		*resid_offset,
	__unused int				*io_error,
	__unused boolean_t			dirty,
	__unused boolean_t			kernel_copy,
	__unused int				upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t	new_size,
	memory_object_t		*new_mem_obj)
{
	compressor_pager_t	pager;
	int			num_chunks;

	if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */

	pager->cpgr_pager_ops = &compressor_pager_ops;
	pager->cpgr_pager_header.io_bits = IKOT_MEMORY_OBJECT;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}

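/*
 * Walk one chunk of slots and hand every occupied slot back to the
 * compressor via vm_compressor_free().  Returns the number of slots
 * actually freed; if "failures" is non-NULL, it is set to the number of
 * slots that could not be freed (e.g. when C_DONT_BLOCK is passed in
 * "flags" and freeing would have had to block).
 */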
unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t	*chunk,
	int			num_slots,
	int			flags,
	int			*failures)
{
	int		i;
	int		retval;
	unsigned int	num_slots_freed;

	if (failures)
		*failures = 0;
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0)
				num_slots_freed++;
			else {
				if (retval == -2)
					assert(flags & C_DONT_BLOCK);

				if (failures)
					*failures += 1;
			}
		}
	}
	return num_slots_freed;
}

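/*
 * Find the slot for the page at "offset", optionally allocating the
 * containing chunk when "do_alloc" is TRUE.  The chunk is kalloc'd
 * without the pager lock held, then the lock is taken to install it;
 * if another thread raced us and installed one first, ours is freed.
 *
 * Worked example (assuming 128 slots per chunk and 4KB pages): offset
 * 0x81000 is page 129, which lives in chunk 129 / 128 = 1 at slot
 * index 129 % 128 = 1.
 */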
void
compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp)
{
	int			num_chunks;
	uint32_t		page_num;
	int			chunk_idx;
	int			slot_idx;
	compressor_slot_t	*chunk;
	compressor_slot_t	*t_chunk;

	page_num = (uint32_t)(offset/PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk)
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}

void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

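	/*
	 * zinit(size, max, alloc, name): the zone is sized for up to
	 * 10000 compressor pagers and grows in 8192-byte allocations.
	 */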
	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
				      10000 * sizeof (struct compressor_pager),
				      8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}

kern_return_t
vm_compressor_pager_put(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	void			**current_chead,
	char			*scratch_buf,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout thread */
	/* cannot be blocked by read activities, so the grant of the vs lock */
	/* must be done on a try versus a blocking basis.  The code below    */
	/* relies on the fact that the interface is synchronous.  Should     */
	/* this interface be again async for some type of pager in the       */
	/* future the pages will have to be returned through a separate,     */
	/* asynchronous path. */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
		return (KERN_RESOURCE_SHORTAGE);
	*compressed_count_delta_p += 1;

	return (KERN_SUCCESS);
}

kern_return_t
vm_compressor_pager_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	int			*my_fault_type,
	int			flags,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int	retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}

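/*
 * Discard the compressed copy of the page at "offset", if there is one.
 * Returns the number of slots freed (0 or 1).
 */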
unsigned int
vm_compressor_pager_state_clr(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;
	unsigned int		num_slots_freed;

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}

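/*
 * Report whether the compressor holds a copy of the page at "offset":
 * VM_EXTERNAL_STATE_EXISTS if it does, VM_EXTERNAL_STATE_ABSENT if it
 * does not (or the offset is out of range).
 */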
vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}

unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t		mem_obj,
	int			flags)
{
	compressor_pager_t	pager;
	int			num_chunks;
	int			failures;
	int			i;
	compressor_slot_t	*chunk;
	unsigned int		num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						flags,
						&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

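/*
 * Move the compressed page at "src_offset" in "src_mem_obj" to
 * "dst_offset" in "dst_mem_obj".  The destination slot must be empty
 * and the source slot occupied; the occupancy counts of both pagers
 * are updated atomically.
 */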
void
vm_compressor_pager_transfer(
	memory_object_t		dst_mem_obj,
	memory_object_offset_t	dst_offset,
	memory_object_t		src_mem_obj,
	memory_object_offset_t	src_offset)
{
	compressor_pager_t	src_pager, dst_pager;
	compressor_slot_t	*src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
				     &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
				     &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}

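/*
 * Return the offset of the first compressed page at or after "offset",
 * or (memory_object_offset_t) -1 if there is none.  For example, if
 * only pages 3 and 9 of the object are compressed, an "offset" of
 * 4 * PAGE_SIZE yields 9 * PAGE_SIZE.
 */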
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	uint32_t		num_chunks;
	uint32_t		page_num;
	uint32_t		chunk_idx;
	uint32_t		slot_idx;
	compressor_slot_t	*chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		      COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		     slot_idx < pager->cpgr_num_slots;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
								 PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	     slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	     chunk_idx < num_chunks;
	     chunk_idx++,
	     slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		     slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
					      COMPRESSOR_SLOTS_PER_CHUNK) +
					     slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
								 PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}

unsigned int
vm_compressor_pager_get_count(
	memory_object_t	mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}

void
vm_compressor_pager_count(
	memory_object_t	mem_obj,
	int		compressed_count_delta,
	boolean_t	shared_lock,
	vm_object_t	object __unused)
{
	compressor_pager_t	pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		       (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
			    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	void			**current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL)
		return KERN_FAILURE;

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return (vm_compressor_relocate(current_chead, slot_p));
}
#endif /* CONFIG_FREEZE */