/* osfmk/vm/vm_compressor_pager.c (xnu-6153.61.1) */
/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <machine/atomic.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    __unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_data_unlock = compressor_memory_object_data_unlock,
	.memory_object_synchronize = compressor_memory_object_synchronize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_data_reclaim = compressor_memory_object_data_reclaim,
	.memory_object_pager_name = "compressor pager"
};
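
/*
 * Of these entry points, only reference/deallocate, init/terminate and
 * data_request (plus the trivial data_initialize) have real
 * implementations below; the rest are panic stubs.  In practice the VM
 * fault and pageout paths reach the compressor through the
 * vm_compressor_pager_put()/vm_compressor_pager_get() functions further
 * down rather than through the generic memory-object interface.
 */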

/* internal data structures */

struct {
	uint64_t data_returns;
	uint64_t data_requests;
	uint64_t put;
	uint64_t get;
	uint64_t state_clr;
	uint64_t state_get;
	uint64_t transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t       cpgr_lock;
	unsigned int    cpgr_references;
	unsigned int    cpgr_num_slots;
	unsigned int    cpgr_num_slots_occupied;
	union {
		compressor_slot_t cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t *cpgr_dslots;   /* direct slots */
		compressor_slot_t **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
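
/*
 * The cpgr_slots union provides three storage tiers for the per-page slot
 * array, chosen by object size (see compressor_memory_object_create()):
 *  - up to 2 pages: slots stored inline in cpgr_eslots, no allocation;
 *  - up to one chunk's worth of pages: a single kalloc'ed array
 *    (cpgr_dslots);
 *  - larger objects: an array of pointers to fixed-size chunks
 *    (cpgr_islots), with individual chunks allocated lazily on first use.
 */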

#define compressor_pager_lookup(_mem_obj_, _cpgr_)                      \
	MACRO_BEGIN                                                     \
	if (_mem_obj_ == NULL ||                                        \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {         \
	        _cpgr_ = NULL;                                          \
	} else {                                                        \
	        _cpgr_ = (compressor_pager_t) _mem_obj_;                \
	}                                                               \
	MACRO_END

zone_t compressor_pager_zone;

lck_grp_t compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE     (512)
#define COMPRESSOR_SLOTS_PER_CHUNK      (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
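
/*
 * Worked example (illustrative): compressor_slot_t is an int, so with
 * 4-byte ints a 512-byte chunk holds 512 / 4 = 128 slots.  Assuming 4 KB
 * pages, one chunk then covers 128 * 4 KB = 512 KB of object space, and an
 * object switches to the indirect (cpgr_islots) layout once it grows past
 * 512 KB.
 */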

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures);
void compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp);

kern_return_t
compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t mem_obj,
	__unused boolean_t reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t mem_obj)
{
	memory_object_control_t control;
	compressor_pager_t pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		    num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
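
/*
 * Note: with the compressor as backing store, data_request() is only an
 * existence probe: a length of 0 asks "does the compressor hold this
 * page?" and anything else panics.  The actual decompression into a
 * physical page happens in vm_compressor_pager_get() below.
 */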

/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size)
{
	compressor_pager_t pager;
	memory_object_offset_t cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t new_size,
	memory_object_t *new_mem_obj)
{
	compressor_pager_t pager;
	int num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
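
/*
 * Sizing example (illustrative, assuming 4 KB pages and 128 slots per
 * chunk): a 2-page object fits in the embedded cpgr_eslots; a 3-page to
 * 128-page (512 KB) object gets one kalloc'ed direct array; a 1 MB object
 * needs 256 slots, i.e. 2 chunks, so it gets an indirect array of 2 chunk
 * pointers whose chunks are only allocated when a page in their range is
 * first compressed.
 */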

unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t *chunk,
	int num_slots,
	int flags,
	int *failures)
{
	int i;
	int retval;
	unsigned int num_slots_freed;

	if (failures) {
		*failures = 0;
	}
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0) {
				num_slots_freed++;
			} else {
				if (retval == -2) {
					assert(flags & C_DONT_BLOCK);
				}

				if (failures) {
					*failures += 1;
				}
			}
		}
	}
	return num_slots_freed;
}
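
/*
 * Return-value convention, as used above: vm_compressor_free() returns 0
 * when the slot was freed and -2 when it would have had to block but the
 * caller passed C_DONT_BLOCK.  Any nonzero return is counted as a failure
 * so that vm_compressor_pager_reap_pages() knows it cannot release the
 * chunk yet.
 */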

void
compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp)
{
	int num_chunks;
	uint32_t page_num;
	int chunk_idx;
	int slot_idx;
	compressor_slot_t *chunk;
	compressor_slot_t *t_chunk;

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
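
/*
 * Index math example (illustrative, assuming 4 KB pages and 128 slots per
 * chunk): for offset 0x81000, page_num = 0x81 (129), so
 * chunk_idx = 129 / 128 = 1 and slot_idx = 129 % 128 = 1, and the slot
 * lives at cpgr_islots[1][1].  The allocate-then-publish dance above lets
 * readers check cpgr_islots[chunk_idx] without holding the pager lock: a
 * chunk pointer, once published after the release fence, does not change
 * until the pager is reaped or destroyed.
 */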

void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof(struct compressor_pager),
	    10000 * sizeof(struct compressor_pager),
	    8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}

kern_return_t
vm_compressor_pager_put(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	void **current_chead,
	char *scratch_buf,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/*
	 * This routine is called by the pageout thread.  The pageout thread
	 * must not be blocked by read activity, so any lock taken here has
	 * to be acquired on a "try" rather than a blocking basis.  The code
	 * below relies on the fact that the interface is synchronous: should
	 * this interface ever become asynchronous again for some type of
	 * pager, the pages would have to be returned through a separate,
	 * asynchronous path.
	 */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		return KERN_RESOURCE_SHORTAGE;
	}
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}
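
/*
 * Accounting note: *compressed_count_delta_p can end up as -1 + 1 = 0 when
 * an already-compressed page is replaced (the old slot is freed, then the
 * new compression fills it), +1 for a fresh compression, and -1 if the old
 * slot was freed but vm_compressor_put() then failed with
 * KERN_RESOURCE_SHORTAGE.
 */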


kern_return_t
vm_compressor_pager_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	int *my_fault_type,
	int flags,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1) {
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == 1) {
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
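
/*
 * vm_compressor_get() return values, as interpreted above: 0 means the
 * page was decompressed into ppnum directly, 1 means the compressed data
 * first had to be swapped in (reported to the caller as a
 * DBG_COMPRESSOR_SWAPIN_FAULT), -1 means the decompression failed, and -2
 * means the operation would have blocked but C_DONT_BLOCK was set.
 */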

unsigned int
vm_compressor_pager_state_clr(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	unsigned int num_slots_freed;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}

unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t mem_obj,
	int flags)
{
	compressor_pager_t pager;
	int num_chunks;
	int failures;
	int i;
	compressor_slot_t *chunk;
	unsigned int num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					flags,
					&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

void
vm_compressor_pager_transfer(
	memory_object_t dst_mem_obj,
	memory_object_offset_t dst_offset,
	memory_object_t src_mem_obj,
	memory_object_offset_t src_offset)
{
	compressor_pager_t src_pager, dst_pager;
	compressor_slot_t *src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
	    &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
	    &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
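
/*
 * The transfer moves ownership of one compressed page between pagers
 * without decompressing it: the destination slot must be empty and the
 * source slot occupied (see the asserts above), and the occupancy counts
 * are adjusted atomically since neither pager's mutex is held here.
 */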

memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	uint32_t num_chunks;
	uint32_t page_num;
	uint32_t chunk_idx;
	uint32_t slot_idx;
	compressor_slot_t *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
	    COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
				       PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++,
	    slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
				       PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}
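
/*
 * Usage sketch (illustrative, not from this file): a caller can walk every
 * compressed page of an object by starting at offset 0 and feeding each
 * result back in, advanced by one page:
 *
 *	for (offset = vm_compressor_pager_next_compressed(mem_obj, 0);
 *	    offset != (memory_object_offset_t) -1;
 *	    offset = vm_compressor_pager_next_compressed(mem_obj,
 *	        offset + PAGE_SIZE)) {
 *		... process the compressed page at "offset" ...
 *	}
 *
 * Skipping NULL chunks makes this scan cheap for sparse objects.
 */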

unsigned int
vm_compressor_pager_get_count(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}

void
vm_compressor_pager_count(
	memory_object_t mem_obj,
	int compressed_count_delta,
	boolean_t shared_lock,
	vm_object_t object __unused)
{
	compressor_pager_t pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		    (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
		    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}
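
/*
 * Locking rationale: with the VM object lock held shared, several threads
 * may update the occupancy count concurrently, so the update must be an
 * atomic add; with the lock held exclusive, the holder is the only writer
 * and a plain addition suffices.
 */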

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	void **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */