/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <machine/atomic.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    __unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
    .memory_object_reference = compressor_memory_object_reference,
    .memory_object_deallocate = compressor_memory_object_deallocate,
    .memory_object_init = compressor_memory_object_init,
    .memory_object_terminate = compressor_memory_object_terminate,
    .memory_object_data_request = compressor_memory_object_data_request,
    .memory_object_data_return = compressor_memory_object_data_return,
    .memory_object_data_initialize = compressor_memory_object_data_initialize,
    .memory_object_data_unlock = compressor_memory_object_data_unlock,
    .memory_object_synchronize = compressor_memory_object_synchronize,
    .memory_object_map = compressor_memory_object_map,
    .memory_object_last_unmap = compressor_memory_object_last_unmap,
    .memory_object_data_reclaim = compressor_memory_object_data_reclaim,
    .memory_object_pager_name = "compressor pager"
};

/* internal data structures */

struct {
    uint64_t data_returns;
    uint64_t data_requests;
    uint64_t put;
    uint64_t get;
    uint64_t state_clr;
    uint64_t state_get;
    uint64_t transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

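/*
 * A compressor slot is a small handle, filled in by vm_compressor_put(),
 * that identifies where the compressed copy of one page lives; a value of
 * zero means "no compressed data for this page".  A pager keeps one slot
 * per page of its memory object and stores them in one of three ways,
 * chosen by object size: up to two slots are embedded directly in the
 * pager structure, up to one chunk's worth live in a single directly
 * allocated array, and anything larger uses an array of pointers to
 * fixed-size slot chunks that are allocated lazily.
 */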
typedef struct compressor_pager {
    /* mandatory generic header */
    struct memory_object cpgr_hdr;

    /* pager-specific data */
    lck_mtx_t cpgr_lock;
    unsigned int cpgr_references;
    unsigned int cpgr_num_slots;
    unsigned int cpgr_num_slots_occupied;
    union {
        compressor_slot_t cpgr_eslots[2]; /* embedded slots */
        compressor_slot_t *cpgr_dslots;   /* direct slots */
        compressor_slot_t **cpgr_islots;  /* indirect slots */
    } cpgr_slots;
} *compressor_pager_t;

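/*
 * compressor_pager_lookup() verifies that the memory object was created by
 * this pager (by checking its mo_pager_ops against compressor_pager_ops)
 * before casting it; callers get NULL for foreign or null memory objects.
 */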
#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
    MACRO_BEGIN \
    if (_mem_obj_ == NULL || \
        _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
        _cpgr_ = NULL; \
    } else { \
        _cpgr_ = (compressor_pager_t) _mem_obj_; \
    } \
    MACRO_END

zone_t compressor_pager_zone;

LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");

#define compressor_pager_lock(_cpgr_) \
    lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
    lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
    lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
#define compressor_pager_lock_destroy(_cpgr_) \
    lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

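/*
 * Slot storage for larger pagers is carved into fixed 512-byte chunks.
 * With 4-byte compressor_slot_t entries, each chunk holds 128 slots and
 * therefore covers 128 pages worth of the memory object.
 */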
#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures);
void compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp);

#if defined(__LP64__)

/* restricted VA zones for slots */

#define NUM_SLOTS_ZONES 3

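/*
 * Direct slot arrays are rounded up to one of these zone sizes: 16 bytes
 * (up to 4 slots), 64 bytes (up to 16 slots), or a full 512-byte chunk.
 * zalloc_slot_array() below picks the smallest zone that fits.
 */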
static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
    16,
    64,
    COMPRESSOR_SLOTS_CHUNK_SIZE
};

static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
    "compressor_slots.16",
    "compressor_slots.64",
    "compressor_slots.512"
};

static zone_t
    compressor_slots_zones[NUM_SLOTS_ZONES];

#endif /* defined(__LP64__) */

static void
zfree_slot_array(compressor_slot_t *slots, size_t size);
static compressor_slot_t *
zalloc_slot_array(size_t size, zalloc_flags_t);


kern_return_t
compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    __unused memory_object_cluster_size_t pager_page_size)
{
    compressor_pager_t pager;

    assert(pager_page_size == PAGE_SIZE);

    memory_object_control_reference(control);

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
        panic("compressor_memory_object_init: bad request");
    }
    pager->cpgr_hdr.mo_control = control;

    compressor_pager_unlock(pager);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t flags)
{
    panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    panic("compressor_memory_object_map");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
    __unused memory_object_t mem_obj)
{
    panic("compressor_memory_object_last_unmap");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store)
{
    panic("compressor_memory_object_data_reclaim");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
    memory_object_t mem_obj)
{
    memory_object_control_t control;
    compressor_pager_t pager;

    /*
     * control port is a receive right, not a send right.
     */

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    /*
     * After memory_object_terminate both memory_object_init
     * and a no-senders notification are possible, so we need
     * to clean up our reference to the memory_object_control
     * to prepare for a new init.
     */

    control = pager->cpgr_hdr.mo_control;
    pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    compressor_pager_unlock(pager);

    /*
     * Now we deallocate our reference on the control.
     */
    memory_object_control_deallocate(control);
    return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return;
    }

    compressor_pager_lock(pager);
    assert(pager->cpgr_references > 0);
    pager->cpgr_references++;
    compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;
    unsigned int num_slots_freed;

    /*
     * Because we don't give out multiple first references
     * for a memory object, there can't be a race
     * between getting a deallocate call and creating
     * a new reference for the object.
     */

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return;
    }

    compressor_pager_lock(pager);
    if (--pager->cpgr_references > 0) {
        compressor_pager_unlock(pager);
        return;
    }

    /*
     * We shouldn't get a deallocation call
     * when the kernel has the object cached.
     */
    if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
        panic("compressor_memory_object_deallocate(): bad request");
    }

    /*
     * Unlock the pager (though there should be no one
     * waiting for it).
     */
    compressor_pager_unlock(pager);

    /* free the compressor slots */
    int num_chunks;
    int i;
    compressor_slot_t *chunk;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        for (i = 0; i < num_chunks; i++) {
            chunk = pager->cpgr_slots.cpgr_islots[i];
            if (chunk != NULL) {
                num_slots_freed =
                    compressor_pager_slots_chunk_free(
                        chunk,
                        COMPRESSOR_SLOTS_PER_CHUNK,
                        0,
                        NULL);
                pager->cpgr_slots.cpgr_islots[i] = NULL;
                zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
            }
        }
        kheap_free(KHEAP_DEFAULT, pager->cpgr_slots.cpgr_islots,
            num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
        pager->cpgr_slots.cpgr_islots = NULL;
    } else if (pager->cpgr_num_slots > 2) {
        chunk = pager->cpgr_slots.cpgr_dslots;
        num_slots_freed =
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                0,
                NULL);
        pager->cpgr_slots.cpgr_dslots = NULL;
        zfree_slot_array(chunk,
            (pager->cpgr_num_slots *
            sizeof(pager->cpgr_slots.cpgr_dslots[0])));
    } else {
        chunk = &pager->cpgr_slots.cpgr_eslots[0];
        num_slots_freed =
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                0,
                NULL);
    }

    compressor_pager_lock_destroy(pager);
    zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    __unused memory_object_fault_info_t fault_info)
{
    compressor_pager_t pager;
    kern_return_t kr;
    compressor_slot_t *slot_p;

    compressor_pager_stats.data_requests++;

    /*
     * Request must be on a page boundary and a multiple of pages.
     */
    if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
        panic("compressor_memory_object_data_request(): bad alignment");
    }

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return KERN_FAILURE;
    }

    compressor_pager_lookup(mem_obj, pager);

    if (length == 0) {
        /* we're only querying the pager for this page */
    } else {
        panic("compressor: data_request");
    }

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        kr = KERN_FAILURE;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        kr = KERN_FAILURE;
    } else {
        /* compressor does have this page */
        kr = KERN_SUCCESS;
    }
    return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page,
 * and write it if we do not.  The implementation is far from optimized,
 * and also assumes that the default_pager is single-threaded.
 */
/*
 * It is questionable whether or not a pager should decide what is relevant
 * and what is not in data sent from the kernel.  Data initialize has been
 * changed to copy back all data sent to it in preparation for its eventual
 * merge with data return.  It is the kernel that should decide what pages
 * to write back.  As of the writing of this note, this is indeed the case:
 * the kernel writes back one page at a time through this interface.
 */

kern_return_t
compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size)
{
    compressor_pager_t pager;
    memory_object_offset_t cur_offset;

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    for (cur_offset = offset;
        cur_offset < offset + size;
        cur_offset += PAGE_SIZE) {
        panic("do a data_return() if slot for this page is empty");
    }

    compressor_pager_unlock(pager);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    panic("compressor_memory_object_data_unlock()");
    return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("compressor: data_return");
    return KERN_FAILURE;
}

/*
 * Routine:     compressor_memory_object_create
 * Purpose:
 *      Handle requests for memory objects from the
 *      kernel.
 * Notes:
 *      Because we only give out the default memory
 *      manager port to the kernel, we don't have to
 *      be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
    memory_object_size_t new_size,
    memory_object_t *new_mem_obj)
{
    compressor_pager_t pager;
    int num_chunks;

    if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
        /* 32-bit overflow for number of pages */
        panic("%s: size 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) new_size);
        return KERN_INVALID_ARGUMENT;
    }

    pager = (compressor_pager_t) zalloc(compressor_pager_zone);
    if (pager == NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    compressor_pager_lock_init(pager);
    pager->cpgr_references = 1;
    pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
    pager->cpgr_num_slots_occupied = 0;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        pager->cpgr_slots.cpgr_islots = kheap_alloc(KHEAP_DEFAULT,
            num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]),
            Z_WAITOK | Z_ZERO);
    } else if (pager->cpgr_num_slots > 2) {
        pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
            sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
    } else {
        pager->cpgr_slots.cpgr_eslots[0] = 0;
        pager->cpgr_slots.cpgr_eslots[1] = 0;
    }

    /*
     * Set up associations between this memory object
     * and this compressor_pager structure
     */
    pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
    pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    *new_mem_obj = (memory_object_t) pager;
    return KERN_SUCCESS;
}


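/*
 * Walk a chunk of slots and release every occupied slot back to the
 * compressor.  Returns the number of slots actually freed; when a non-NULL
 * "failures" pointer is supplied, it is set to the number of slots that
 * could not be freed (e.g. because C_DONT_BLOCK was set in "flags" and
 * freeing would have blocked).
 */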
unsigned int
compressor_pager_slots_chunk_free(
    compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures)
{
    int i;
    int retval;
    unsigned int num_slots_freed;

    if (failures) {
        *failures = 0;
    }
    num_slots_freed = 0;
    for (i = 0; i < num_slots; i++) {
        if (chunk[i] != 0) {
            retval = vm_compressor_free(&chunk[i], flags);

            if (retval == 0) {
                num_slots_freed++;
            } else {
                if (retval == -2) {
                    assert(flags & C_DONT_BLOCK);
                }

                if (failures) {
                    *failures += 1;
                }
            }
        }
    }
    return num_slots_freed;
}

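/*
 * Translate a page-aligned offset within the memory object into a pointer
 * to its compressor slot.  With do_alloc set, the indirect chunk covering
 * that offset is allocated on demand; otherwise *slot_pp is set to NULL
 * when the offset is out of range or its chunk has never been allocated.
 */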
void
compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp)
{
    int num_chunks;
    uint32_t page_num;
    int chunk_idx;
    int slot_idx;
    compressor_slot_t *chunk;
    compressor_slot_t *t_chunk;

    page_num = (uint32_t)(offset / PAGE_SIZE);
    if (page_num != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        *slot_pp = NULL;
        return;
    }
    if (page_num >= pager->cpgr_num_slots) {
        /* out of range */
        *slot_pp = NULL;
        return;
    }
    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
        chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

        if (chunk == NULL && do_alloc) {
            t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
                Z_WAITOK | Z_ZERO);

            compressor_pager_lock(pager);

            if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
                /*
                 * On some platforms, the memory stores from
                 * the zero-filling of t_chunk above might not
                 * have been made visible yet, and another
                 * thread might see the contents of this new
                 * chunk before it has been fully zero-filled.
                 * This memory barrier should take care of this
                 * according to the platform requirements.
                 */
                os_atomic_thread_fence(release);

                chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
                t_chunk = NULL;
            }
            compressor_pager_unlock(pager);

            if (t_chunk) {
                zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
            }
        }
        if (chunk == NULL) {
            *slot_pp = NULL;
        } else {
            slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
            *slot_pp = &chunk[slot_idx];
        }
    } else if (pager->cpgr_num_slots > 2) {
        slot_idx = page_num;
        *slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
    } else {
        slot_idx = page_num;
        *slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
    }
}

void
vm_compressor_pager_init(void)
{
    /* embedded slot pointers in compressor_pager get packed, so VA restricted */
    compressor_pager_zone = zone_create_ext("compressor_pager",
        sizeof(struct compressor_pager), ZC_NOENCRYPT,
        ZONE_ID_ANY, ^(zone_t z){
#if defined(__LP64__)
        zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP);
#else
        (void)z;
#endif /* defined(__LP64__) */
    });

#if defined(__LP64__)
    for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
        compressor_slots_zones[idx] = zone_create_ext(
            compressor_slots_zones_names[idx],
            compressor_slots_zones_sizes[idx], ZC_NONE,
            ZONE_ID_ANY, ^(zone_t z){
            zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP);
        });
    }
#endif /* defined(__LP64__) */

    vm_compressor_init();
}

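/*
 * Allocate/free backing store for a slot array.  On LP64 the array comes
 * from the smallest restricted-VA slots zone that can hold "size" bytes;
 * on other configurations it falls back to the data-buffers kalloc heap.
 */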
static compressor_slot_t *
zalloc_slot_array(size_t size, zalloc_flags_t flags)
{
#if defined(__LP64__)
    compressor_slot_t *slots = NULL;

    assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
    for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
        if (size > compressor_slots_zones_sizes[idx]) {
            continue;
        }
        slots = zalloc_flags(compressor_slots_zones[idx], flags);
        break;
    }
    return slots;
#else /* defined(__LP64__) */
    return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags);
#endif /* !defined(__LP64__) */
}

static void
zfree_slot_array(compressor_slot_t *slots, size_t size)
{
#if defined(__LP64__)
    assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
    for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
        if (size > compressor_slots_zones_sizes[idx]) {
            continue;
        }
        zfree(compressor_slots_zones[idx], slots);
        break;
    }
#else /* defined(__LP64__) */
    kheap_free(KHEAP_DATA_BUFFERS, slots, size);
#endif /* !defined(__LP64__) */
}

kern_return_t
vm_compressor_pager_put(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    void **current_chead,
    char *scratch_buf,
    int *compressed_count_delta_p)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    compressor_pager_stats.put++;

    *compressed_count_delta_p = 0;

    /*
     * This routine is called by the pageout thread.  The pageout thread
     * cannot be blocked by read activities, so the grant of the vs lock
     * must be done on a try rather than a blocking basis.  The code below
     * relies on the fact that the interface is synchronous.  Should this
     * interface become asynchronous again for some type of pager in the
     * future, the pages will have to be returned through a separate,
     * asynchronous path.
     */

    compressor_pager_lookup(mem_obj, pager);

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return KERN_RESOURCE_SHORTAGE;
    }

    compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

    if (slot_p == NULL) {
        /* out of range ? */
        panic("vm_compressor_pager_put: out of range");
    }
    if (*slot_p != 0) {
        /*
         * Already compressed: forget about the old one.
         *
         * This can happen after a vm_object_do_collapse() when
         * the "backing_object" had some pages paged out and the
         * "object" had an equivalent page resident.
         */
        vm_compressor_free(slot_p, 0);
        *compressed_count_delta_p -= 1;
    }

    /*
     * If the compressor operation succeeds, we presumably don't need to
     * undo any previous WIMG update, as all live mappings should be
     * disconnected.
     */

    if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
        return KERN_RESOURCE_SHORTAGE;
    }
    *compressed_count_delta_p += 1;

    return KERN_SUCCESS;
}


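/*
 * Decompress the page at "offset" into the physical page "ppnum".
 * On success the slot is normally freed (the caller's compressed count
 * goes down by one) unless the compressor kept the compressed copy, e.g.
 * for a copy-on-write fault.  "my_fault_type" distinguishes a plain
 * compressor fault from one that had to swap the segment back in first.
 */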
kern_return_t
vm_compressor_pager_get(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    int *my_fault_type,
    int flags,
    int *compressed_count_delta_p)
{
    compressor_pager_t pager;
    kern_return_t kr;
    compressor_slot_t *slot_p;

    compressor_pager_stats.get++;

    *compressed_count_delta_p = 0;

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return KERN_MEMORY_ERROR;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        kr = KERN_MEMORY_FAILURE;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        kr = KERN_MEMORY_ERROR;
    } else {
        /* compressor does have this page */
        kr = KERN_SUCCESS;
    }
    *my_fault_type = DBG_COMPRESSOR_FAULT;

    if (kr == KERN_SUCCESS) {
        int retval;

        /* get the page from the compressor */
        retval = vm_compressor_get(ppnum, slot_p, flags);
        if (retval == -1) {
            kr = KERN_MEMORY_FAILURE;
        } else if (retval == 1) {
            *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
        } else if (retval == -2) {
            assert((flags & C_DONT_BLOCK));
            kr = KERN_FAILURE;
        }
    }

    if (kr == KERN_SUCCESS) {
        assert(slot_p != NULL);
        if (*slot_p != 0) {
            /*
             * We got the page for a copy-on-write fault
             * and we kept the original in place.  Slot
             * is still occupied.
             */
        } else {
            *compressed_count_delta_p -= 1;
        }
    }

    return kr;
}

unsigned int
vm_compressor_pager_state_clr(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;
    unsigned int num_slots_freed;

    assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

    compressor_pager_stats.state_clr++;

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return 0;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    num_slots_freed = 0;
    if (slot_p && *slot_p != 0) {
        vm_compressor_free(slot_p, 0);
        num_slots_freed++;
        assert(*slot_p == 0);
    }

    return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

    compressor_pager_stats.state_get++;

    if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
            __FUNCTION__, (uint64_t) offset);
        return VM_EXTERNAL_STATE_ABSENT;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        return VM_EXTERNAL_STATE_ABSENT;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        return VM_EXTERNAL_STATE_ABSENT;
    } else {
        /* compressor does have this page */
        return VM_EXTERNAL_STATE_EXISTS;
    }
}

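/*
 * Free the compressor state for every page of this memory object, subject
 * to "flags" (e.g. C_DONT_BLOCK).  Indirect chunks whose slots were all
 * freed successfully are released as well.  Returns the number of slots
 * that were actually freed.
 */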
unsigned int
vm_compressor_pager_reap_pages(
    memory_object_t mem_obj,
    int flags)
{
    compressor_pager_t pager;
    int num_chunks;
    int failures;
    int i;
    compressor_slot_t *chunk;
    unsigned int num_slots_freed;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return 0;
    }

    compressor_pager_lock(pager);

    /* reap the compressor slots */
    num_slots_freed = 0;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        for (i = 0; i < num_chunks; i++) {
            chunk = pager->cpgr_slots.cpgr_islots[i];
            if (chunk != NULL) {
                num_slots_freed +=
                    compressor_pager_slots_chunk_free(
                        chunk,
                        COMPRESSOR_SLOTS_PER_CHUNK,
                        flags,
                        &failures);
                if (failures == 0) {
                    pager->cpgr_slots.cpgr_islots[i] = NULL;
                    zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
                }
            }
        }
    } else if (pager->cpgr_num_slots > 2) {
        chunk = pager->cpgr_slots.cpgr_dslots;
        num_slots_freed +=
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                flags,
                NULL);
    } else {
        chunk = &pager->cpgr_slots.cpgr_eslots[0];
        num_slots_freed +=
            compressor_pager_slots_chunk_free(
                chunk,
                pager->cpgr_num_slots,
                flags,
                NULL);
    }

    compressor_pager_unlock(pager);

    return num_slots_freed;
}

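/*
 * Move the compressed copy of one page from the source pager/offset to the
 * destination pager/offset (which must currently be empty), adjusting each
 * pager's count of occupied slots accordingly.
 */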
void
vm_compressor_pager_transfer(
    memory_object_t dst_mem_obj,
    memory_object_offset_t dst_offset,
    memory_object_t src_mem_obj,
    memory_object_offset_t src_offset)
{
    compressor_pager_t src_pager, dst_pager;
    compressor_slot_t *src_slot_p, *dst_slot_p;

    compressor_pager_stats.transfer++;

    /* find the compressor slot for the destination */
    assert((uint32_t) dst_offset == dst_offset);
    compressor_pager_lookup(dst_mem_obj, dst_pager);
    assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
    compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
        &dst_slot_p);
    assert(dst_slot_p != NULL);
    assert(*dst_slot_p == 0);

    /* find the compressor slot for the source */
    assert((uint32_t) src_offset == src_offset);
    compressor_pager_lookup(src_mem_obj, src_pager);
    assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
    compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
        &src_slot_p);
    assert(src_slot_p != NULL);
    assert(*src_slot_p != 0);

    /* transfer the slot from source to destination */
    vm_compressor_transfer(dst_slot_p, src_slot_p);
    OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
    OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}

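/*
 * Return the object-relative offset of the first page at or after "offset"
 * that has a compressed copy, or (memory_object_offset_t) -1 when there is
 * none (or when "offset" is out of range).
 */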
memory_object_offset_t
vm_compressor_pager_next_compressed(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    uint32_t num_chunks;
    uint32_t page_num;
    uint32_t chunk_idx;
    uint32_t slot_idx;
    compressor_slot_t *chunk;

    compressor_pager_lookup(mem_obj, pager);

    page_num = (uint32_t)(offset / PAGE_SIZE);
    if (page_num != (offset / PAGE_SIZE)) {
        /* overflow */
        return (memory_object_offset_t) -1;
    }
    if (page_num >= pager->cpgr_num_slots) {
        /* out of range */
        return (memory_object_offset_t) -1;
    }

    num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
        COMPRESSOR_SLOTS_PER_CHUNK);

    if (num_chunks == 1) {
        if (pager->cpgr_num_slots > 2) {
            chunk = pager->cpgr_slots.cpgr_dslots;
        } else {
            chunk = &pager->cpgr_slots.cpgr_eslots[0];
        }
        for (slot_idx = page_num;
            slot_idx < pager->cpgr_num_slots;
            slot_idx++) {
            if (chunk[slot_idx] != 0) {
                /* found a non-NULL slot in this chunk */
                return (memory_object_offset_t) (slot_idx *
                       PAGE_SIZE);
            }
        }
        return (memory_object_offset_t) -1;
    }

    /* we have an array of chunks; find the next non-NULL chunk */
    chunk = NULL;
    for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
        slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
        chunk_idx < num_chunks;
        chunk_idx++,
        slot_idx = 0) {
        chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
        if (chunk == NULL) {
            /* no chunk here: try the next one */
            continue;
        }
        /* search for an occupied slot in this chunk */
        for (;
            slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
            slot_idx++) {
            if (chunk[slot_idx] != 0) {
                /* found an occupied slot in this chunk */
                uint32_t next_slot;

                next_slot = ((chunk_idx *
                    COMPRESSOR_SLOTS_PER_CHUNK) +
                    slot_idx);
                if (next_slot >= pager->cpgr_num_slots) {
                    /* went beyond end of object */
                    return (memory_object_offset_t) -1;
                }
                return (memory_object_offset_t) (next_slot *
                       PAGE_SIZE);
            }
        }
    }
    return (memory_object_offset_t) -1;
}

unsigned int
vm_compressor_pager_get_count(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return 0;
    }

    /*
     * The caller should have the VM object locked and one
     * needs that lock to do a page-in or page-out, so no
     * need to lock the pager here.
     */
    assert(pager->cpgr_num_slots_occupied >= 0);

    return pager->cpgr_num_slots_occupied;
}

void
vm_compressor_pager_count(
    memory_object_t mem_obj,
    int compressed_count_delta,
    boolean_t shared_lock,
    vm_object_t object __unused)
{
    compressor_pager_t pager;

    if (compressed_count_delta == 0) {
        return;
    }

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL) {
        return;
    }

    if (compressed_count_delta < 0) {
        assert(pager->cpgr_num_slots_occupied >=
            (unsigned int) -compressed_count_delta);
    }

    /*
     * The caller should have the VM object locked,
     * shared or exclusive.
     */
    if (shared_lock) {
        vm_object_lock_assert_shared(object);
        OSAddAtomic(compressed_count_delta,
            &pager->cpgr_num_slots_occupied);
    } else {
        vm_object_lock_assert_exclusive(object);
        pager->cpgr_num_slots_occupied += compressed_count_delta;
    }
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    void **current_chead)
{
    /*
     * Has the page at this offset been compressed?
     */

    compressor_slot_t *slot_p;
    compressor_pager_t dst_pager;

    assert(mem_obj);

    compressor_pager_lookup(mem_obj, dst_pager);
    if (dst_pager == NULL) {
        return KERN_FAILURE;
    }

    compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
    return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */

#if DEVELOPMENT || DEBUG

kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    kern_return_t result = KERN_FAILURE;
    compressor_slot_t *slot_p;
    compressor_pager_t pager;

    assert(mem_obj);

    compressor_pager_lookup(mem_obj, pager);
    if (pager != NULL) {
        compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
        if (slot_p != NULL && *slot_p != 0) {
            vm_compressor_inject_error(slot_p);
            result = KERN_SUCCESS;
        }
    }

    return result;
}

#endif /* DEVELOPMENT || DEBUG */