/* osfmk/vm/vm_compressor_pager.c — from Apple XNU (xnu-7195.101.1) */
/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>

#include <machine/atomic.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

78/* memory_object interfaces */
79void compressor_memory_object_reference(memory_object_t mem_obj);
80void compressor_memory_object_deallocate(memory_object_t mem_obj);
81kern_return_t compressor_memory_object_init(
0a7de745
A
82 memory_object_t mem_obj,
83 memory_object_control_t control,
39236c6e
A
84 memory_object_cluster_size_t pager_page_size);
85kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
86kern_return_t compressor_memory_object_data_request(
0a7de745
A
87 memory_object_t mem_obj,
88 memory_object_offset_t offset,
89 memory_object_cluster_size_t length,
90 __unused vm_prot_t protection_required,
91 memory_object_fault_info_t fault_info);
39236c6e 92kern_return_t compressor_memory_object_data_return(
0a7de745
A
93 memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t size,
96 __unused memory_object_offset_t *resid_offset,
97 __unused int *io_error,
98 __unused boolean_t dirty,
99 __unused boolean_t kernel_copy,
100 __unused int upl_flags);
39236c6e 101kern_return_t compressor_memory_object_data_initialize(
0a7de745
A
102 memory_object_t mem_obj,
103 memory_object_offset_t offset,
104 memory_object_cluster_size_t size);
39236c6e 105kern_return_t compressor_memory_object_data_unlock(
0a7de745
A
106 __unused memory_object_t mem_obj,
107 __unused memory_object_offset_t offset,
108 __unused memory_object_size_t size,
109 __unused vm_prot_t desired_access);
39236c6e 110kern_return_t compressor_memory_object_synchronize(
0a7de745
A
111 memory_object_t mem_obj,
112 memory_object_offset_t offset,
113 memory_object_size_t length,
114 __unused vm_sync_t flags);
39236c6e 115kern_return_t compressor_memory_object_map(
0a7de745
A
116 __unused memory_object_t mem_obj,
117 __unused vm_prot_t prot);
39236c6e
A
118kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
119kern_return_t compressor_memory_object_data_reclaim(
0a7de745
A
120 __unused memory_object_t mem_obj,
121 __unused boolean_t reclaim_backing_store);
39236c6e
A
122
123const struct memory_object_pager_ops compressor_pager_ops = {
cb323159
A
124 .memory_object_reference = compressor_memory_object_reference,
125 .memory_object_deallocate = compressor_memory_object_deallocate,
126 .memory_object_init = compressor_memory_object_init,
127 .memory_object_terminate = compressor_memory_object_terminate,
128 .memory_object_data_request = compressor_memory_object_data_request,
129 .memory_object_data_return = compressor_memory_object_data_return,
130 .memory_object_data_initialize = compressor_memory_object_data_initialize,
131 .memory_object_data_unlock = compressor_memory_object_data_unlock,
132 .memory_object_synchronize = compressor_memory_object_synchronize,
133 .memory_object_map = compressor_memory_object_map,
134 .memory_object_last_unmap = compressor_memory_object_last_unmap,
135 .memory_object_data_reclaim = compressor_memory_object_data_reclaim,
a991bd8d 136 .memory_object_backing_object = NULL,
cb323159 137 .memory_object_pager_name = "compressor pager"
39236c6e
A
138};
139
140/* internal data structures */
141
142struct {
0a7de745
A
143 uint64_t data_returns;
144 uint64_t data_requests;
145 uint64_t put;
146 uint64_t get;
147 uint64_t state_clr;
148 uint64_t state_get;
149 uint64_t transfer;
39236c6e
A
150} compressor_pager_stats;
151
152typedef int compressor_slot_t;
153
154typedef struct compressor_pager {
5ba3f43e
A
155 /* mandatory generic header */
156 struct memory_object cpgr_hdr;
39236c6e 157
5ba3f43e 158 /* pager-specific data */
0a7de745 159 lck_mtx_t cpgr_lock;
c3c9b80d
A
160#if MEMORY_OBJECT_HAS_REFCOUNT
161#define cpgr_references cpgr_hdr.mo_ref
162#else
163 os_ref_atomic_t cpgr_references;
164#endif
0a7de745
A
165 unsigned int cpgr_num_slots;
166 unsigned int cpgr_num_slots_occupied;
39236c6e 167 union {
0a7de745
A
168 compressor_slot_t cpgr_eslots[2]; /* embedded slots */
169 compressor_slot_t *cpgr_dslots; /* direct slots */
170 compressor_slot_t **cpgr_islots; /* indirect slots */
39236c6e
A
171 } cpgr_slots;
172} *compressor_pager_t;
173
0a7de745
A
174#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
175 MACRO_BEGIN \
176 if (_mem_obj_ == NULL || \
177 _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
178 _cpgr_ = NULL; \
179 } else { \
180 _cpgr_ = (compressor_pager_t) _mem_obj_; \
181 } \
39236c6e
A
182 MACRO_END
183
184zone_t compressor_pager_zone;
185
f427ee49 186LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");
39236c6e
A
187
188#define compressor_pager_lock(_cpgr_) \
189 lck_mtx_lock(&(_cpgr_)->cpgr_lock)
190#define compressor_pager_unlock(_cpgr_) \
191 lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
192#define compressor_pager_lock_init(_cpgr_) \
f427ee49 193 lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
39236c6e
A
194#define compressor_pager_lock_destroy(_cpgr_) \
195 lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
196
0a7de745
A
197#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
198#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
39236c6e
A
199
200/* forward declarations */
fe8ab488 201unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
0a7de745
A
202 int num_slots,
203 int flags,
204 int *failures);
39236c6e 205void compressor_pager_slot_lookup(
0a7de745
A
206 compressor_pager_t pager,
207 boolean_t do_alloc,
208 memory_object_offset_t offset,
209 compressor_slot_t **slot_pp);
39236c6e 210
f427ee49
A
211#if defined(__LP64__)
212
213/* restricted VA zones for slots */
214
215#define NUM_SLOTS_ZONES 3
216
217static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
218 16,
219 64,
220 COMPRESSOR_SLOTS_CHUNK_SIZE
221};
222
223static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
224 "compressor_slots.16",
225 "compressor_slots.64",
226 "compressor_slots.512"
227};
228
229static zone_t
230 compressor_slots_zones[NUM_SLOTS_ZONES];
231
232#endif /* defined(__LP64__) */
233
234static void
235zfree_slot_array(compressor_slot_t *slots, size_t size);
236static compressor_slot_t *
237zalloc_slot_array(size_t size, zalloc_flags_t);
238
239
39236c6e
A
240kern_return_t
241compressor_memory_object_init(
0a7de745
A
242 memory_object_t mem_obj,
243 memory_object_control_t control,
39236c6e
A
244 __unused memory_object_cluster_size_t pager_page_size)
245{
0a7de745 246 compressor_pager_t pager;
39236c6e
A
247
248 assert(pager_page_size == PAGE_SIZE);
249
250 memory_object_control_reference(control);
251
252 compressor_pager_lookup(mem_obj, pager);
253 compressor_pager_lock(pager);
254
0a7de745 255 if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
39236c6e 256 panic("compressor_memory_object_init: bad request");
0a7de745 257 }
5ba3f43e 258 pager->cpgr_hdr.mo_control = control;
39236c6e
A
259
260 compressor_pager_unlock(pager);
261
262 return KERN_SUCCESS;
263}
264
265kern_return_t
266compressor_memory_object_synchronize(
5ba3f43e 267 __unused memory_object_t mem_obj,
0a7de745
A
268 __unused memory_object_offset_t offset,
269 __unused memory_object_size_t length,
270 __unused vm_sync_t flags)
39236c6e 271{
5ba3f43e
A
272 panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
273 return KERN_FAILURE;
39236c6e
A
274}
275
276kern_return_t
277compressor_memory_object_map(
0a7de745
A
278 __unused memory_object_t mem_obj,
279 __unused vm_prot_t prot)
39236c6e
A
280{
281 panic("compressor_memory_object_map");
282 return KERN_FAILURE;
283}
284
285kern_return_t
286compressor_memory_object_last_unmap(
0a7de745 287 __unused memory_object_t mem_obj)
39236c6e
A
288{
289 panic("compressor_memory_object_last_unmap");
290 return KERN_FAILURE;
291}
292
293kern_return_t
294compressor_memory_object_data_reclaim(
0a7de745
A
295 __unused memory_object_t mem_obj,
296 __unused boolean_t reclaim_backing_store)
39236c6e
A
297{
298 panic("compressor_memory_object_data_reclaim");
299 return KERN_FAILURE;
300}
301
302kern_return_t
303compressor_memory_object_terminate(
0a7de745 304 memory_object_t mem_obj)
39236c6e 305{
0a7de745
A
306 memory_object_control_t control;
307 compressor_pager_t pager;
39236c6e 308
0a7de745 309 /*
39236c6e
A
310 * control port is a receive right, not a send right.
311 */
312
313 compressor_pager_lookup(mem_obj, pager);
314 compressor_pager_lock(pager);
315
316 /*
317 * After memory_object_terminate both memory_object_init
318 * and a no-senders notification are possible, so we need
319 * to clean up our reference to the memory_object_control
320 * to prepare for a new init.
321 */
322
5ba3f43e
A
323 control = pager->cpgr_hdr.mo_control;
324 pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
39236c6e
A
325
326 compressor_pager_unlock(pager);
327
328 /*
329 * Now we deallocate our reference on the control.
330 */
331 memory_object_control_deallocate(control);
332 return KERN_SUCCESS;
333}
334
335void
336compressor_memory_object_reference(
0a7de745 337 memory_object_t mem_obj)
39236c6e 338{
0a7de745 339 compressor_pager_t pager;
39236c6e
A
340
341 compressor_pager_lookup(mem_obj, pager);
0a7de745 342 if (pager == NULL) {
39236c6e 343 return;
0a7de745 344 }
39236c6e
A
345
346 compressor_pager_lock(pager);
c3c9b80d 347 os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
39236c6e
A
348 compressor_pager_unlock(pager);
349}
350
351void
352compressor_memory_object_deallocate(
0a7de745 353 memory_object_t mem_obj)
39236c6e 354{
0a7de745
A
355 compressor_pager_t pager;
356 unsigned int num_slots_freed;
39236c6e
A
357
358 /*
359 * Because we don't give out multiple first references
360 * for a memory object, there can't be a race
361 * between getting a deallocate call and creating
362 * a new reference for the object.
363 */
364
365 compressor_pager_lookup(mem_obj, pager);
0a7de745 366 if (pager == NULL) {
39236c6e 367 return;
0a7de745 368 }
39236c6e
A
369
370 compressor_pager_lock(pager);
c3c9b80d 371 if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
39236c6e
A
372 compressor_pager_unlock(pager);
373 return;
374 }
375
376 /*
377 * We shouldn't get a deallocation call
378 * when the kernel has the object cached.
379 */
0a7de745 380 if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
39236c6e 381 panic("compressor_memory_object_deallocate(): bad request");
0a7de745 382 }
39236c6e
A
383
384 /*
385 * Unlock the pager (though there should be no one
386 * waiting for it).
387 */
388 compressor_pager_unlock(pager);
389
390 /* free the compressor slots */
391 int num_chunks;
392 int i;
393 compressor_slot_t *chunk;
394
0a7de745 395 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
39236c6e
A
396 if (num_chunks > 1) {
397 /* we have an array of chunks */
398 for (i = 0; i < num_chunks; i++) {
399 chunk = pager->cpgr_slots.cpgr_islots[i];
400 if (chunk != NULL) {
fe8ab488 401 num_slots_freed =
0a7de745
A
402 compressor_pager_slots_chunk_free(
403 chunk,
404 COMPRESSOR_SLOTS_PER_CHUNK,
405 0,
406 NULL);
39236c6e 407 pager->cpgr_slots.cpgr_islots[i] = NULL;
f427ee49 408 zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
39236c6e
A
409 }
410 }
f427ee49 411 kheap_free(KHEAP_DEFAULT, pager->cpgr_slots.cpgr_islots,
0a7de745 412 num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
39236c6e 413 pager->cpgr_slots.cpgr_islots = NULL;
3e170ce0 414 } else if (pager->cpgr_num_slots > 2) {
39236c6e 415 chunk = pager->cpgr_slots.cpgr_dslots;
fe8ab488 416 num_slots_freed =
0a7de745
A
417 compressor_pager_slots_chunk_free(
418 chunk,
419 pager->cpgr_num_slots,
420 0,
421 NULL);
39236c6e 422 pager->cpgr_slots.cpgr_dslots = NULL;
f427ee49 423 zfree_slot_array(chunk,
0a7de745
A
424 (pager->cpgr_num_slots *
425 sizeof(pager->cpgr_slots.cpgr_dslots[0])));
3e170ce0
A
426 } else {
427 chunk = &pager->cpgr_slots.cpgr_eslots[0];
428 num_slots_freed =
0a7de745
A
429 compressor_pager_slots_chunk_free(
430 chunk,
431 pager->cpgr_num_slots,
432 0,
433 NULL);
39236c6e
A
434 }
435
436 compressor_pager_lock_destroy(pager);
437 zfree(compressor_pager_zone, pager);
438}
439
440kern_return_t
441compressor_memory_object_data_request(
0a7de745
A
442 memory_object_t mem_obj,
443 memory_object_offset_t offset,
444 memory_object_cluster_size_t length,
445 __unused vm_prot_t protection_required,
446 __unused memory_object_fault_info_t fault_info)
39236c6e 447{
0a7de745
A
448 compressor_pager_t pager;
449 kern_return_t kr;
450 compressor_slot_t *slot_p;
451
39236c6e
A
452 compressor_pager_stats.data_requests++;
453
454 /*
455 * Request must be on a page boundary and a multiple of pages.
456 */
0a7de745 457 if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
39236c6e 458 panic("compressor_memory_object_data_request(): bad alignment");
0a7de745 459 }
39236c6e 460
0a7de745 461 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c 462 panic("%s: offset 0x%llx overflow\n",
0a7de745 463 __FUNCTION__, (uint64_t) offset);
22ba694c
A
464 return KERN_FAILURE;
465 }
39236c6e
A
466
467 compressor_pager_lookup(mem_obj, pager);
468
469 if (length == 0) {
470 /* we're only querying the pager for this page */
471 } else {
472 panic("compressor: data_request");
473 }
474
475 /* find the compressor slot for that page */
22ba694c 476 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 477
5ba3f43e 478 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
39236c6e
A
479 /* out of range */
480 kr = KERN_FAILURE;
481 } else if (slot_p == NULL || *slot_p == 0) {
482 /* compressor does not have this page */
483 kr = KERN_FAILURE;
484 } else {
485 /* compressor does have this page */
486 kr = KERN_SUCCESS;
487 }
488 return kr;
489}
490
491/*
492 * memory_object_data_initialize: check whether we already have each page, and
493 * write it if we do not. The implementation is far from optimized, and
494 * also assumes that the default_pager is single-threaded.
495 */
496/* It is questionable whether or not a pager should decide what is relevant */
497/* and what is not in data sent from the kernel. Data initialize has been */
498/* changed to copy back all data sent to it in preparation for its eventual */
499/* merge with data return. It is the kernel that should decide what pages */
500/* to write back. As of the writing of this note, this is indeed the case */
501/* the kernel writes back one page at a time through this interface */
502
503kern_return_t
504compressor_memory_object_data_initialize(
0a7de745
A
505 memory_object_t mem_obj,
506 memory_object_offset_t offset,
507 memory_object_cluster_size_t size)
39236c6e 508{
0a7de745
A
509 compressor_pager_t pager;
510 memory_object_offset_t cur_offset;
39236c6e
A
511
512 compressor_pager_lookup(mem_obj, pager);
513 compressor_pager_lock(pager);
514
515 for (cur_offset = offset;
0a7de745
A
516 cur_offset < offset + size;
517 cur_offset += PAGE_SIZE) {
39236c6e
A
518 panic("do a data_return() if slot for this page is empty");
519 }
520
521 compressor_pager_unlock(pager);
522
523 return KERN_SUCCESS;
524}
525
526kern_return_t
527compressor_memory_object_data_unlock(
0a7de745
A
528 __unused memory_object_t mem_obj,
529 __unused memory_object_offset_t offset,
530 __unused memory_object_size_t size,
531 __unused vm_prot_t desired_access)
39236c6e
A
532{
533 panic("compressor_memory_object_data_unlock()");
534 return KERN_FAILURE;
535}
536
537
538/*ARGSUSED*/
539kern_return_t
540compressor_memory_object_data_return(
0a7de745
A
541 __unused memory_object_t mem_obj,
542 __unused memory_object_offset_t offset,
543 __unused memory_object_cluster_size_t size,
544 __unused memory_object_offset_t *resid_offset,
545 __unused int *io_error,
546 __unused boolean_t dirty,
547 __unused boolean_t kernel_copy,
548 __unused int upl_flags)
39236c6e
A
549{
550 panic("compressor: data_return");
551 return KERN_FAILURE;
552}
553
554/*
555 * Routine: default_pager_memory_object_create
556 * Purpose:
0a7de745
A
557 * Handle requests for memory objects from the
558 * kernel.
39236c6e 559 * Notes:
0a7de745
A
560 * Because we only give out the default memory
561 * manager port to the kernel, we don't have to
562 * be so paranoid about the contents.
39236c6e
A
563 */
564kern_return_t
565compressor_memory_object_create(
0a7de745
A
566 memory_object_size_t new_size,
567 memory_object_t *new_mem_obj)
39236c6e 568{
0a7de745
A
569 compressor_pager_t pager;
570 int num_chunks;
39236c6e 571
0a7de745 572 if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
22ba694c
A
573 /* 32-bit overflow for number of pages */
574 panic("%s: size 0x%llx overflow\n",
0a7de745 575 __FUNCTION__, (uint64_t) new_size);
39236c6e
A
576 return KERN_INVALID_ARGUMENT;
577 }
578
579 pager = (compressor_pager_t) zalloc(compressor_pager_zone);
580 if (pager == NULL) {
581 return KERN_RESOURCE_SHORTAGE;
582 }
583
584 compressor_pager_lock_init(pager);
c3c9b80d 585 os_ref_init_raw(&pager->cpgr_references, NULL);
0a7de745 586 pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
fe8ab488 587 pager->cpgr_num_slots_occupied = 0;
39236c6e
A
588
589 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
590 if (num_chunks > 1) {
f427ee49
A
591 pager->cpgr_slots.cpgr_islots = kheap_alloc(KHEAP_DEFAULT,
592 num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]),
593 Z_WAITOK | Z_ZERO);
3e170ce0 594 } else if (pager->cpgr_num_slots > 2) {
f427ee49
A
595 pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
596 sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
3e170ce0
A
597 } else {
598 pager->cpgr_slots.cpgr_eslots[0] = 0;
599 pager->cpgr_slots.cpgr_eslots[1] = 0;
39236c6e
A
600 }
601
602 /*
603 * Set up associations between this memory object
604 * and this compressor_pager structure
605 */
5ba3f43e
A
606 pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
607 pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
608 pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
39236c6e
A
609
610 *new_mem_obj = (memory_object_t) pager;
611 return KERN_SUCCESS;
612}
613
614
fe8ab488 615unsigned int
39236c6e 616compressor_pager_slots_chunk_free(
0a7de745
A
617 compressor_slot_t *chunk,
618 int num_slots,
619 int flags,
620 int *failures)
39236c6e 621{
39236c6e 622 int i;
3e170ce0 623 int retval;
fe8ab488
A
624 unsigned int num_slots_freed;
625
0a7de745 626 if (failures) {
fe8ab488 627 *failures = 0;
0a7de745 628 }
fe8ab488 629 num_slots_freed = 0;
39236c6e
A
630 for (i = 0; i < num_slots; i++) {
631 if (chunk[i] != 0) {
3e170ce0
A
632 retval = vm_compressor_free(&chunk[i], flags);
633
0a7de745 634 if (retval == 0) {
fe8ab488 635 num_slots_freed++;
0a7de745
A
636 } else {
637 if (retval == -2) {
3e170ce0 638 assert(flags & C_DONT_BLOCK);
0a7de745 639 }
fe8ab488 640
0a7de745 641 if (failures) {
fe8ab488 642 *failures += 1;
0a7de745 643 }
fe8ab488 644 }
39236c6e
A
645 }
646 }
fe8ab488 647 return num_slots_freed;
39236c6e
A
648}
649
650void
651compressor_pager_slot_lookup(
0a7de745
A
652 compressor_pager_t pager,
653 boolean_t do_alloc,
654 memory_object_offset_t offset,
655 compressor_slot_t **slot_pp)
39236c6e 656{
0a7de745
A
657 int num_chunks;
658 uint32_t page_num;
659 int chunk_idx;
660 int slot_idx;
661 compressor_slot_t *chunk;
662 compressor_slot_t *t_chunk;
663
664 page_num = (uint32_t)(offset / PAGE_SIZE);
665 if (page_num != (offset / PAGE_SIZE)) {
22ba694c
A
666 /* overflow */
667 panic("%s: offset 0x%llx overflow\n",
0a7de745 668 __FUNCTION__, (uint64_t) offset);
22ba694c
A
669 *slot_pp = NULL;
670 return;
671 }
5ba3f43e 672 if (page_num >= pager->cpgr_num_slots) {
39236c6e
A
673 /* out of range */
674 *slot_pp = NULL;
675 return;
676 }
677 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
678 if (num_chunks > 1) {
679 /* we have an array of chunks */
680 chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
681 chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
682
683 if (chunk == NULL && do_alloc) {
f427ee49
A
684 t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
685 Z_WAITOK | Z_ZERO);
39236c6e
A
686
687 compressor_pager_lock(pager);
688
689 if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
cc8bc92a
A
690 /*
691 * On some platforms, the memory stores from
692 * the bzero(t_chunk) above might not have been
693 * made visible and another thread might see
694 * the contents of this new chunk before it's
695 * been fully zero-filled.
696 * This memory barrier should take care of this
697 * according to the platform requirements.
698 */
cb323159 699 os_atomic_thread_fence(release);
cc8bc92a 700
39236c6e
A
701 chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
702 t_chunk = NULL;
703 }
704 compressor_pager_unlock(pager);
0a7de745
A
705
706 if (t_chunk) {
f427ee49 707 zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
0a7de745 708 }
39236c6e
A
709 }
710 if (chunk == NULL) {
711 *slot_pp = NULL;
712 } else {
713 slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
714 *slot_pp = &chunk[slot_idx];
715 }
3e170ce0 716 } else if (pager->cpgr_num_slots > 2) {
39236c6e
A
717 slot_idx = page_num;
718 *slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
3e170ce0
A
719 } else {
720 slot_idx = page_num;
721 *slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
39236c6e
A
722 }
723}
724
725void
726vm_compressor_pager_init(void)
727{
f427ee49
A
728 /* embedded slot pointers in compressor_pager get packed, so VA restricted */
729 compressor_pager_zone = zone_create_ext("compressor_pager",
730 sizeof(struct compressor_pager), ZC_NOENCRYPT,
731 ZONE_ID_ANY, ^(zone_t z){
732#if defined(__LP64__)
c3c9b80d 733 zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
f427ee49
A
734#else
735 (void)z;
736#endif /* defined(__LP64__) */
737 });
738
739#if defined(__LP64__)
740 for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
741 compressor_slots_zones[idx] = zone_create_ext(
742 compressor_slots_zones_names[idx],
743 compressor_slots_zones_sizes[idx], ZC_NONE,
744 ZONE_ID_ANY, ^(zone_t z){
c3c9b80d 745 zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
f427ee49
A
746 });
747 }
748#endif /* defined(__LP64__) */
39236c6e
A
749
750 vm_compressor_init();
751}
752
f427ee49
A
753static compressor_slot_t *
754zalloc_slot_array(size_t size, zalloc_flags_t flags)
755{
756#if defined(__LP64__)
757 compressor_slot_t *slots = NULL;
758
759 assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
760 for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
761 if (size > compressor_slots_zones_sizes[idx]) {
762 continue;
763 }
764 slots = zalloc_flags(compressor_slots_zones[idx], flags);
765 break;
766 }
767 return slots;
768#else /* defined(__LP64__) */
769 return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags);
770#endif /* !defined(__LP64__) */
771}
772
773static void
774zfree_slot_array(compressor_slot_t *slots, size_t size)
775{
776#if defined(__LP64__)
777 assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
778 for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
779 if (size > compressor_slots_zones_sizes[idx]) {
780 continue;
781 }
782 zfree(compressor_slots_zones[idx], slots);
783 break;
784 }
785#else /* defined(__LP64__) */
786 kheap_free(KHEAP_DATA_BUFFERS, slots, size);
787#endif /* !defined(__LP64__) */
788}
789
39236c6e
A
790kern_return_t
791vm_compressor_pager_put(
0a7de745
A
792 memory_object_t mem_obj,
793 memory_object_offset_t offset,
794 ppnum_t ppnum,
795 void **current_chead,
796 char *scratch_buf,
797 int *compressed_count_delta_p)
39236c6e 798{
0a7de745
A
799 compressor_pager_t pager;
800 compressor_slot_t *slot_p;
39236c6e 801
fe8ab488
A
802 compressor_pager_stats.put++;
803
804 *compressed_count_delta_p = 0;
39236c6e
A
805
806 /* This routine is called by the pageout thread. The pageout thread */
807 /* cannot be blocked by read activities unless the read activities */
808 /* Therefore the grant of vs lock must be done on a try versus a */
809 /* blocking basis. The code below relies on the fact that the */
810 /* interface is synchronous. Should this interface be again async */
811 /* for some type of pager in the future the pages will have to be */
812 /* returned through a separate, asynchronous path. */
813
814 compressor_pager_lookup(mem_obj, pager);
815
0a7de745 816 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c
A
817 /* overflow */
818 panic("%s: offset 0x%llx overflow\n",
0a7de745 819 __FUNCTION__, (uint64_t) offset);
22ba694c
A
820 return KERN_RESOURCE_SHORTAGE;
821 }
39236c6e 822
22ba694c 823 compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);
39236c6e
A
824
825 if (slot_p == NULL) {
826 /* out of range ? */
fe8ab488 827 panic("vm_compressor_pager_put: out of range");
39236c6e
A
828 }
829 if (*slot_p != 0) {
830 /*
831 * Already compressed: forget about the old one.
832 *
833 * This can happen after a vm_object_do_collapse() when
834 * the "backing_object" had some pages paged out and the
835 * "object" had an equivalent page resident.
836 */
fe8ab488 837 vm_compressor_free(slot_p, 0);
fe8ab488 838 *compressed_count_delta_p -= 1;
39236c6e 839 }
d9a64523 840
d9a64523
A
841 /*
842 * If the compressor operation succeeds, we presumably don't need to
843 * undo any previous WIMG update, as all live mappings should be
844 * disconnected.
845 */
846
847 if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
0a7de745 848 return KERN_RESOURCE_SHORTAGE;
e8c3f781 849 }
fe8ab488 850 *compressed_count_delta_p += 1;
39236c6e 851
d9a64523 852 return KERN_SUCCESS;
39236c6e
A
853}
854
855
856kern_return_t
857vm_compressor_pager_get(
0a7de745
A
858 memory_object_t mem_obj,
859 memory_object_offset_t offset,
860 ppnum_t ppnum,
861 int *my_fault_type,
862 int flags,
863 int *compressed_count_delta_p)
39236c6e 864{
0a7de745
A
865 compressor_pager_t pager;
866 kern_return_t kr;
867 compressor_slot_t *slot_p;
868
fe8ab488
A
869 compressor_pager_stats.get++;
870
871 *compressed_count_delta_p = 0;
39236c6e 872
0a7de745 873 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c 874 panic("%s: offset 0x%llx overflow\n",
0a7de745 875 __FUNCTION__, (uint64_t) offset);
22ba694c
A
876 return KERN_MEMORY_ERROR;
877 }
39236c6e
A
878
879 compressor_pager_lookup(mem_obj, pager);
880
881 /* find the compressor slot for that page */
22ba694c 882 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 883
5ba3f43e 884 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
39236c6e
A
885 /* out of range */
886 kr = KERN_MEMORY_FAILURE;
887 } else if (slot_p == NULL || *slot_p == 0) {
888 /* compressor does not have this page */
889 kr = KERN_MEMORY_ERROR;
890 } else {
891 /* compressor does have this page */
892 kr = KERN_SUCCESS;
893 }
894 *my_fault_type = DBG_COMPRESSOR_FAULT;
d9a64523 895
0a7de745
A
896 if (kr == KERN_SUCCESS) {
897 int retval;
d9a64523 898
39236c6e 899 /* get the page from the compressor */
fe8ab488 900 retval = vm_compressor_get(ppnum, slot_p, flags);
0a7de745 901 if (retval == -1) {
39236c6e 902 kr = KERN_MEMORY_FAILURE;
0a7de745 903 } else if (retval == 1) {
39236c6e 904 *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
0a7de745 905 } else if (retval == -2) {
39236c6e
A
906 assert((flags & C_DONT_BLOCK));
907 kr = KERN_FAILURE;
908 }
909 }
fe8ab488
A
910
911 if (kr == KERN_SUCCESS) {
912 assert(slot_p != NULL);
913 if (*slot_p != 0) {
914 /*
915 * We got the page for a copy-on-write fault
916 * and we kept the original in place. Slot
917 * is still occupied.
918 */
919 } else {
fe8ab488
A
920 *compressed_count_delta_p -= 1;
921 }
922 }
923
39236c6e
A
924 return kr;
925}
926
fe8ab488 927unsigned int
39236c6e 928vm_compressor_pager_state_clr(
0a7de745
A
929 memory_object_t mem_obj,
930 memory_object_offset_t offset)
39236c6e 931{
0a7de745
A
932 compressor_pager_t pager;
933 compressor_slot_t *slot_p;
934 unsigned int num_slots_freed;
935
39037602
A
936 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
937
39236c6e
A
938 compressor_pager_stats.state_clr++;
939
0a7de745 940 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c
A
941 /* overflow */
942 panic("%s: offset 0x%llx overflow\n",
0a7de745 943 __FUNCTION__, (uint64_t) offset);
fe8ab488 944 return 0;
22ba694c 945 }
39236c6e
A
946
947 compressor_pager_lookup(mem_obj, pager);
948
949 /* find the compressor slot for that page */
22ba694c 950 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 951
fe8ab488 952 num_slots_freed = 0;
39236c6e 953 if (slot_p && *slot_p != 0) {
fe8ab488
A
954 vm_compressor_free(slot_p, 0);
955 num_slots_freed++;
956 assert(*slot_p == 0);
39236c6e 957 }
fe8ab488
A
958
959 return num_slots_freed;
39236c6e
A
960}
961
962vm_external_state_t
963vm_compressor_pager_state_get(
0a7de745
A
964 memory_object_t mem_obj,
965 memory_object_offset_t offset)
39236c6e 966{
0a7de745
A
967 compressor_pager_t pager;
968 compressor_slot_t *slot_p;
39037602
A
969
970 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
0a7de745 971
39236c6e
A
972 compressor_pager_stats.state_get++;
973
0a7de745 974 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c
A
975 /* overflow */
976 panic("%s: offset 0x%llx overflow\n",
0a7de745 977 __FUNCTION__, (uint64_t) offset);
22ba694c
A
978 return VM_EXTERNAL_STATE_ABSENT;
979 }
39236c6e
A
980
981 compressor_pager_lookup(mem_obj, pager);
982
983 /* find the compressor slot for that page */
22ba694c 984 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 985
5ba3f43e 986 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
39236c6e
A
987 /* out of range */
988 return VM_EXTERNAL_STATE_ABSENT;
989 } else if (slot_p == NULL || *slot_p == 0) {
990 /* compressor does not have this page */
991 return VM_EXTERNAL_STATE_ABSENT;
992 } else {
993 /* compressor does have this page */
994 return VM_EXTERNAL_STATE_EXISTS;
995 }
996}
fe8ab488
A
997
998unsigned int
999vm_compressor_pager_reap_pages(
0a7de745
A
1000 memory_object_t mem_obj,
1001 int flags)
fe8ab488 1002{
0a7de745
A
1003 compressor_pager_t pager;
1004 int num_chunks;
1005 int failures;
1006 int i;
1007 compressor_slot_t *chunk;
1008 unsigned int num_slots_freed;
fe8ab488
A
1009
1010 compressor_pager_lookup(mem_obj, pager);
0a7de745 1011 if (pager == NULL) {
fe8ab488 1012 return 0;
0a7de745 1013 }
fe8ab488
A
1014
1015 compressor_pager_lock(pager);
1016
1017 /* reap the compressor slots */
1018 num_slots_freed = 0;
1019
0a7de745 1020 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
fe8ab488
A
1021 if (num_chunks > 1) {
1022 /* we have an array of chunks */
1023 for (i = 0; i < num_chunks; i++) {
1024 chunk = pager->cpgr_slots.cpgr_islots[i];
1025 if (chunk != NULL) {
1026 num_slots_freed +=
0a7de745
A
1027 compressor_pager_slots_chunk_free(
1028 chunk,
1029 COMPRESSOR_SLOTS_PER_CHUNK,
1030 flags,
1031 &failures);
fe8ab488
A
1032 if (failures == 0) {
1033 pager->cpgr_slots.cpgr_islots[i] = NULL;
f427ee49 1034 zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
fe8ab488
A
1035 }
1036 }
1037 }
3e170ce0 1038 } else if (pager->cpgr_num_slots > 2) {
fe8ab488
A
1039 chunk = pager->cpgr_slots.cpgr_dslots;
1040 num_slots_freed +=
0a7de745
A
1041 compressor_pager_slots_chunk_free(
1042 chunk,
1043 pager->cpgr_num_slots,
1044 flags,
1045 NULL);
3e170ce0
A
1046 } else {
1047 chunk = &pager->cpgr_slots.cpgr_eslots[0];
1048 num_slots_freed +=
0a7de745
A
1049 compressor_pager_slots_chunk_free(
1050 chunk,
1051 pager->cpgr_num_slots,
1052 flags,
1053 NULL);
fe8ab488 1054 }
fe8ab488
A
1055
1056 compressor_pager_unlock(pager);
1057
1058 return num_slots_freed;
1059}
1060
fe8ab488
A
1061void
1062vm_compressor_pager_transfer(
0a7de745
A
1063 memory_object_t dst_mem_obj,
1064 memory_object_offset_t dst_offset,
1065 memory_object_t src_mem_obj,
1066 memory_object_offset_t src_offset)
fe8ab488 1067{
0a7de745
A
1068 compressor_pager_t src_pager, dst_pager;
1069 compressor_slot_t *src_slot_p, *dst_slot_p;
1070
fe8ab488
A
1071 compressor_pager_stats.transfer++;
1072
1073 /* find the compressor slot for the destination */
1074 assert((uint32_t) dst_offset == dst_offset);
1075 compressor_pager_lookup(dst_mem_obj, dst_pager);
5ba3f43e 1076 assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
fe8ab488 1077 compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
0a7de745 1078 &dst_slot_p);
fe8ab488
A
1079 assert(dst_slot_p != NULL);
1080 assert(*dst_slot_p == 0);
1081
1082 /* find the compressor slot for the source */
1083 assert((uint32_t) src_offset == src_offset);
1084 compressor_pager_lookup(src_mem_obj, src_pager);
5ba3f43e 1085 assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
fe8ab488 1086 compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
0a7de745 1087 &src_slot_p);
fe8ab488
A
1088 assert(src_slot_p != NULL);
1089 assert(*src_slot_p != 0);
1090
1091 /* transfer the slot from source to destination */
1092 vm_compressor_transfer(dst_slot_p, src_slot_p);
fe8ab488
A
1093 OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
1094 OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
1095}
1096
1097memory_object_offset_t
1098vm_compressor_pager_next_compressed(
0a7de745
A
1099 memory_object_t mem_obj,
1100 memory_object_offset_t offset)
fe8ab488 1101{
0a7de745
A
1102 compressor_pager_t pager;
1103 uint32_t num_chunks;
1104 uint32_t page_num;
1105 uint32_t chunk_idx;
1106 uint32_t slot_idx;
1107 compressor_slot_t *chunk;
fe8ab488
A
1108
1109 compressor_pager_lookup(mem_obj, pager);
1110
1111 page_num = (uint32_t)(offset / PAGE_SIZE);
0a7de745 1112 if (page_num != (offset / PAGE_SIZE)) {
fe8ab488
A
1113 /* overflow */
1114 return (memory_object_offset_t) -1;
1115 }
5ba3f43e 1116 if (page_num >= pager->cpgr_num_slots) {
fe8ab488
A
1117 /* out of range */
1118 return (memory_object_offset_t) -1;
1119 }
3e170ce0 1120
fe8ab488 1121 num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
0a7de745 1122 COMPRESSOR_SLOTS_PER_CHUNK);
fe8ab488
A
1123
1124 if (num_chunks == 1) {
3e170ce0
A
1125 if (pager->cpgr_num_slots > 2) {
1126 chunk = pager->cpgr_slots.cpgr_dslots;
1127 } else {
1128 chunk = &pager->cpgr_slots.cpgr_eslots[0];
1129 }
fe8ab488 1130 for (slot_idx = page_num;
0a7de745
A
1131 slot_idx < pager->cpgr_num_slots;
1132 slot_idx++) {
fe8ab488
A
1133 if (chunk[slot_idx] != 0) {
1134 /* found a non-NULL slot in this chunk */
1135 return (memory_object_offset_t) (slot_idx *
0a7de745 1136 PAGE_SIZE);
fe8ab488
A
1137 }
1138 }
1139 return (memory_object_offset_t) -1;
1140 }
1141
1142 /* we have an array of chunks; find the next non-NULL chunk */
1143 chunk = NULL;
1144 for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
0a7de745
A
1145 slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
1146 chunk_idx < num_chunks;
1147 chunk_idx++,
1148 slot_idx = 0) {
fe8ab488
A
1149 chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
1150 if (chunk == NULL) {
1151 /* no chunk here: try the next one */
1152 continue;
1153 }
1154 /* search for an occupied slot in this chunk */
1155 for (;
0a7de745
A
1156 slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
1157 slot_idx++) {
fe8ab488
A
1158 if (chunk[slot_idx] != 0) {
1159 /* found an occupied slot in this chunk */
1160 uint32_t next_slot;
1161
1162 next_slot = ((chunk_idx *
0a7de745
A
1163 COMPRESSOR_SLOTS_PER_CHUNK) +
1164 slot_idx);
5ba3f43e 1165 if (next_slot >= pager->cpgr_num_slots) {
fe8ab488
A
1166 /* went beyond end of object */
1167 return (memory_object_offset_t) -1;
1168 }
1169 return (memory_object_offset_t) (next_slot *
0a7de745 1170 PAGE_SIZE);
fe8ab488
A
1171 }
1172 }
1173 }
1174 return (memory_object_offset_t) -1;
1175}
1176
1177unsigned int
1178vm_compressor_pager_get_count(
1179 memory_object_t mem_obj)
1180{
0a7de745 1181 compressor_pager_t pager;
fe8ab488
A
1182
1183 compressor_pager_lookup(mem_obj, pager);
0a7de745 1184 if (pager == NULL) {
fe8ab488 1185 return 0;
0a7de745 1186 }
fe8ab488
A
1187
1188 /*
1189 * The caller should have the VM object locked and one
1190 * needs that lock to do a page-in or page-out, so no
1191 * need to lock the pager here.
1192 */
1193 assert(pager->cpgr_num_slots_occupied >= 0);
1194
1195 return pager->cpgr_num_slots_occupied;
1196}
1197
1198void
1199vm_compressor_pager_count(
0a7de745
A
1200 memory_object_t mem_obj,
1201 int compressed_count_delta,
1202 boolean_t shared_lock,
1203 vm_object_t object __unused)
fe8ab488 1204{
0a7de745 1205 compressor_pager_t pager;
fe8ab488
A
1206
1207 if (compressed_count_delta == 0) {
1208 return;
1209 }
1210
1211 compressor_pager_lookup(mem_obj, pager);
0a7de745 1212 if (pager == NULL) {
fe8ab488 1213 return;
0a7de745 1214 }
fe8ab488
A
1215
1216 if (compressed_count_delta < 0) {
1217 assert(pager->cpgr_num_slots_occupied >=
0a7de745 1218 (unsigned int) -compressed_count_delta);
fe8ab488
A
1219 }
1220
1221 /*
1222 * The caller should have the VM object locked,
1223 * shared or exclusive.
1224 */
1225 if (shared_lock) {
1226 vm_object_lock_assert_shared(object);
1227 OSAddAtomic(compressed_count_delta,
0a7de745 1228 &pager->cpgr_num_slots_occupied);
fe8ab488
A
1229 } else {
1230 vm_object_lock_assert_exclusive(object);
1231 pager->cpgr_num_slots_occupied += compressed_count_delta;
1232 }
1233}
3e170ce0
A
1234
#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	void **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	/* locate the slot (no allocation) and hand it to the compressor */
	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */
f427ee49
A
1260
#if DEVELOPMENT || DEBUG

/*
 * Inject an error into the compressed copy of the page at "offset",
 * if one exists.  Returns KERN_SUCCESS only when an occupied slot was
 * found and corrupted; KERN_FAILURE otherwise.
 */
kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,
    memory_object_offset_t offset)
{
	kern_return_t result = KERN_FAILURE;
	compressor_slot_t *slot_p;
	compressor_pager_t pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return result;
	}

	/* only an occupied slot can receive the injected error */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
	if (slot_p != NULL && *slot_p != 0) {
		vm_compressor_inject_error(slot_p);
		result = KERN_SUCCESS;
	}

	return result;
}

#endif