]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_compressor_pager.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / osfmk / vm / vm_compressor_pager.c
CommitLineData
39236c6e 1/*
cb323159 2 * Copyright (c) 2019 Apple Inc. All rights reserved.
39236c6e
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
39236c6e
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
39236c6e
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
39236c6e
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
39236c6e
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
0a7de745 31/*
39236c6e
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
39236c6e
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
39236c6e
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
39236c6e 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
39236c6e
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
39236c6e
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 * Compressor Pager.
59 * Memory Object Management.
60 */
61
62#include <kern/host_statistics.h>
63#include <kern/kalloc.h>
39037602 64#include <kern/ipc_kobject.h>
39236c6e 65
cb323159
A
66#include <machine/atomic.h>
67
39236c6e
A
68#include <mach/memory_object_control.h>
69#include <mach/memory_object_types.h>
39236c6e
A
70#include <mach/upl.h>
71
72#include <vm/memory_object.h>
73#include <vm/vm_compressor_pager.h>
74#include <vm/vm_external.h>
75#include <vm/vm_pageout.h>
76#include <vm/vm_protos.h>
77
78/* memory_object interfaces */
79void compressor_memory_object_reference(memory_object_t mem_obj);
80void compressor_memory_object_deallocate(memory_object_t mem_obj);
81kern_return_t compressor_memory_object_init(
0a7de745
A
82 memory_object_t mem_obj,
83 memory_object_control_t control,
39236c6e
A
84 memory_object_cluster_size_t pager_page_size);
85kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
86kern_return_t compressor_memory_object_data_request(
0a7de745
A
87 memory_object_t mem_obj,
88 memory_object_offset_t offset,
89 memory_object_cluster_size_t length,
90 __unused vm_prot_t protection_required,
91 memory_object_fault_info_t fault_info);
39236c6e 92kern_return_t compressor_memory_object_data_return(
0a7de745
A
93 memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t size,
96 __unused memory_object_offset_t *resid_offset,
97 __unused int *io_error,
98 __unused boolean_t dirty,
99 __unused boolean_t kernel_copy,
100 __unused int upl_flags);
39236c6e 101kern_return_t compressor_memory_object_data_initialize(
0a7de745
A
102 memory_object_t mem_obj,
103 memory_object_offset_t offset,
104 memory_object_cluster_size_t size);
39236c6e 105kern_return_t compressor_memory_object_data_unlock(
0a7de745
A
106 __unused memory_object_t mem_obj,
107 __unused memory_object_offset_t offset,
108 __unused memory_object_size_t size,
109 __unused vm_prot_t desired_access);
39236c6e 110kern_return_t compressor_memory_object_synchronize(
0a7de745
A
111 memory_object_t mem_obj,
112 memory_object_offset_t offset,
113 memory_object_size_t length,
114 __unused vm_sync_t flags);
39236c6e 115kern_return_t compressor_memory_object_map(
0a7de745
A
116 __unused memory_object_t mem_obj,
117 __unused vm_prot_t prot);
39236c6e
A
118kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
119kern_return_t compressor_memory_object_data_reclaim(
0a7de745
A
120 __unused memory_object_t mem_obj,
121 __unused boolean_t reclaim_backing_store);
39236c6e
A
122
123const struct memory_object_pager_ops compressor_pager_ops = {
cb323159
A
124 .memory_object_reference = compressor_memory_object_reference,
125 .memory_object_deallocate = compressor_memory_object_deallocate,
126 .memory_object_init = compressor_memory_object_init,
127 .memory_object_terminate = compressor_memory_object_terminate,
128 .memory_object_data_request = compressor_memory_object_data_request,
129 .memory_object_data_return = compressor_memory_object_data_return,
130 .memory_object_data_initialize = compressor_memory_object_data_initialize,
131 .memory_object_data_unlock = compressor_memory_object_data_unlock,
132 .memory_object_synchronize = compressor_memory_object_synchronize,
133 .memory_object_map = compressor_memory_object_map,
134 .memory_object_last_unmap = compressor_memory_object_last_unmap,
135 .memory_object_data_reclaim = compressor_memory_object_data_reclaim,
136 .memory_object_pager_name = "compressor pager"
39236c6e
A
137};
138
139/* internal data structures */
140
141struct {
0a7de745
A
142 uint64_t data_returns;
143 uint64_t data_requests;
144 uint64_t put;
145 uint64_t get;
146 uint64_t state_clr;
147 uint64_t state_get;
148 uint64_t transfer;
39236c6e
A
149} compressor_pager_stats;
150
151typedef int compressor_slot_t;
152
153typedef struct compressor_pager {
5ba3f43e
A
154 /* mandatory generic header */
155 struct memory_object cpgr_hdr;
39236c6e 156
5ba3f43e 157 /* pager-specific data */
0a7de745
A
158 lck_mtx_t cpgr_lock;
159 unsigned int cpgr_references;
160 unsigned int cpgr_num_slots;
161 unsigned int cpgr_num_slots_occupied;
39236c6e 162 union {
0a7de745
A
163 compressor_slot_t cpgr_eslots[2]; /* embedded slots */
164 compressor_slot_t *cpgr_dslots; /* direct slots */
165 compressor_slot_t **cpgr_islots; /* indirect slots */
39236c6e
A
166 } cpgr_slots;
167} *compressor_pager_t;
168
0a7de745
A
169#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
170 MACRO_BEGIN \
171 if (_mem_obj_ == NULL || \
172 _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \
173 _cpgr_ = NULL; \
174 } else { \
175 _cpgr_ = (compressor_pager_t) _mem_obj_; \
176 } \
39236c6e
A
177 MACRO_END
178
179zone_t compressor_pager_zone;
180
0a7de745
A
181lck_grp_t compressor_pager_lck_grp;
182lck_grp_attr_t compressor_pager_lck_grp_attr;
183lck_attr_t compressor_pager_lck_attr;
39236c6e
A
184
185#define compressor_pager_lock(_cpgr_) \
186 lck_mtx_lock(&(_cpgr_)->cpgr_lock)
187#define compressor_pager_unlock(_cpgr_) \
188 lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
189#define compressor_pager_lock_init(_cpgr_) \
190 lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
191#define compressor_pager_lock_destroy(_cpgr_) \
192 lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
193
0a7de745
A
194#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
195#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
39236c6e
A
196
197/* forward declarations */
fe8ab488 198unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
0a7de745
A
199 int num_slots,
200 int flags,
201 int *failures);
39236c6e 202void compressor_pager_slot_lookup(
0a7de745
A
203 compressor_pager_t pager,
204 boolean_t do_alloc,
205 memory_object_offset_t offset,
206 compressor_slot_t **slot_pp);
39236c6e
A
207
208kern_return_t
209compressor_memory_object_init(
0a7de745
A
210 memory_object_t mem_obj,
211 memory_object_control_t control,
39236c6e
A
212 __unused memory_object_cluster_size_t pager_page_size)
213{
0a7de745 214 compressor_pager_t pager;
39236c6e
A
215
216 assert(pager_page_size == PAGE_SIZE);
217
218 memory_object_control_reference(control);
219
220 compressor_pager_lookup(mem_obj, pager);
221 compressor_pager_lock(pager);
222
0a7de745 223 if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
39236c6e 224 panic("compressor_memory_object_init: bad request");
0a7de745 225 }
5ba3f43e 226 pager->cpgr_hdr.mo_control = control;
39236c6e
A
227
228 compressor_pager_unlock(pager);
229
230 return KERN_SUCCESS;
231}
232
233kern_return_t
234compressor_memory_object_synchronize(
5ba3f43e 235 __unused memory_object_t mem_obj,
0a7de745
A
236 __unused memory_object_offset_t offset,
237 __unused memory_object_size_t length,
238 __unused vm_sync_t flags)
39236c6e 239{
5ba3f43e
A
240 panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
241 return KERN_FAILURE;
39236c6e
A
242}
243
244kern_return_t
245compressor_memory_object_map(
0a7de745
A
246 __unused memory_object_t mem_obj,
247 __unused vm_prot_t prot)
39236c6e
A
248{
249 panic("compressor_memory_object_map");
250 return KERN_FAILURE;
251}
252
253kern_return_t
254compressor_memory_object_last_unmap(
0a7de745 255 __unused memory_object_t mem_obj)
39236c6e
A
256{
257 panic("compressor_memory_object_last_unmap");
258 return KERN_FAILURE;
259}
260
261kern_return_t
262compressor_memory_object_data_reclaim(
0a7de745
A
263 __unused memory_object_t mem_obj,
264 __unused boolean_t reclaim_backing_store)
39236c6e
A
265{
266 panic("compressor_memory_object_data_reclaim");
267 return KERN_FAILURE;
268}
269
270kern_return_t
271compressor_memory_object_terminate(
0a7de745 272 memory_object_t mem_obj)
39236c6e 273{
0a7de745
A
274 memory_object_control_t control;
275 compressor_pager_t pager;
39236c6e 276
0a7de745 277 /*
39236c6e
A
278 * control port is a receive right, not a send right.
279 */
280
281 compressor_pager_lookup(mem_obj, pager);
282 compressor_pager_lock(pager);
283
284 /*
285 * After memory_object_terminate both memory_object_init
286 * and a no-senders notification are possible, so we need
287 * to clean up our reference to the memory_object_control
288 * to prepare for a new init.
289 */
290
5ba3f43e
A
291 control = pager->cpgr_hdr.mo_control;
292 pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
39236c6e
A
293
294 compressor_pager_unlock(pager);
295
296 /*
297 * Now we deallocate our reference on the control.
298 */
299 memory_object_control_deallocate(control);
300 return KERN_SUCCESS;
301}
302
303void
304compressor_memory_object_reference(
0a7de745 305 memory_object_t mem_obj)
39236c6e 306{
0a7de745 307 compressor_pager_t pager;
39236c6e
A
308
309 compressor_pager_lookup(mem_obj, pager);
0a7de745 310 if (pager == NULL) {
39236c6e 311 return;
0a7de745 312 }
39236c6e
A
313
314 compressor_pager_lock(pager);
315 assert(pager->cpgr_references > 0);
316 pager->cpgr_references++;
317 compressor_pager_unlock(pager);
318}
319
320void
321compressor_memory_object_deallocate(
0a7de745 322 memory_object_t mem_obj)
39236c6e 323{
0a7de745
A
324 compressor_pager_t pager;
325 unsigned int num_slots_freed;
39236c6e
A
326
327 /*
328 * Because we don't give out multiple first references
329 * for a memory object, there can't be a race
330 * between getting a deallocate call and creating
331 * a new reference for the object.
332 */
333
334 compressor_pager_lookup(mem_obj, pager);
0a7de745 335 if (pager == NULL) {
39236c6e 336 return;
0a7de745 337 }
39236c6e
A
338
339 compressor_pager_lock(pager);
340 if (--pager->cpgr_references > 0) {
341 compressor_pager_unlock(pager);
342 return;
343 }
344
345 /*
346 * We shouldn't get a deallocation call
347 * when the kernel has the object cached.
348 */
0a7de745 349 if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
39236c6e 350 panic("compressor_memory_object_deallocate(): bad request");
0a7de745 351 }
39236c6e
A
352
353 /*
354 * Unlock the pager (though there should be no one
355 * waiting for it).
356 */
357 compressor_pager_unlock(pager);
358
359 /* free the compressor slots */
360 int num_chunks;
361 int i;
362 compressor_slot_t *chunk;
363
0a7de745 364 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
39236c6e
A
365 if (num_chunks > 1) {
366 /* we have an array of chunks */
367 for (i = 0; i < num_chunks; i++) {
368 chunk = pager->cpgr_slots.cpgr_islots[i];
369 if (chunk != NULL) {
fe8ab488 370 num_slots_freed =
0a7de745
A
371 compressor_pager_slots_chunk_free(
372 chunk,
373 COMPRESSOR_SLOTS_PER_CHUNK,
374 0,
375 NULL);
39236c6e
A
376 pager->cpgr_slots.cpgr_islots[i] = NULL;
377 kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
378 }
379 }
380 kfree(pager->cpgr_slots.cpgr_islots,
0a7de745 381 num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
39236c6e 382 pager->cpgr_slots.cpgr_islots = NULL;
3e170ce0 383 } else if (pager->cpgr_num_slots > 2) {
39236c6e 384 chunk = pager->cpgr_slots.cpgr_dslots;
fe8ab488 385 num_slots_freed =
0a7de745
A
386 compressor_pager_slots_chunk_free(
387 chunk,
388 pager->cpgr_num_slots,
389 0,
390 NULL);
39236c6e
A
391 pager->cpgr_slots.cpgr_dslots = NULL;
392 kfree(chunk,
0a7de745
A
393 (pager->cpgr_num_slots *
394 sizeof(pager->cpgr_slots.cpgr_dslots[0])));
3e170ce0
A
395 } else {
396 chunk = &pager->cpgr_slots.cpgr_eslots[0];
397 num_slots_freed =
0a7de745
A
398 compressor_pager_slots_chunk_free(
399 chunk,
400 pager->cpgr_num_slots,
401 0,
402 NULL);
39236c6e
A
403 }
404
405 compressor_pager_lock_destroy(pager);
406 zfree(compressor_pager_zone, pager);
407}
408
409kern_return_t
410compressor_memory_object_data_request(
0a7de745
A
411 memory_object_t mem_obj,
412 memory_object_offset_t offset,
413 memory_object_cluster_size_t length,
414 __unused vm_prot_t protection_required,
415 __unused memory_object_fault_info_t fault_info)
39236c6e 416{
0a7de745
A
417 compressor_pager_t pager;
418 kern_return_t kr;
419 compressor_slot_t *slot_p;
420
39236c6e
A
421 compressor_pager_stats.data_requests++;
422
423 /*
424 * Request must be on a page boundary and a multiple of pages.
425 */
0a7de745 426 if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
39236c6e 427 panic("compressor_memory_object_data_request(): bad alignment");
0a7de745 428 }
39236c6e 429
0a7de745 430 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c 431 panic("%s: offset 0x%llx overflow\n",
0a7de745 432 __FUNCTION__, (uint64_t) offset);
22ba694c
A
433 return KERN_FAILURE;
434 }
39236c6e
A
435
436 compressor_pager_lookup(mem_obj, pager);
437
438 if (length == 0) {
439 /* we're only querying the pager for this page */
440 } else {
441 panic("compressor: data_request");
442 }
443
444 /* find the compressor slot for that page */
22ba694c 445 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 446
5ba3f43e 447 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
39236c6e
A
448 /* out of range */
449 kr = KERN_FAILURE;
450 } else if (slot_p == NULL || *slot_p == 0) {
451 /* compressor does not have this page */
452 kr = KERN_FAILURE;
453 } else {
454 /* compressor does have this page */
455 kr = KERN_SUCCESS;
456 }
457 return kr;
458}
459
460/*
461 * memory_object_data_initialize: check whether we already have each page, and
462 * write it if we do not. The implementation is far from optimized, and
463 * also assumes that the default_pager is single-threaded.
464 */
465/* It is questionable whether or not a pager should decide what is relevant */
466/* and what is not in data sent from the kernel. Data initialize has been */
467/* changed to copy back all data sent to it in preparation for its eventual */
468/* merge with data return. It is the kernel that should decide what pages */
469/* to write back. As of the writing of this note, this is indeed the case */
470/* the kernel writes back one page at a time through this interface */
471
472kern_return_t
473compressor_memory_object_data_initialize(
0a7de745
A
474 memory_object_t mem_obj,
475 memory_object_offset_t offset,
476 memory_object_cluster_size_t size)
39236c6e 477{
0a7de745
A
478 compressor_pager_t pager;
479 memory_object_offset_t cur_offset;
39236c6e
A
480
481 compressor_pager_lookup(mem_obj, pager);
482 compressor_pager_lock(pager);
483
484 for (cur_offset = offset;
0a7de745
A
485 cur_offset < offset + size;
486 cur_offset += PAGE_SIZE) {
39236c6e
A
487 panic("do a data_return() if slot for this page is empty");
488 }
489
490 compressor_pager_unlock(pager);
491
492 return KERN_SUCCESS;
493}
494
495kern_return_t
496compressor_memory_object_data_unlock(
0a7de745
A
497 __unused memory_object_t mem_obj,
498 __unused memory_object_offset_t offset,
499 __unused memory_object_size_t size,
500 __unused vm_prot_t desired_access)
39236c6e
A
501{
502 panic("compressor_memory_object_data_unlock()");
503 return KERN_FAILURE;
504}
505
506
507/*ARGSUSED*/
508kern_return_t
509compressor_memory_object_data_return(
0a7de745
A
510 __unused memory_object_t mem_obj,
511 __unused memory_object_offset_t offset,
512 __unused memory_object_cluster_size_t size,
513 __unused memory_object_offset_t *resid_offset,
514 __unused int *io_error,
515 __unused boolean_t dirty,
516 __unused boolean_t kernel_copy,
517 __unused int upl_flags)
39236c6e
A
518{
519 panic("compressor: data_return");
520 return KERN_FAILURE;
521}
522
523/*
524 * Routine: default_pager_memory_object_create
525 * Purpose:
0a7de745
A
526 * Handle requests for memory objects from the
527 * kernel.
39236c6e 528 * Notes:
0a7de745
A
529 * Because we only give out the default memory
530 * manager port to the kernel, we don't have to
531 * be so paranoid about the contents.
39236c6e
A
532 */
533kern_return_t
534compressor_memory_object_create(
0a7de745
A
535 memory_object_size_t new_size,
536 memory_object_t *new_mem_obj)
39236c6e 537{
0a7de745
A
538 compressor_pager_t pager;
539 int num_chunks;
39236c6e 540
0a7de745 541 if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
22ba694c
A
542 /* 32-bit overflow for number of pages */
543 panic("%s: size 0x%llx overflow\n",
0a7de745 544 __FUNCTION__, (uint64_t) new_size);
39236c6e
A
545 return KERN_INVALID_ARGUMENT;
546 }
547
548 pager = (compressor_pager_t) zalloc(compressor_pager_zone);
549 if (pager == NULL) {
550 return KERN_RESOURCE_SHORTAGE;
551 }
552
553 compressor_pager_lock_init(pager);
39236c6e 554 pager->cpgr_references = 1;
0a7de745 555 pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
fe8ab488 556 pager->cpgr_num_slots_occupied = 0;
39236c6e
A
557
558 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
559 if (num_chunks > 1) {
0a7de745
A
560 pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
561 bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
3e170ce0 562 } else if (pager->cpgr_num_slots > 2) {
0a7de745
A
563 pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
564 bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
3e170ce0
A
565 } else {
566 pager->cpgr_slots.cpgr_eslots[0] = 0;
567 pager->cpgr_slots.cpgr_eslots[1] = 0;
39236c6e
A
568 }
569
570 /*
571 * Set up associations between this memory object
572 * and this compressor_pager structure
573 */
5ba3f43e
A
574 pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
575 pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
576 pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
39236c6e
A
577
578 *new_mem_obj = (memory_object_t) pager;
579 return KERN_SUCCESS;
580}
581
582
fe8ab488 583unsigned int
39236c6e 584compressor_pager_slots_chunk_free(
0a7de745
A
585 compressor_slot_t *chunk,
586 int num_slots,
587 int flags,
588 int *failures)
39236c6e 589{
39236c6e 590 int i;
3e170ce0 591 int retval;
fe8ab488
A
592 unsigned int num_slots_freed;
593
0a7de745 594 if (failures) {
fe8ab488 595 *failures = 0;
0a7de745 596 }
fe8ab488 597 num_slots_freed = 0;
39236c6e
A
598 for (i = 0; i < num_slots; i++) {
599 if (chunk[i] != 0) {
3e170ce0
A
600 retval = vm_compressor_free(&chunk[i], flags);
601
0a7de745 602 if (retval == 0) {
fe8ab488 603 num_slots_freed++;
0a7de745
A
604 } else {
605 if (retval == -2) {
3e170ce0 606 assert(flags & C_DONT_BLOCK);
0a7de745 607 }
fe8ab488 608
0a7de745 609 if (failures) {
fe8ab488 610 *failures += 1;
0a7de745 611 }
fe8ab488 612 }
39236c6e
A
613 }
614 }
fe8ab488 615 return num_slots_freed;
39236c6e
A
616}
617
618void
619compressor_pager_slot_lookup(
0a7de745
A
620 compressor_pager_t pager,
621 boolean_t do_alloc,
622 memory_object_offset_t offset,
623 compressor_slot_t **slot_pp)
39236c6e 624{
0a7de745
A
625 int num_chunks;
626 uint32_t page_num;
627 int chunk_idx;
628 int slot_idx;
629 compressor_slot_t *chunk;
630 compressor_slot_t *t_chunk;
631
632 page_num = (uint32_t)(offset / PAGE_SIZE);
633 if (page_num != (offset / PAGE_SIZE)) {
22ba694c
A
634 /* overflow */
635 panic("%s: offset 0x%llx overflow\n",
0a7de745 636 __FUNCTION__, (uint64_t) offset);
22ba694c
A
637 *slot_pp = NULL;
638 return;
639 }
5ba3f43e 640 if (page_num >= pager->cpgr_num_slots) {
39236c6e
A
641 /* out of range */
642 *slot_pp = NULL;
643 return;
644 }
645 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
646 if (num_chunks > 1) {
647 /* we have an array of chunks */
648 chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
649 chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
650
651 if (chunk == NULL && do_alloc) {
652 t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
653 bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
654
655 compressor_pager_lock(pager);
656
657 if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
cc8bc92a
A
658 /*
659 * On some platforms, the memory stores from
660 * the bzero(t_chunk) above might not have been
661 * made visible and another thread might see
662 * the contents of this new chunk before it's
663 * been fully zero-filled.
664 * This memory barrier should take care of this
665 * according to the platform requirements.
666 */
cb323159 667 os_atomic_thread_fence(release);
cc8bc92a 668
39236c6e
A
669 chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
670 t_chunk = NULL;
671 }
672 compressor_pager_unlock(pager);
0a7de745
A
673
674 if (t_chunk) {
39236c6e 675 kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
0a7de745 676 }
39236c6e
A
677 }
678 if (chunk == NULL) {
679 *slot_pp = NULL;
680 } else {
681 slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
682 *slot_pp = &chunk[slot_idx];
683 }
3e170ce0 684 } else if (pager->cpgr_num_slots > 2) {
39236c6e
A
685 slot_idx = page_num;
686 *slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
3e170ce0
A
687 } else {
688 slot_idx = page_num;
689 *slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
39236c6e
A
690 }
691}
692
693void
694vm_compressor_pager_init(void)
695{
696 lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
697 lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
698 lck_attr_setdefault(&compressor_pager_lck_attr);
699
0a7de745
A
700 compressor_pager_zone = zinit(sizeof(struct compressor_pager),
701 10000 * sizeof(struct compressor_pager),
702 8192, "compressor_pager");
39236c6e
A
703 zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
704 zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);
705
706 vm_compressor_init();
707}
708
709kern_return_t
710vm_compressor_pager_put(
0a7de745
A
711 memory_object_t mem_obj,
712 memory_object_offset_t offset,
713 ppnum_t ppnum,
714 void **current_chead,
715 char *scratch_buf,
716 int *compressed_count_delta_p)
39236c6e 717{
0a7de745
A
718 compressor_pager_t pager;
719 compressor_slot_t *slot_p;
39236c6e 720
fe8ab488
A
721 compressor_pager_stats.put++;
722
723 *compressed_count_delta_p = 0;
39236c6e
A
724
725 /* This routine is called by the pageout thread. The pageout thread */
726 /* cannot be blocked by read activities unless the read activities */
727 /* Therefore the grant of vs lock must be done on a try versus a */
728 /* blocking basis. The code below relies on the fact that the */
729 /* interface is synchronous. Should this interface be again async */
730 /* for some type of pager in the future the pages will have to be */
731 /* returned through a separate, asynchronous path. */
732
733 compressor_pager_lookup(mem_obj, pager);
734
0a7de745 735 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c
A
736 /* overflow */
737 panic("%s: offset 0x%llx overflow\n",
0a7de745 738 __FUNCTION__, (uint64_t) offset);
22ba694c
A
739 return KERN_RESOURCE_SHORTAGE;
740 }
39236c6e 741
22ba694c 742 compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);
39236c6e
A
743
744 if (slot_p == NULL) {
745 /* out of range ? */
fe8ab488 746 panic("vm_compressor_pager_put: out of range");
39236c6e
A
747 }
748 if (*slot_p != 0) {
749 /*
750 * Already compressed: forget about the old one.
751 *
752 * This can happen after a vm_object_do_collapse() when
753 * the "backing_object" had some pages paged out and the
754 * "object" had an equivalent page resident.
755 */
fe8ab488 756 vm_compressor_free(slot_p, 0);
fe8ab488 757 *compressed_count_delta_p -= 1;
39236c6e 758 }
d9a64523 759
d9a64523
A
760 /*
761 * If the compressor operation succeeds, we presumably don't need to
762 * undo any previous WIMG update, as all live mappings should be
763 * disconnected.
764 */
765
766 if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
0a7de745 767 return KERN_RESOURCE_SHORTAGE;
e8c3f781 768 }
fe8ab488 769 *compressed_count_delta_p += 1;
39236c6e 770
d9a64523 771 return KERN_SUCCESS;
39236c6e
A
772}
773
774
775kern_return_t
776vm_compressor_pager_get(
0a7de745
A
777 memory_object_t mem_obj,
778 memory_object_offset_t offset,
779 ppnum_t ppnum,
780 int *my_fault_type,
781 int flags,
782 int *compressed_count_delta_p)
39236c6e 783{
0a7de745
A
784 compressor_pager_t pager;
785 kern_return_t kr;
786 compressor_slot_t *slot_p;
787
fe8ab488
A
788 compressor_pager_stats.get++;
789
790 *compressed_count_delta_p = 0;
39236c6e 791
0a7de745 792 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c 793 panic("%s: offset 0x%llx overflow\n",
0a7de745 794 __FUNCTION__, (uint64_t) offset);
22ba694c
A
795 return KERN_MEMORY_ERROR;
796 }
39236c6e
A
797
798 compressor_pager_lookup(mem_obj, pager);
799
800 /* find the compressor slot for that page */
22ba694c 801 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 802
5ba3f43e 803 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
39236c6e
A
804 /* out of range */
805 kr = KERN_MEMORY_FAILURE;
806 } else if (slot_p == NULL || *slot_p == 0) {
807 /* compressor does not have this page */
808 kr = KERN_MEMORY_ERROR;
809 } else {
810 /* compressor does have this page */
811 kr = KERN_SUCCESS;
812 }
813 *my_fault_type = DBG_COMPRESSOR_FAULT;
d9a64523 814
0a7de745
A
815 if (kr == KERN_SUCCESS) {
816 int retval;
d9a64523 817
39236c6e 818 /* get the page from the compressor */
fe8ab488 819 retval = vm_compressor_get(ppnum, slot_p, flags);
0a7de745 820 if (retval == -1) {
39236c6e 821 kr = KERN_MEMORY_FAILURE;
0a7de745 822 } else if (retval == 1) {
39236c6e 823 *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
0a7de745 824 } else if (retval == -2) {
39236c6e
A
825 assert((flags & C_DONT_BLOCK));
826 kr = KERN_FAILURE;
827 }
828 }
fe8ab488
A
829
830 if (kr == KERN_SUCCESS) {
831 assert(slot_p != NULL);
832 if (*slot_p != 0) {
833 /*
834 * We got the page for a copy-on-write fault
835 * and we kept the original in place. Slot
836 * is still occupied.
837 */
838 } else {
fe8ab488
A
839 *compressed_count_delta_p -= 1;
840 }
841 }
842
39236c6e
A
843 return kr;
844}
845
fe8ab488 846unsigned int
39236c6e 847vm_compressor_pager_state_clr(
0a7de745
A
848 memory_object_t mem_obj,
849 memory_object_offset_t offset)
39236c6e 850{
0a7de745
A
851 compressor_pager_t pager;
852 compressor_slot_t *slot_p;
853 unsigned int num_slots_freed;
854
39037602
A
855 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
856
39236c6e
A
857 compressor_pager_stats.state_clr++;
858
0a7de745 859 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c
A
860 /* overflow */
861 panic("%s: offset 0x%llx overflow\n",
0a7de745 862 __FUNCTION__, (uint64_t) offset);
fe8ab488 863 return 0;
22ba694c 864 }
39236c6e
A
865
866 compressor_pager_lookup(mem_obj, pager);
867
868 /* find the compressor slot for that page */
22ba694c 869 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 870
fe8ab488 871 num_slots_freed = 0;
39236c6e 872 if (slot_p && *slot_p != 0) {
fe8ab488
A
873 vm_compressor_free(slot_p, 0);
874 num_slots_freed++;
875 assert(*slot_p == 0);
39236c6e 876 }
fe8ab488
A
877
878 return num_slots_freed;
39236c6e
A
879}
880
881vm_external_state_t
882vm_compressor_pager_state_get(
0a7de745
A
883 memory_object_t mem_obj,
884 memory_object_offset_t offset)
39236c6e 885{
0a7de745
A
886 compressor_pager_t pager;
887 compressor_slot_t *slot_p;
39037602
A
888
889 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
0a7de745 890
39236c6e
A
891 compressor_pager_stats.state_get++;
892
0a7de745 893 if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
22ba694c
A
894 /* overflow */
895 panic("%s: offset 0x%llx overflow\n",
0a7de745 896 __FUNCTION__, (uint64_t) offset);
22ba694c
A
897 return VM_EXTERNAL_STATE_ABSENT;
898 }
39236c6e
A
899
900 compressor_pager_lookup(mem_obj, pager);
901
902 /* find the compressor slot for that page */
22ba694c 903 compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
39236c6e 904
5ba3f43e 905 if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
39236c6e
A
906 /* out of range */
907 return VM_EXTERNAL_STATE_ABSENT;
908 } else if (slot_p == NULL || *slot_p == 0) {
909 /* compressor does not have this page */
910 return VM_EXTERNAL_STATE_ABSENT;
911 } else {
912 /* compressor does have this page */
913 return VM_EXTERNAL_STATE_EXISTS;
914 }
915}
fe8ab488
A
916
917unsigned int
918vm_compressor_pager_reap_pages(
0a7de745
A
919 memory_object_t mem_obj,
920 int flags)
fe8ab488 921{
0a7de745
A
922 compressor_pager_t pager;
923 int num_chunks;
924 int failures;
925 int i;
926 compressor_slot_t *chunk;
927 unsigned int num_slots_freed;
fe8ab488
A
928
929 compressor_pager_lookup(mem_obj, pager);
0a7de745 930 if (pager == NULL) {
fe8ab488 931 return 0;
0a7de745 932 }
fe8ab488
A
933
934 compressor_pager_lock(pager);
935
936 /* reap the compressor slots */
937 num_slots_freed = 0;
938
0a7de745 939 num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
fe8ab488
A
940 if (num_chunks > 1) {
941 /* we have an array of chunks */
942 for (i = 0; i < num_chunks; i++) {
943 chunk = pager->cpgr_slots.cpgr_islots[i];
944 if (chunk != NULL) {
945 num_slots_freed +=
0a7de745
A
946 compressor_pager_slots_chunk_free(
947 chunk,
948 COMPRESSOR_SLOTS_PER_CHUNK,
949 flags,
950 &failures);
fe8ab488
A
951 if (failures == 0) {
952 pager->cpgr_slots.cpgr_islots[i] = NULL;
953 kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
954 }
955 }
956 }
3e170ce0 957 } else if (pager->cpgr_num_slots > 2) {
fe8ab488
A
958 chunk = pager->cpgr_slots.cpgr_dslots;
959 num_slots_freed +=
0a7de745
A
960 compressor_pager_slots_chunk_free(
961 chunk,
962 pager->cpgr_num_slots,
963 flags,
964 NULL);
3e170ce0
A
965 } else {
966 chunk = &pager->cpgr_slots.cpgr_eslots[0];
967 num_slots_freed +=
0a7de745
A
968 compressor_pager_slots_chunk_free(
969 chunk,
970 pager->cpgr_num_slots,
971 flags,
972 NULL);
fe8ab488 973 }
fe8ab488
A
974
975 compressor_pager_unlock(pager);
976
977 return num_slots_freed;
978}
979
fe8ab488
A
/*
 * Move one compressed page from "src_mem_obj"/"src_offset" to
 * "dst_mem_obj"/"dst_offset" without decompressing it, and keep each
 * pager's occupied-slot count in sync.
 *
 * Preconditions (asserted): both offsets fit in 32 bits, fall within
 * their pager's slot range, the destination slot is currently empty
 * and the source slot is currently occupied.
 */
void
vm_compressor_pager_transfer(
	memory_object_t dst_mem_obj,
	memory_object_offset_t dst_offset,
	memory_object_t src_mem_obj,
	memory_object_offset_t src_offset)
{
	compressor_pager_t src_pager, dst_pager;
	compressor_slot_t *src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	/* the (uint32_t) cast below truncates; assert it is lossless first */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	/* do_alloc == TRUE: create the destination slot if not present yet */
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
	    &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	/* do_alloc == FALSE: the source slot must already exist */
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
	    &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	/* atomics: the VM object lock may only be held shared here */
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
1015
1016memory_object_offset_t
1017vm_compressor_pager_next_compressed(
0a7de745
A
1018 memory_object_t mem_obj,
1019 memory_object_offset_t offset)
fe8ab488 1020{
0a7de745
A
1021 compressor_pager_t pager;
1022 uint32_t num_chunks;
1023 uint32_t page_num;
1024 uint32_t chunk_idx;
1025 uint32_t slot_idx;
1026 compressor_slot_t *chunk;
fe8ab488
A
1027
1028 compressor_pager_lookup(mem_obj, pager);
1029
1030 page_num = (uint32_t)(offset / PAGE_SIZE);
0a7de745 1031 if (page_num != (offset / PAGE_SIZE)) {
fe8ab488
A
1032 /* overflow */
1033 return (memory_object_offset_t) -1;
1034 }
5ba3f43e 1035 if (page_num >= pager->cpgr_num_slots) {
fe8ab488
A
1036 /* out of range */
1037 return (memory_object_offset_t) -1;
1038 }
3e170ce0 1039
fe8ab488 1040 num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
0a7de745 1041 COMPRESSOR_SLOTS_PER_CHUNK);
fe8ab488
A
1042
1043 if (num_chunks == 1) {
3e170ce0
A
1044 if (pager->cpgr_num_slots > 2) {
1045 chunk = pager->cpgr_slots.cpgr_dslots;
1046 } else {
1047 chunk = &pager->cpgr_slots.cpgr_eslots[0];
1048 }
fe8ab488 1049 for (slot_idx = page_num;
0a7de745
A
1050 slot_idx < pager->cpgr_num_slots;
1051 slot_idx++) {
fe8ab488
A
1052 if (chunk[slot_idx] != 0) {
1053 /* found a non-NULL slot in this chunk */
1054 return (memory_object_offset_t) (slot_idx *
0a7de745 1055 PAGE_SIZE);
fe8ab488
A
1056 }
1057 }
1058 return (memory_object_offset_t) -1;
1059 }
1060
1061 /* we have an array of chunks; find the next non-NULL chunk */
1062 chunk = NULL;
1063 for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
0a7de745
A
1064 slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
1065 chunk_idx < num_chunks;
1066 chunk_idx++,
1067 slot_idx = 0) {
fe8ab488
A
1068 chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
1069 if (chunk == NULL) {
1070 /* no chunk here: try the next one */
1071 continue;
1072 }
1073 /* search for an occupied slot in this chunk */
1074 for (;
0a7de745
A
1075 slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
1076 slot_idx++) {
fe8ab488
A
1077 if (chunk[slot_idx] != 0) {
1078 /* found an occupied slot in this chunk */
1079 uint32_t next_slot;
1080
1081 next_slot = ((chunk_idx *
0a7de745
A
1082 COMPRESSOR_SLOTS_PER_CHUNK) +
1083 slot_idx);
5ba3f43e 1084 if (next_slot >= pager->cpgr_num_slots) {
fe8ab488
A
1085 /* went beyond end of object */
1086 return (memory_object_offset_t) -1;
1087 }
1088 return (memory_object_offset_t) (next_slot *
0a7de745 1089 PAGE_SIZE);
fe8ab488
A
1090 }
1091 }
1092 }
1093 return (memory_object_offset_t) -1;
1094}
1095
1096unsigned int
1097vm_compressor_pager_get_count(
1098 memory_object_t mem_obj)
1099{
0a7de745 1100 compressor_pager_t pager;
fe8ab488
A
1101
1102 compressor_pager_lookup(mem_obj, pager);
0a7de745 1103 if (pager == NULL) {
fe8ab488 1104 return 0;
0a7de745 1105 }
fe8ab488
A
1106
1107 /*
1108 * The caller should have the VM object locked and one
1109 * needs that lock to do a page-in or page-out, so no
1110 * need to lock the pager here.
1111 */
1112 assert(pager->cpgr_num_slots_occupied >= 0);
1113
1114 return pager->cpgr_num_slots_occupied;
1115}
1116
1117void
1118vm_compressor_pager_count(
0a7de745
A
1119 memory_object_t mem_obj,
1120 int compressed_count_delta,
1121 boolean_t shared_lock,
1122 vm_object_t object __unused)
fe8ab488 1123{
0a7de745 1124 compressor_pager_t pager;
fe8ab488
A
1125
1126 if (compressed_count_delta == 0) {
1127 return;
1128 }
1129
1130 compressor_pager_lookup(mem_obj, pager);
0a7de745 1131 if (pager == NULL) {
fe8ab488 1132 return;
0a7de745 1133 }
fe8ab488
A
1134
1135 if (compressed_count_delta < 0) {
1136 assert(pager->cpgr_num_slots_occupied >=
0a7de745 1137 (unsigned int) -compressed_count_delta);
fe8ab488
A
1138 }
1139
1140 /*
1141 * The caller should have the VM object locked,
1142 * shared or exclusive.
1143 */
1144 if (shared_lock) {
1145 vm_object_lock_assert_shared(object);
1146 OSAddAtomic(compressed_count_delta,
0a7de745 1147 &pager->cpgr_num_slots_occupied);
fe8ab488
A
1148 } else {
1149 vm_object_lock_assert_exclusive(object);
1150 pager->cpgr_num_slots_occupied += compressed_count_delta;
1151 }
1152}
3e170ce0
A
1153
#if CONFIG_FREEZE
/*
 * Hand the compressor slot backing ("mem_obj", "offset") to
 * vm_compressor_relocate() so its compressed data can be moved into
 * the compression chead tracked by "current_chead".
 * Returns KERN_FAILURE when the memory object has no compressor pager;
 * otherwise returns vm_compressor_relocate()'s result.
 */
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	void **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t *slot_ptr;
	compressor_pager_t pager;

	assert(mem_obj);

	/* no pager means there is nothing to relocate */
	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return KERN_FAILURE;
	}

	/* do_alloc == FALSE: only look up an existing slot */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_ptr);
	return vm_compressor_relocate(current_chead, slot_ptr);
}
#endif /* CONFIG_FREEZE */