/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	memory_object_fault_info_t	fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t	desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t	prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t	reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
	"compressor pager"
};

/* internal data structures */

struct {
	uint64_t	data_returns;
	uint64_t	data_requests;
	uint64_t	put;
	uint64_t	get;
	uint64_t	state_clr;
	uint64_t	state_get;
	uint64_t	transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
	struct ipc_object_header	cpgr_pager_header; /* fake ip_kotype */
	memory_object_pager_ops_t	cpgr_pager_ops;	/* == &compressor_pager_ops */
	memory_object_control_t		cpgr_control;
	lck_mtx_t			cpgr_lock;

	unsigned int			cpgr_references;
	unsigned int			cpgr_num_slots;
	unsigned int			cpgr_num_slots_occupied_pager;
	unsigned int			cpgr_num_slots_occupied;
	union {
		compressor_slot_t	*cpgr_dslots;
		compressor_slot_t	**cpgr_islots;
	} cpgr_slots;
} *compressor_pager_t;
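
/*
 * Slot storage layout: a small object keeps its slots in a single direct
 * array ("cpgr_dslots"); a larger one uses an array of pointers to
 * COMPRESSOR_SLOTS_CHUNK_SIZE-byte chunks ("cpgr_islots"), allocated
 * lazily as pages are compressed.  Which representation is in use is
 * recomputed from cpgr_num_slots, so no discriminant field is needed.
 */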

#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {		\
		_cpgr_ = NULL;						\
	} else {							\
		_cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END
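
/*
 * compressor_pager_lookup() yields NULL unless the memory object's pager
 * ops vector identifies it as one of ours, so callers can cast the
 * memory_object_t to a compressor_pager_t without further checks.
 */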

zone_t compressor_pager_zone;

lck_grp_t	compressor_pager_lck_grp;
lck_grp_attr_t	compressor_pager_lck_grp_attr;
lck_attr_t	compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
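
/*
 * With 512-byte chunks and 4-byte slots, each chunk covers 128 pages.
 * For example, a 1 MB object (256 4 KB pages) needs two chunks and so
 * takes the indirect "islots" representation, while anything up to
 * 128 pages fits in a single direct "dslots" allocation.
 */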

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
					       int num_slots,
					       int flags,
					       int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp);

kern_return_t
compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t	pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);

	memory_object_synchronize_completed(pager->cpgr_control, offset, length);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	compressor_pager_t	pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_control;
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);
	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;
	unsigned int		num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						0,
						NULL);
				assert(pager->cpgr_num_slots_occupied_pager >=
				       num_slots_freed);
				OSAddAtomic(-num_slots_freed,
					    &pager->cpgr_num_slots_occupied_pager);
				assert(pager->cpgr_num_slots_occupied_pager >= 0);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
		assert(pager->cpgr_num_slots_occupied_pager >= num_slots_freed);
		OSAddAtomic(-num_slots_freed, &pager->cpgr_num_slots_occupied_pager);
		assert(pager->cpgr_num_slots_occupied_pager >= 0);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	}
	assert(pager->cpgr_num_slots_occupied_pager == 0);

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}
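
	/*
	 * The (uint32_t) comparison above is this file's standard
	 * truncation check: page numbers are kept in 32 bits, so any
	 * offset whose page number does not survive the narrowing must
	 * be rejected before it is used as a slot index.
	 */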

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range: valid slots are 0..cpgr_num_slots-1 */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size)
{
	compressor_pager_t	pager;
	memory_object_offset_t	cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t	new_size,
	memory_object_t		*new_mem_obj)
{
	compressor_pager_t	pager;
	int			num_chunks;

	if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);
	pager->cpgr_num_slots_occupied_pager = 0;
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */

	pager->cpgr_pager_ops = &compressor_pager_ops;
	pager->cpgr_pager_header.io_bits = IKOT_MEMORY_OBJECT;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
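
/*
 * Illustrative caller (a sketch, not the exact VM-layer code): the VM
 * object layer creates a compressor pager sized to cover the object.
 *
 *	memory_object_t pager;
 *
 *	if (compressor_memory_object_create(
 *		    (memory_object_size_t) object->vo_size,
 *		    &pager) == KERN_SUCCESS)
 *		...attach "pager" to the vm_object...
 *
 * "object" here stands for the vm_object being backed.
 */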


unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t	*chunk,
	int			num_slots,
	int			flags,
	int			*failures)
{
	int		i;
	unsigned int	num_slots_freed;

	if (failures)
		*failures = 0;
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			if (vm_compressor_free(&chunk[i], flags) == 0)
				num_slots_freed++;
			else {
				assert(flags & C_DONT_BLOCK);

				if (failures)
					*failures += 1;
			}
		}
	}
	return num_slots_freed;
}
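
/*
 * When called with C_DONT_BLOCK (the reaping path), vm_compressor_free()
 * may refuse to free a slot rather than block; each refusal is counted in
 * *failures so the caller knows the chunk is not yet empty and must not
 * be released.
 */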

void
compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp)
{
	int			num_chunks;
	uint32_t		page_num;
	int			chunk_idx;
	int			slot_idx;
	compressor_slot_t	*chunk;
	compressor_slot_t	*t_chunk;

	page_num = (uint32_t)(offset/PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range: valid slots are 0..cpgr_num_slots-1 */
		*slot_pp = NULL;
		return;
	}
	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk)
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
		}
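		/*
		 * Allocate-then-install: the chunk is allocated and zeroed
		 * without the pager lock held, installed under the lock
		 * only if no other thread raced it in, and the loser
		 * simply frees its temporary chunk.
		 */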
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	}
}

void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
				      10000 * sizeof (struct compressor_pager),
				      8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}
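
/*
 * Zone tuning notes: the pager zone is capped at 10000 pagers and grows
 * 8 KB at a time.  Z_CALLERACCT is cleared so allocations are billed to
 * the kernel rather than the calling task, and Z_NOENCRYPT marks the
 * zone's memory as not requiring encryption in a hibernation image.
 */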

kern_return_t
vm_compressor_pager_put(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	void			**current_chead,
	char			*scratch_buf,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/*
	 * This routine is called by the pageout thread, which must not be
	 * blocked by read activity, so any lock taken here has to be taken
	 * on a "try" rather than a blocking basis.  The code below relies
	 * on the fact that the interface is synchronous: should this
	 * interface become asynchronous again for some type of pager in
	 * the future, the pages would have to be returned through a
	 * separate, asynchronous path.
	 */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		assert(pager->cpgr_num_slots_occupied_pager >= 1);
		OSAddAtomic(-1, &pager->cpgr_num_slots_occupied_pager);
		assert(pager->cpgr_num_slots_occupied_pager >= 0);
		*compressed_count_delta_p -= 1;
	}
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
		return (KERN_RESOURCE_SHORTAGE);
	assert(pager->cpgr_num_slots_occupied_pager >= 0);
	OSAddAtomic(+1, &pager->cpgr_num_slots_occupied_pager);
	assert(pager->cpgr_num_slots_occupied_pager > 0);
	*compressed_count_delta_p += 1;

	return (KERN_SUCCESS);
}
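
/*
 * The caller is expected to fold *compressed_count_delta_p into the VM
 * object's compressed-page count (see vm_compressor_pager_count() below);
 * this routine only maintains the pager's own atomic tally.
 */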


kern_return_t
vm_compressor_pager_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	int			*my_fault_type,
	int			flags,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range: valid slots are 0..cpgr_num_slots-1 */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int	retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			assert(pager->cpgr_num_slots_occupied_pager >= 1);
			OSAddAtomic(-1, &pager->cpgr_num_slots_occupied_pager);
			assert(pager->cpgr_num_slots_occupied_pager >= 0);
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
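
/*
 * vm_compressor_get() return values, as decoded above: 0 means the page
 * decompressed directly, 1 means it first had to be swapped in (reported
 * as DBG_COMPRESSOR_SWAPIN_FAULT), -1 means the slot could not be
 * decompressed, and -2 means the operation would have blocked while
 * C_DONT_BLOCK was set.
 */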

unsigned int
vm_compressor_pager_state_clr(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;
	unsigned int		num_slots_freed;

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
		assert(pager->cpgr_num_slots_occupied_pager >= 1);
		OSAddAtomic(-1, &pager->cpgr_num_slots_occupied_pager);
		assert(pager->cpgr_num_slots_occupied_pager >= 0);
	}

	return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range: valid slots are 0..cpgr_num_slots-1 */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}

unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t		mem_obj,
	int			flags)
{
	compressor_pager_t	pager;
	int			num_chunks;
	int			failures;
	int			i;
	compressor_slot_t	*chunk;
	unsigned int		num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						flags,
						&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	}
	OSAddAtomic(-num_slots_freed, &pager->cpgr_num_slots_occupied_pager);

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

unsigned int
vm_compressor_pager_get_slots_occupied(
	memory_object_t	mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	assert(pager->cpgr_num_slots_occupied_pager >= 0);

	return pager->cpgr_num_slots_occupied_pager;
}

void
vm_compressor_pager_transfer(
	memory_object_t		dst_mem_obj,
	memory_object_offset_t	dst_offset,
	memory_object_t		src_mem_obj,
	memory_object_offset_t	src_offset)
{
	compressor_pager_t	src_pager, dst_pager;
	compressor_slot_t	*src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
				     &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
				     &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied_pager);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied_pager);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
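
/*
 * Note the two occupancy counters: cpgr_num_slots_occupied_pager is the
 * pager's own atomic tally, updated from pager-level paths like the one
 * above, while cpgr_num_slots_occupied mirrors the count the owning VM
 * object sees and is normally adjusted through vm_compressor_pager_count()
 * under the object lock.
 */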

memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	uint32_t		num_chunks;
	uint32_t		page_num;
	uint32_t		chunk_idx;
	uint32_t		slot_idx;
	compressor_slot_t	*chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}
	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		      COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		for (slot_idx = page_num;
		     slot_idx < pager->cpgr_num_slots;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
								 PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	     slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	     chunk_idx < num_chunks;
	     chunk_idx++,
	     slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		     slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
					      COMPRESSOR_SLOTS_PER_CHUNK) +
					     slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
								 PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}
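
/*
 * Typical scan (illustrative): walk every compressed page of an object
 * by repeatedly asking for the next occupied slot.
 *
 *	memory_object_offset_t offset;
 *
 *	for (offset = vm_compressor_pager_next_compressed(mem_obj, 0);
 *	     offset != (memory_object_offset_t) -1;
 *	     offset = vm_compressor_pager_next_compressed(mem_obj,
 *							   offset + PAGE_SIZE))
 *		...process the compressed page at "offset"...
 */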

unsigned int
vm_compressor_pager_get_count(
	memory_object_t	mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}

void
vm_compressor_pager_count(
	memory_object_t	mem_obj,
	int		compressed_count_delta,
	boolean_t	shared_lock,
	vm_object_t	object __unused)
{
	compressor_pager_t	pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		       (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
			    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}