/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Compressor Pager.
 * Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <mach/upl.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    __unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
    compressor_memory_object_reference,
    compressor_memory_object_deallocate,
    compressor_memory_object_init,
    compressor_memory_object_terminate,
    compressor_memory_object_data_request,
    compressor_memory_object_data_return,
    compressor_memory_object_data_initialize,
    compressor_memory_object_data_unlock,
    compressor_memory_object_synchronize,
    compressor_memory_object_map,
    compressor_memory_object_last_unmap,
    compressor_memory_object_data_reclaim,
    "compressor pager"
};
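
/*
 * Note: the initializers above are positional, so their order must match
 * the field layout of struct memory_object_pager_ops exactly; swapping two
 * entries would silently dispatch requests to the wrong handler.  The
 * trailing string is the pager's name, used for identification/debugging.
 */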

/* internal data structures */

struct {
    uint64_t data_returns;
    uint64_t data_requests;
    uint64_t state_clr;
    uint64_t state_get;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
    struct ipc_object_header cpgr_pager_header; /* fake ip_kotype */
    memory_object_pager_ops_t cpgr_pager_ops;   /* == &compressor_pager_ops */
    memory_object_control_t cpgr_control;
    lck_mtx_t cpgr_lock;

    unsigned int cpgr_references;
    unsigned int cpgr_num_slots;
    union {
        compressor_slot_t *cpgr_dslots;
        compressor_slot_t **cpgr_islots;
    } cpgr_slots;
} *compressor_pager_t;

#define compressor_pager_lookup(_mem_obj_, _cpgr_)                      \
    MACRO_BEGIN                                                         \
    if (_mem_obj_ == NULL ||                                            \
        _mem_obj_->mo_pager_ops != &compressor_pager_ops) {             \
        _cpgr_ = NULL;                                                  \
    } else {                                                            \
        _cpgr_ = (compressor_pager_t) _mem_obj_;                        \
    }                                                                   \
    MACRO_END

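/*
 * compressor_pager_lookup() doubles as a type check: the memory_object_t
 * is downcast to compressor_pager_t only after verifying that its pager
 * ops vector is &compressor_pager_ops; anything else yields NULL.  A
 * minimal usage sketch (hypothetical caller):
 *
 *     compressor_pager_t pager;
 *
 *     compressor_pager_lookup(mem_obj, pager);
 *     if (pager == NULL)
 *         return KERN_INVALID_ARGUMENT;   (not a compressor-backed object)
 */
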
zone_t compressor_pager_zone;

lck_grp_t compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
    lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
    lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
    lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
    lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

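/*
 * Slot-table layout (a worked example, assuming 4 KB pages and the 4-byte
 * compressor_slot_t above): each 512-byte chunk holds 512 / 4 = 128 slots,
 * i.e. covers 128 pages (512 KB) of object space.  Objects that fit in a
 * single chunk store it directly in cpgr_dslots; larger objects use
 * cpgr_islots, an array of chunk pointers populated lazily on first touch.
 */
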
/* forward declarations */
void compressor_pager_slots_chunk_free(compressor_slot_t *chunk, int num_slots);
void compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp);

kern_return_t
compressor_memory_object_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
    __unused memory_object_cluster_size_t pager_page_size)
{
    compressor_pager_t pager;

    assert(pager_page_size == PAGE_SIZE);

    memory_object_control_reference(control);

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
        panic("compressor_memory_object_init: bad request");
    pager->cpgr_control = control;

    compressor_pager_unlock(pager);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    __unused vm_sync_t flags)
{
    compressor_pager_t pager;

    compressor_pager_lookup(mem_obj, pager);

    memory_object_synchronize_completed(pager->cpgr_control, offset, length);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_map(
    __unused memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    panic("compressor_memory_object_map");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
    __unused memory_object_t mem_obj)
{
    panic("compressor_memory_object_last_unmap");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
    __unused memory_object_t mem_obj,
    __unused boolean_t reclaim_backing_store)
{
    panic("compressor_memory_object_data_reclaim");
    return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
    memory_object_t mem_obj)
{
    memory_object_control_t control;
    compressor_pager_t pager;

    /*
     * control port is a receive right, not a send right.
     */

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    /*
     * After memory_object_terminate both memory_object_init
     * and a no-senders notification are possible, so we need
     * to clean up our reference to the memory_object_control
     * to prepare for a new init.
     */

    control = pager->cpgr_control;
    pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;

    compressor_pager_unlock(pager);

    /*
     * Now we deallocate our reference on the control.
     */
    memory_object_control_deallocate(control);
    return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL)
        return;

    compressor_pager_lock(pager);
    assert(pager->cpgr_references > 0);
    pager->cpgr_references++;
    compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
    memory_object_t mem_obj)
{
    compressor_pager_t pager;

    /*
     * Because we don't give out multiple first references
     * for a memory object, there can't be a race
     * between getting a deallocate call and creating
     * a new reference for the object.
     */

    compressor_pager_lookup(mem_obj, pager);
    if (pager == NULL)
        return;

    compressor_pager_lock(pager);
    if (--pager->cpgr_references > 0) {
        compressor_pager_unlock(pager);
        return;
    }

    /*
     * We shouldn't get a deallocation call
     * when the kernel has the object cached.
     */
    if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
        panic("compressor_memory_object_deallocate(): bad request");

    /*
     * Unlock the pager (though there should be no one
     * waiting for it).
     */
    compressor_pager_unlock(pager);

    /* free the compressor slots */
    int num_chunks;
    int i;
    compressor_slot_t *chunk;

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        for (i = 0; i < num_chunks; i++) {
            chunk = pager->cpgr_slots.cpgr_islots[i];
            if (chunk != NULL) {
                compressor_pager_slots_chunk_free(
                    chunk,
                    COMPRESSOR_SLOTS_PER_CHUNK);
                pager->cpgr_slots.cpgr_islots[i] = NULL;
                kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
            }
        }
        kfree(pager->cpgr_slots.cpgr_islots,
              num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
        pager->cpgr_slots.cpgr_islots = NULL;
    } else {
        chunk = pager->cpgr_slots.cpgr_dslots;
        compressor_pager_slots_chunk_free(
            chunk,
            pager->cpgr_num_slots);
        pager->cpgr_slots.cpgr_dslots = NULL;
        kfree(chunk,
              (pager->cpgr_num_slots *
               sizeof (pager->cpgr_slots.cpgr_dslots[0])));
    }

    compressor_pager_lock_destroy(pager);
    zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t protection_required,
    __unused memory_object_fault_info_t fault_info)
{
    compressor_pager_t pager;
    kern_return_t kr;
    compressor_slot_t *slot_p;

    compressor_pager_stats.data_requests++;

    /*
     * Request must be on a page boundary and a multiple of pages.
     */
    if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
        panic("compressor_memory_object_data_request(): bad alignment");

    if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
        panic("%s: offset 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) offset);
        return KERN_FAILURE;
    }

    compressor_pager_lookup(mem_obj, pager);

    if (length == 0) {
        /* we're only querying the pager for this page */
    } else {
        panic("compressor: data_request");
    }

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range (page numbers are 0-based) */
        kr = KERN_FAILURE;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        kr = KERN_FAILURE;
    } else {
        /* compressor does have this page */
        kr = KERN_SUCCESS;
    }
    return kr;
}
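
/*
 * Note that the data_request() handler above only answers residency
 * queries (length == 0) and deliberately panics otherwise: with the
 * compressor, actual page delivery does not go through the memory-object
 * interface but through vm_compressor_pager_get() further below.
 */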

/*
 * memory_object_data_initialize: check whether we already have each page,
 * and write it if we do not.  The implementation is far from optimized,
 * and also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
compressor_memory_object_data_initialize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size)
{
    compressor_pager_t pager;
    memory_object_offset_t cur_offset;

    compressor_pager_lookup(mem_obj, pager);
    compressor_pager_lock(pager);

    for (cur_offset = offset;
         cur_offset < offset + size;
         cur_offset += PAGE_SIZE) {
        panic("do a data_return() if slot for this page is empty");
    }

    compressor_pager_unlock(pager);

    return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    panic("compressor_memory_object_data_unlock()");
    return KERN_FAILURE;
}


/*ARGSUSED*/
kern_return_t
compressor_memory_object_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t size,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("compressor: data_return");
    return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
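/*
 * A quick worked example of the chunk sizing below (assuming 4 KB pages):
 * a 1 MB object has 256 pages, so cpgr_num_slots = 256 and
 * num_chunks = (256 + 127) / 128 = 2, which selects the indirect
 * (cpgr_islots) representation; a 128 KB object needs only 32 slots,
 * so its single chunk lives directly in cpgr_dslots.
 */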
kern_return_t
compressor_memory_object_create(
    memory_object_size_t new_size,
    memory_object_t *new_mem_obj)
{
    compressor_pager_t pager;
    int num_chunks;

    if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
        /* 32-bit overflow for number of pages */
        panic("%s: size 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) new_size);
        return KERN_INVALID_ARGUMENT;
    }

    pager = (compressor_pager_t) zalloc(compressor_pager_zone);
    if (pager == NULL) {
        return KERN_RESOURCE_SHORTAGE;
    }

    compressor_pager_lock_init(pager);
    pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
    pager->cpgr_references = 1;
    pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);

    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
        bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
    } else {
        pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
        bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
    }

    /*
     * Set up associations between this memory object
     * and this compressor_pager structure
     */

    pager->cpgr_pager_ops = &compressor_pager_ops;
    pager->cpgr_pager_header.io_bits = IKOT_MEMORY_OBJECT;

    *new_mem_obj = (memory_object_t) pager;
    return KERN_SUCCESS;
}


void
compressor_pager_slots_chunk_free(
    compressor_slot_t *chunk,
    int num_slots)
{
#if 0
    vm_compressor_free(chunk, num_slots);
#else
    int i;
    for (i = 0; i < num_slots; i++) {
        if (chunk[i] != 0) {
            vm_compressor_free(&chunk[i]);
        }
    }
#endif
}

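/*
 * Chunk allocation in compressor_pager_slot_lookup() below uses a
 * double-checked pattern: the chunk pointer is first read without the
 * pager lock, a zero-filled candidate chunk is allocated outside the
 * lock, and the lock is taken only to re-check the slot and install the
 * chunk.  If another thread won the race, the loser's chunk is freed.
 */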
void
compressor_pager_slot_lookup(
    compressor_pager_t pager,
    boolean_t do_alloc,
    memory_object_offset_t offset,
    compressor_slot_t **slot_pp)
{
    int num_chunks;
    uint32_t page_num;
    int chunk_idx;
    int slot_idx;
    compressor_slot_t *chunk;
    compressor_slot_t *t_chunk;

    page_num = (uint32_t)(offset/PAGE_SIZE);
    if (page_num != (offset/PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) offset);
        *slot_pp = NULL;
        return;
    }
    if (page_num >= pager->cpgr_num_slots) {
        /* out of range (page_num is 0-based, so == cpgr_num_slots is past the end) */
        *slot_pp = NULL;
        return;
    }
    num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
    if (num_chunks > 1) {
        /* we have an array of chunks */
        chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
        chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

        if (chunk == NULL && do_alloc) {
            t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
            bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

            compressor_pager_lock(pager);

            if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
                chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
                t_chunk = NULL;
            }
            compressor_pager_unlock(pager);

            if (t_chunk)
                kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
        }
        if (chunk == NULL) {
            *slot_pp = NULL;
        } else {
            slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
            *slot_pp = &chunk[slot_idx];
        }
    } else {
        slot_idx = page_num;
        *slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
    }
}

void
vm_compressor_pager_init(void)
{
    lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
    lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
    lck_attr_setdefault(&compressor_pager_lck_attr);

    compressor_pager_zone = zinit(sizeof (struct compressor_pager),
                                  10000 * sizeof (struct compressor_pager),
                                  8192, "compressor_pager");
    zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
    zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

    vm_compressor_init();
}
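
/*
 * For reference, the zinit() call above caps the compressor_pager zone at
 * 10000 * sizeof (struct compressor_pager) bytes (room for up to 10000
 * pagers) and grows it in 8192-byte allocations; the zone is then excluded
 * from per-caller accounting (Z_CALLERACCT FALSE) and flagged so its
 * memory is not encrypted (Z_NOENCRYPT TRUE).
 */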

kern_return_t
vm_compressor_pager_put(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    void **current_chead,
    char *scratch_buf)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    compressor_pager_stats.data_returns++;

    /*
     * This routine is called by the pageout thread, which must not be
     * blocked waiting on read activity, so any locking here has to be
     * done on a try rather than a blocking basis.  The code below also
     * relies on this interface being synchronous: should it ever be
     * made asynchronous again for some type of pager, pages would have
     * to be returned through a separate, asynchronous path.
     */

    compressor_pager_lookup(mem_obj, pager);

    if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) offset);
        return KERN_RESOURCE_SHORTAGE;
    }

    compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

    if (slot_p == NULL) {
        /* out of range ? */
        panic("compressor_pager_put: out of range");
    }
    if (*slot_p != 0) {
        /*
         * Already compressed: forget about the old one.
         *
         * This can happen after a vm_object_do_collapse() when
         * the "backing_object" had some pages paged out and the
         * "object" had an equivalent page resident.
         */
        vm_compressor_free(slot_p);
    }
    if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
        return (KERN_RESOURCE_SHORTAGE);

    return (KERN_SUCCESS);
}


kern_return_t
vm_compressor_pager_get(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    ppnum_t ppnum,
    int *my_fault_type,
    int flags)
{
    compressor_pager_t pager;
    kern_return_t kr;
    compressor_slot_t *slot_p;

    compressor_pager_stats.data_requests++;

    if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
        panic("%s: offset 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) offset);
        return KERN_MEMORY_ERROR;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        kr = KERN_MEMORY_FAILURE;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        kr = KERN_MEMORY_ERROR;
    } else {
        /* compressor does have this page */
        kr = KERN_SUCCESS;
    }
    *my_fault_type = DBG_COMPRESSOR_FAULT;

    if (kr == KERN_SUCCESS) {
        int retval;

        /* get the page from the compressor */
        if ((retval = vm_compressor_get(ppnum, slot_p, flags)) == -1)
            kr = KERN_MEMORY_FAILURE;
        else if (retval == 1)
            *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
        else if (retval == -2) {
            assert((flags & C_DONT_BLOCK));
            kr = KERN_FAILURE;
        }
    }
    return kr;
}

void
vm_compressor_pager_state_clr(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    compressor_pager_stats.state_clr++;

    if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) offset);
        return;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (slot_p && *slot_p != 0) {
        vm_compressor_free(slot_p);
    }
}

vm_external_state_t
vm_compressor_pager_state_get(
    memory_object_t mem_obj,
    memory_object_offset_t offset)
{
    compressor_pager_t pager;
    compressor_slot_t *slot_p;

    compressor_pager_stats.state_get++;

    if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
        /* overflow */
        panic("%s: offset 0x%llx overflow\n",
              __FUNCTION__, (uint64_t) offset);
        return VM_EXTERNAL_STATE_ABSENT;
    }

    compressor_pager_lookup(mem_obj, pager);

    /* find the compressor slot for that page */
    compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

    if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
        /* out of range */
        return VM_EXTERNAL_STATE_ABSENT;
    } else if (slot_p == NULL || *slot_p == 0) {
        /* compressor does not have this page */
        return VM_EXTERNAL_STATE_ABSENT;
    } else {
        /* compressor does have this page */
        return VM_EXTERNAL_STATE_EXISTS;
    }
}