/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Memory Object Management.
 */
#include <kern/host_statistics.h>
#include <kern/kalloc.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	memory_object_fault_info_t	fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t	desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t	prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t	reclaim_backing_store);
const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
	"compressor pager"
};
/* internal data structures */

struct {
	uint64_t	data_returns;
	uint64_t	data_requests;
	uint64_t	state_clr;
	uint64_t	state_get;
} compressor_pager_stats;
typedef int compressor_slot_t;

typedef struct compressor_pager {
	struct ipc_object_header	cpgr_pager_header;	/* fake ip_kotype */
	memory_object_pager_ops_t	cpgr_pager_ops;		/* == &compressor_pager_ops */
	memory_object_control_t		cpgr_control;
	lck_mtx_t			cpgr_lock;

	unsigned int			cpgr_references;
	unsigned int			cpgr_num_slots;
	union {
		compressor_slot_t	*cpgr_dslots;
		compressor_slot_t	**cpgr_islots;
	} cpgr_slots;
} *compressor_pager_t;
#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {		\
		_cpgr_ = NULL;						\
	} else {							\
		_cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END
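/*
 * Usage sketch (illustrative): the macro validates the memory object
 * before casting, so a caller that may be handed an arbitrary memory
 * object checks for NULL afterwards, e.g.:
 *
 *	compressor_pager_t pager;
 *
 *	compressor_pager_lookup(mem_obj, pager);
 *	if (pager == NULL)
 *		return;
 */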
zone_t		compressor_pager_zone;

lck_grp_t	compressor_pager_lck_grp;
lck_grp_attr_t	compressor_pager_lck_grp_attr;
lck_attr_t	compressor_pager_lck_attr;
#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
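/*
 * Worked example (illustrative): compressor_slot_t is a 4-byte int,
 * so COMPRESSOR_SLOTS_PER_CHUNK is 512 / 4 = 128 slots, i.e. one
 * chunk maps 128 pages.  An object of at most 128 pages fits in a
 * single direct chunk (cpgr_dslots); anything larger uses the
 * indirect array of chunk pointers (cpgr_islots).  Assuming 4KB
 * pages, a 1MB object has 256 pages and therefore needs
 * (256 + 127) / 128 = 2 chunks.
 */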
/* forward declarations */
void compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
				       int num_slots);
void compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp);
kern_return_t
compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t	pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
kern_return_t
compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);

	memory_object_synchronize_completed(pager->cpgr_control, offset, length);

	return KERN_SUCCESS;
}
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	compressor_pager_t	pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_control;
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}
void
compressor_memory_object_reference(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}
void
compressor_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else {
		chunk = pager->cpgr_slots.cpgr_dslots;
		compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
kern_return_t
compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/*
 * It is questionable whether or not a pager should decide what is relevant
 * and what is not in data sent from the kernel.  Data initialize has been
 * changed to copy back all data sent to it in preparation for its eventual
 * merge with data return.  It is the kernel that should decide what pages
 * to write back.  As of the writing of this note, this is indeed the case:
 * the kernel writes back one page at a time through this interface.
 */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size)
{
	compressor_pager_t	pager;
	memory_object_offset_t	cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t	new_size,
	memory_object_t		*new_mem_obj)
{
	compressor_pager_t	pager;
	int			num_chunks;

	if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_pager_ops = &compressor_pager_ops;
	pager->cpgr_pager_header.io_bits = IKOT_MEMORY_OBJECT;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
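/*
 * Illustrative caller (hypothetical; the real call site is in the VM
 * layer when an anonymous object first needs a pager):
 *
 *	memory_object_t	mem_obj;
 *	kern_return_t	kr;
 *
 *	kr = compressor_memory_object_create(1024 * 1024, &mem_obj);
 *
 * On success, the new pager starts with one reference, a NULL
 * control, and 256 zeroed slots (assuming 4KB pages), all "absent".
 */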
void
compressor_pager_slots_chunk_free(
	compressor_slot_t	*chunk,
	int			num_slots)
{
#if 00
	vm_compressor_free(chunk, num_slots);
#else
	int i;

	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0)
			vm_compressor_free(&chunk[i]);
	}
#endif
}
void
compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp)
{
	int			num_chunks;
	uint32_t		page_num;
	int			chunk_idx;
	int			slot_idx;
	compressor_slot_t	*chunk;
	compressor_slot_t	*t_chunk;

	page_num = (uint32_t)(offset/PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num > pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk)
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	}
}
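/*
 * Illustrative trace (assuming 4KB pages): for offset 0x81000 in a
 * pager with 256 slots, page_num = 0x81000 / 0x1000 = 129.  Since
 * num_chunks = 2, the indirect path is taken: chunk_idx = 129 / 128
 * = 1 and slot_idx = 129 % 128 = 1, so *slot_pp ends up pointing at
 * cpgr_islots[1][1].  If do_alloc is FALSE and chunk 1 has not been
 * allocated yet, *slot_pp is returned as NULL instead.
 */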
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
				      10000 * sizeof (struct compressor_pager),
				      8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}
kern_return_t
vm_compressor_pager_put(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	void			**current_chead,
	char			*scratch_buf)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_returns++;

	/*
	 * This routine is called by the pageout thread.  The pageout
	 * thread must not be blocked by read activities, so any lock
	 * taken here must be taken on a "try" rather than a blocking
	 * basis.  The code below also relies on this interface being
	 * synchronous; should it ever become asynchronous again for
	 * some type of pager, pages would have to be returned through
	 * a separate, asynchronous path.
	 */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p);
	}
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
		return (KERN_RESOURCE_SHORTAGE);

	return (KERN_SUCCESS);
}
kern_return_t
vm_compressor_pager_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	int			*my_fault_type,
	int			flags)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int	retval;

		/* get the page from the compressor */
		if ((retval = vm_compressor_get(ppnum, slot_p, flags)) == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	return kr;
}
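/*
 * Sketch of a caller (hypothetical; the real caller is the page-fault
 * path) showing how the return values are interpreted:
 *
 *	int		my_fault_type;
 *	kern_return_t	kr;
 *
 *	kr = vm_compressor_pager_get(mem_obj, offset, ppnum,
 *				     &my_fault_type, 0);
 *
 * KERN_SUCCESS means the page was decompressed into ppnum, with
 * my_fault_type set to DBG_COMPRESSOR_FAULT (or
 * DBG_COMPRESSOR_SWAPIN_FAULT if the segment had to be swapped in
 * first); KERN_MEMORY_ERROR means the compressor never had the page;
 * KERN_MEMORY_FAILURE means the slot existed but retrieval failed.
 */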
void
vm_compressor_pager_state_clr(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p);
	}
}
vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}
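/*
 * Illustrative check (hypothetical caller): this lets the VM layer
 * test for a compressed copy without paying for a decompression:
 *
 *	if (vm_compressor_pager_state_get(mem_obj, offset) ==
 *	    VM_EXTERNAL_STATE_EXISTS) {
 *		... a data_request for this page would succeed ...
 *	}
 */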