/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Memory Object Management.
 */
#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	memory_object_fault_info_t fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	__unused vm_sync_t flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t mem_obj,
	__unused boolean_t reclaim_backing_store);
const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
};
/* internal data structures */

struct {
	uint64_t data_returns;
	uint64_t data_requests;
	uint64_t put;
	uint64_t get;
	uint64_t state_clr;
	uint64_t state_get;
	uint64_t transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t    cpgr_lock;
	unsigned int cpgr_references;
	unsigned int cpgr_num_slots;
	unsigned int cpgr_num_slots_occupied;
	union {
		compressor_slot_t cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t *cpgr_dslots;   /* direct slots */
		compressor_slot_t **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
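
/*
 * Note on slot storage (a summary of the tiering used throughout this
 * file, not new behavior): objects of at most 2 pages keep their slots
 * in the embedded cpgr_eslots[] array; objects that fit in one chunk
 * use a single directly-allocated cpgr_dslots array; larger objects go
 * through cpgr_islots, an array of chunk pointers where each chunk is
 * allocated lazily on first use (see compressor_pager_slot_lookup()).
 */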
#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {	\
		_cpgr_ = NULL;						\
	} else {							\
		_cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END
zone_t compressor_pager_zone;

lck_grp_t      compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t     compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
					       int num_slots,
					       int flags,
					       int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp);
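
/*
 * Sizing example (assuming the usual 4-byte compressor_slot_t): each
 * 512-byte chunk holds 512 / 4 = 128 slots, i.e. one chunk covers 128
 * pages of the backing object, and an object of N pages needs
 * (N + 127) / 128 chunks.
 */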
kern_return_t
compressor_memory_object_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
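
/*
 * The following memory_object entry points are not expected to be
 * reached for compressor-backed objects; each one simply panics.
 */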
kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t mem_obj,
	__unused boolean_t reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_terminate(
	memory_object_t mem_obj)
{
	memory_object_control_t control;
	compressor_pager_t pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}
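
/*
 * Reference counting: compressor_memory_object_reference() takes an
 * extra reference on the pager; the matching deallocate below tears
 * the pager down (and frees all of its compressor slots) when the last
 * reference goes away.
 */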
void
compressor_memory_object_reference(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}
void
compressor_memory_object_deallocate(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;
	unsigned int num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						0,
						NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
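
/*
 * data_request is only used as an existence query here: callers pass
 * length == 0 (anything else panics) and the return value reports
 * whether the compressor currently holds the page at this offset.
 */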
kern_return_t
compressor_memory_object_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been  */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages  */
/* to write back.  As of the writing of this note, this is indeed the case; */
/* the kernel writes back one page at a time through this interface.        */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size)
{
	compressor_pager_t pager;
	memory_object_offset_t cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t size,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
/*
 * Routine:	default_pager_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t new_size,
	memory_object_t *new_mem_obj)
{
	compressor_pager_t pager;
	int num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
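
/*
 * Walk one chunk of slots and hand every compressed slot back to the
 * compressor; returns the number of slots actually freed.
 */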
unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t *chunk,
	int num_slots,
	int flags,
	int *failures)
{
	int i;
	int retval;
	unsigned int num_slots_freed;

	if (failures) {
		*failures = 0;
	}
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i]) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0) {
				num_slots_freed++;
			} else {
				if (retval == -2) {
					assert(flags & C_DONT_BLOCK);
				}
				if (failures) {
					*failures += 1;
				}
			}
		}
	}
	return num_slots_freed;
}
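
/*
 * Translate (pager, offset) into the address of the corresponding
 * compressor slot.  With do_alloc == TRUE the containing indirect
 * chunk is allocated (and published behind a release fence) if it does
 * not exist yet; otherwise *slot_pp is set to NULL for missing chunks
 * or out-of-range offsets.
 */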
void
compressor_pager_slot_lookup(
	compressor_pager_t pager,
	boolean_t do_alloc,
	memory_object_offset_t offset,
	compressor_slot_t **slot_pp)
{
	int num_chunks;
	uint32_t page_num;
	int chunk_idx;
	int slot_idx;
	compressor_slot_t *chunk;
	compressor_slot_t *t_chunk;

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				__c11_atomic_thread_fence(memory_order_release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				/* another thread already installed a chunk */
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}
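
/*
 * One-time bootstrap: set up the pager lock group/attributes and the
 * compressor_pager zone, then initialize the compressor itself.
 */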
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof(struct compressor_pager),
				      10000 * sizeof(struct compressor_pager),
				      8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}
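
/*
 * vm_compressor_pager_put: compress the page at (mem_obj, offset) into
 * this pager's slot, updating *compressed_count_delta_p so the caller
 * can fix up the object's compressed-page accounting.
 *
 * A minimal usage sketch from a hypothetical caller (illustrative
 * variable names only, not a quote of the real pageout path):
 *
 *	int delta = 0;
 *	kr = vm_compressor_pager_put(pager_obj, offset, ppnum,
 *	                             &current_chead, scratch_buf, &delta);
 *	vm_compressor_pager_count(pager_obj, delta, FALSE, object);
 */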
kern_return_t
vm_compressor_pager_put(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	void **current_chead,
	char *scratch_buf,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout thread */
	/* cannot be blocked by read activities unless the read activities   */
	/* Therefore the grant of vs lock must be done on a try versus a     */
	/* blocking basis.  The code below relies on the fact that the       */
	/* interface is synchronous.  Should this interface be again async   */
	/* for some type of pager in the future the pages will have to be    */
	/* returned through a separate, asynchronous path.                   */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * gone.
	 */

	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		return KERN_RESOURCE_SHORTAGE;
	}
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}
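
/*
 * vm_compressor_pager_get: decompress the page at (mem_obj, offset)
 * into physical page "ppnum".  Returns KERN_SUCCESS on a hit,
 * KERN_MEMORY_ERROR when the compressor does not have the page,
 * KERN_MEMORY_FAILURE for out-of-range or decompression errors, and
 * KERN_FAILURE when C_DONT_BLOCK prevented the operation.
 */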
kern_return_t
vm_compressor_pager_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	ppnum_t ppnum,
	int *my_fault_type,
	int flags,
	int *compressed_count_delta_p)
{
	compressor_pager_t pager;
	kern_return_t kr;
	compressor_slot_t *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1) {
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == 1) {
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
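
/*
 * state_clr / state_get: per-page bookkeeping helpers.  state_clr drops
 * the compressed copy of one page (returning how many slots were
 * freed); state_get reports whether a compressed copy exists.
 */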
unsigned int
vm_compressor_pager_state_clr(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;
	unsigned int num_slots_freed;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}
vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	compressor_slot_t *slot_p;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}
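
/*
 * Free every compressed slot of an object; "flags" is passed through to
 * vm_compressor_free() (e.g. C_DONT_BLOCK), and an indirect chunk is
 * only released once all of its slots were successfully freed.
 */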
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t mem_obj,
	int flags)
{
	compressor_pager_t pager;
	int num_chunks;
	int failures;
	int i;
	compressor_slot_t *chunk;
	unsigned int num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						flags,
						&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}
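
/*
 * Move one compressed page from a source pager/offset to a destination
 * pager/offset, keeping each pager's occupied-slot count in sync.
 */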
void
vm_compressor_pager_transfer(
	memory_object_t dst_mem_obj,
	memory_object_offset_t dst_offset,
	memory_object_t src_mem_obj,
	memory_object_offset_t src_offset)
{
	compressor_pager_t src_pager, dst_pager;
	compressor_slot_t *src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
				     &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
				     &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
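
/*
 * Scan forward from "offset" and return the offset of the next page
 * that has a compressed copy, or (memory_object_offset_t) -1 if there
 * is none before the end of the object.
 */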
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t mem_obj,
	memory_object_offset_t offset)
{
	compressor_pager_t pager;
	uint32_t num_chunks;
	uint32_t page_num;
	uint32_t chunk_idx;
	uint32_t slot_idx;
	uint32_t next_slot;
	compressor_slot_t *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		      COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		     slot_idx < pager->cpgr_num_slots;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
								 PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	     slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	     chunk_idx < num_chunks;
	     chunk_idx++, slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		     slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				next_slot = ((chunk_idx *
					      COMPRESSOR_SLOTS_PER_CHUNK) +
					     slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
								 PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}
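
/*
 * Occupied-slot accounting: get_count() reads cpgr_num_slots_occupied,
 * and vm_compressor_pager_count() adjusts it by a caller-supplied
 * delta, atomically when the VM object is only held shared.
 */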
unsigned int
vm_compressor_pager_get_count(
	memory_object_t mem_obj)
{
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}
void
vm_compressor_pager_count(
	memory_object_t mem_obj,
	int compressed_count_delta,
	boolean_t shared_lock,
	vm_object_t object __unused)
{
	compressor_pager_t pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		       (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
			    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}
#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	void **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */
	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;

	assert(mem_obj);

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */