/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	Memory Object Management.
 */

#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <machine/atomic.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t                 mem_obj,
	memory_object_control_t         control,
	memory_object_cluster_size_t    pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t              protection_required,
	memory_object_fault_info_t      fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    size,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_size_t            length,
	__unused vm_sync_t              flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t        mem_obj,
	__unused vm_prot_t              prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t        mem_obj,
	__unused boolean_t              reclaim_backing_store);

const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_data_unlock = compressor_memory_object_data_unlock,
	.memory_object_synchronize = compressor_memory_object_synchronize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_data_reclaim = compressor_memory_object_data_reclaim,
	.memory_object_pager_name = "compressor pager"
};

/* internal data structures */

struct {
	uint64_t        data_returns;
	uint64_t        data_requests;
	uint64_t        put;
	uint64_t        get;
	uint64_t        state_clr;
	uint64_t        state_get;
	uint64_t        transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t                       cpgr_lock;
	unsigned int                    cpgr_references;
	unsigned int                    cpgr_num_slots;
	unsigned int                    cpgr_num_slots_occupied;
	union {
		compressor_slot_t       cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t       *cpgr_dslots;   /* direct slots */
		compressor_slot_t       **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;

#define compressor_pager_lookup(_mem_obj_, _cpgr_)               \
	MACRO_BEGIN                                              \
	if (_mem_obj_ == NULL ||                                 \
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {  \
	        _cpgr_ = NULL;                                   \
	} else {                                                 \
	        _cpgr_ = (compressor_pager_t) _mem_obj_;         \
	}                                                        \
	MACRO_END

zone_t compressor_pager_zone;

lck_grp_t       compressor_pager_lck_grp;
lck_grp_attr_t  compressor_pager_lck_grp_attr;
lck_attr_t      compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE     (512)
#define COMPRESSOR_SLOTS_PER_CHUNK      (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))

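/*
 * How slot storage scales with object size (summary of the layout above; the
 * "128 slots per chunk" figure assumes the 4-byte compressor_slot_t defined
 * earlier):
 *  - objects of at most 2 pages keep their slots embedded in the pager
 *    structure itself (cpgr_eslots);
 *  - objects that fit in a single chunk (up to COMPRESSOR_SLOTS_PER_CHUNK,
 *    i.e. 512 / 4 = 128 pages) use one directly allocated slot array
 *    (cpgr_dslots);
 *  - larger objects use an array of chunk pointers (cpgr_islots), each chunk
 *    covering COMPRESSOR_SLOTS_PER_CHUNK pages and allocated lazily.
 */
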
/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t      pager,
	boolean_t               do_alloc,
	memory_object_offset_t  offset,
	compressor_slot_t       **slot_pp);

kern_return_t
compressor_memory_object_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t      pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t        mem_obj,
	__unused vm_prot_t              prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t        mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t        mem_obj,
	__unused boolean_t              reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t         mem_obj)
{
	memory_object_control_t control;
	compressor_pager_t      pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t         mem_obj)
{
	compressor_pager_t      pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t         mem_obj)
{
	compressor_pager_t      pager;
	unsigned int            num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		    num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t              protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_INVALID_ADDRESS;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been  */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages  */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface.        */

kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    size)
{
	compressor_pager_t      pager;
	memory_object_offset_t  cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   size,
	__unused memory_object_offset_t         *resid_offset,
	__unused int                            *io_error,
	__unused boolean_t                      dirty,
	__unused boolean_t                      kernel_copy,
	__unused int                            upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */

kern_return_t
compressor_memory_object_create(
	memory_object_size_t    new_size,
	memory_object_t         *new_mem_obj)
{
	compressor_pager_t      pager;
	int                     num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]));
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}

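/*
 * Note on the returned handle: the compressor_pager structure starts with a
 * generic "struct memory_object" header, so the pager pointer itself is
 * handed back as the memory_object_t.  compressor_pager_lookup() later
 * recognizes such objects by checking that mo_pager_ops points at
 * compressor_pager_ops before casting back to compressor_pager_t.
 */
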
unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t       *chunk,
	int                     num_slots,
	int                     flags,
	int                     *failures)
{
	int             i;
	int             retval;
	unsigned int    num_slots_freed;

	if (failures) {
		*failures = 0;
	}
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i]) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0) {
				num_slots_freed++;
			} else {
				if (retval == -2) {
					assert(flags & C_DONT_BLOCK);
				}

				if (failures) {
					*failures += 1;
				}
			}
		}
	}
	return num_slots_freed;
}

void
compressor_pager_slot_lookup(
	compressor_pager_t      pager,
	boolean_t               do_alloc,
	memory_object_offset_t  offset,
	compressor_slot_t       **slot_pp)
{
	int                     num_chunks;
	uint32_t                page_num;
	int                     chunk_idx;
	int                     slot_idx;
	compressor_slot_t       *chunk;
	compressor_slot_t       *t_chunk;

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				/* another thread allocated the chunk first */
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				t_chunk = NULL;
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}

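/*
 * compressor_pager_slot_lookup() leaves *slot_pp set to NULL when the offset
 * is out of range, when the page number overflows 32 bits, or when the offset
 * falls in an indirect chunk that has not been allocated yet and "do_alloc"
 * is FALSE.  Callers therefore treat a NULL or zero slot as "the compressor
 * does not have this page".
 */
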
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof(struct compressor_pager),
	    10000 * sizeof(struct compressor_pager),
	    8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}

kern_return_t
vm_compressor_pager_put(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	ppnum_t                 ppnum,
	void                    **current_chead,
	char                    *scratch_buf,
	int                     *compressed_count_delta_p)
{
	compressor_pager_t      pager;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/*
	 * This routine is called by the pageout thread.  The pageout thread
	 * cannot be blocked by read activities, so the grant of the vs lock
	 * must be done on a try versus a blocking basis.  The code below
	 * relies on the fact that the interface is synchronous.  Should this
	 * interface be again async for some type of pager in the future, the
	 * pages will have to be returned through a separate, asynchronous
	 * path.
	 */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * disconnected.
	 */

	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		return KERN_RESOURCE_SHORTAGE;
	}
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}

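/*
 * Accounting sketch for vm_compressor_pager_put(): *compressed_count_delta_p
 * reports the net change in compressed pages for this object.  It is
 * decremented when an already-occupied slot is released before recompression
 * and incremented once the new compressed copy is stored, so a successful
 * overwrite nets out to zero.
 */
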
kern_return_t
vm_compressor_pager_get(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	ppnum_t                 ppnum,
	int                     *my_fault_type,
	int                     flags,
	int                     *compressed_count_delta_p)
{
	compressor_pager_t      pager;
	kern_return_t           kr;
	compressor_slot_t       *slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int     retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1) {
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == 1) {
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}

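/*
 * Summary of vm_compressor_pager_get() results: KERN_SUCCESS means the page
 * was decompressed into "ppnum"; KERN_MEMORY_ERROR means the compressor never
 * had the page; KERN_MEMORY_FAILURE means the offset was out of range or the
 * decompression failed; KERN_FAILURE is returned when C_DONT_BLOCK prevented
 * a blocking swap-in.  *my_fault_type distinguishes a plain compressor fault
 * from one that required a swap-in.
 */
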
unsigned int
vm_compressor_pager_state_clr(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset)
{
	compressor_pager_t      pager;
	compressor_slot_t       *slot_p;
	unsigned int            num_slots_freed;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset)
{
	compressor_pager_t      pager;
	compressor_slot_t       *slot_p;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}

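/*
 * vm_compressor_pager_state_get() intentionally reports only two states:
 * VM_EXTERNAL_STATE_EXISTS when a non-zero slot is found for the offset, and
 * VM_EXTERNAL_STATE_ABSENT for everything else (out-of-range offsets,
 * unallocated chunks, or empty slots).
 */
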
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t         mem_obj,
	int                     flags)
{
	compressor_pager_t      pager;
	int                     num_chunks;
	int                     failures;
	int                     i;
	compressor_slot_t       *chunk;
	unsigned int            num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					flags,
					&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

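/*
 * Reaping honors the caller's "flags": when C_DONT_BLOCK is passed, slots
 * whose compressed data cannot be released without blocking are skipped (they
 * show up as failures in compressor_pager_slots_chunk_free() above), and the
 * corresponding chunks are kept so they can be reaped later.
 */
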
void
vm_compressor_pager_transfer(
	memory_object_t         dst_mem_obj,
	memory_object_offset_t  dst_offset,
	memory_object_t         src_mem_obj,
	memory_object_offset_t  src_offset)
{
	compressor_pager_t      src_pager, dst_pager;
	compressor_slot_t       *src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
	    &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
	    &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}

memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset)
{
	compressor_pager_t      pager;
	uint32_t                num_chunks;
	uint32_t                page_num;
	uint32_t                chunk_idx;
	uint32_t                slot_idx;
	compressor_slot_t       *chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
	    COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
				       PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++, slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
				       PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}

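/*
 * Usage sketch (illustrative only): a caller scanning every compressed page
 * of an object could iterate with something like
 *
 *	for (offset = vm_compressor_pager_next_compressed(mem_obj, 0);
 *	    offset != (memory_object_offset_t) -1;
 *	    offset = vm_compressor_pager_next_compressed(mem_obj,
 *	        offset + PAGE_SIZE)) {
 *		... process the compressed page at "offset" ...
 *	}
 *
 * since (memory_object_offset_t) -1 is the "no more compressed pages"
 * sentinel returned above.
 */
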
unsigned int
vm_compressor_pager_get_count(
	memory_object_t mem_obj)
{
	compressor_pager_t      pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}

void
vm_compressor_pager_count(
	memory_object_t mem_obj,
	int             compressed_count_delta,
	boolean_t       shared_lock,
	vm_object_t     object __unused)
{
	compressor_pager_t      pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		    (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
		    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	void                    **current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t       *slot_p;
	compressor_pager_t      dst_pager;

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */