/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Memory Object Management.
 */
#include <kern/host_statistics.h>
#include <kern/kalloc.h>
#include <kern/ipc_kobject.h>

#include <machine/atomic.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	memory_object_fault_info_t	fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store);
const struct memory_object_pager_ops compressor_pager_ops = {
	.memory_object_reference = compressor_memory_object_reference,
	.memory_object_deallocate = compressor_memory_object_deallocate,
	.memory_object_init = compressor_memory_object_init,
	.memory_object_terminate = compressor_memory_object_terminate,
	.memory_object_data_request = compressor_memory_object_data_request,
	.memory_object_data_return = compressor_memory_object_data_return,
	.memory_object_data_initialize = compressor_memory_object_data_initialize,
	.memory_object_data_unlock = compressor_memory_object_data_unlock,
	.memory_object_synchronize = compressor_memory_object_synchronize,
	.memory_object_map = compressor_memory_object_map,
	.memory_object_last_unmap = compressor_memory_object_last_unmap,
	.memory_object_data_reclaim = compressor_memory_object_data_reclaim,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "compressor pager"
};

/* internal data structures */
struct {
	uint64_t	data_returns;
	uint64_t	data_requests;
	uint64_t	put;
	uint64_t	get;
	uint64_t	state_clr;
	uint64_t	state_get;
	uint64_t	transfer;
} compressor_pager_stats;

typedef int compressor_slot_t;

typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t			cpgr_lock;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define cpgr_references			cpgr_hdr.mo_ref
#else
	os_ref_atomic_t			cpgr_references;
#endif
	unsigned int			cpgr_num_slots;
	unsigned int			cpgr_num_slots_occupied;
	union {
		compressor_slot_t	cpgr_eslots[2];	/* embedded slots */
		compressor_slot_t	*cpgr_dslots;	/* direct slots */
		compressor_slot_t	**cpgr_islots;	/* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
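
/*
 * Slot storage comes in three layouts, chosen by object size when the
 * pager is created: objects of at most 2 pages keep their slots inline in
 * cpgr_eslots, objects that fit in a single chunk use one directly
 * allocated slot array (cpgr_dslots), and larger objects use an array of
 * pointers to lazily allocated chunks (cpgr_islots).
 */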

#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {	\
	        _cpgr_ = NULL;						\
	} else {							\
	        _cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END

zone_t compressor_pager_zone;

LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager");

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)

#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
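
/*
 * A chunk is 512 bytes' worth of compressor_slot_t entries; with 4-byte
 * slots that is 128 slots per chunk, so only objects larger than
 * COMPRESSOR_SLOTS_PER_CHUNK pages need the indirect (array-of-chunks)
 * layout.
 */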

/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
    int num_slots,
    int flags,
    int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp);

#if defined(__LP64__)

/* restricted VA zones for slots */

#define NUM_SLOTS_ZONES		3

static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = {
	16,
	64,
	COMPRESSOR_SLOTS_CHUNK_SIZE
};

static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = {
	"compressor_slots.16",
	"compressor_slots.64",
	"compressor_slots.512"
};

static zone_t compressor_slots_zones[NUM_SLOTS_ZONES];

#endif /* defined(__LP64__) */
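
/*
 * On LP64 the slot arrays come from these dedicated zones, created in the
 * VA-restricted submap by vm_compressor_pager_init() below; three size
 * classes keep small objects from paying for a full 512-byte chunk.
 */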

static void zfree_slot_array(compressor_slot_t *slots, size_t size);
static compressor_slot_t *zalloc_slot_array(size_t size, zalloc_flags_t);

kern_return_t
compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t	pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_init: bad request");
	}
	pager->cpgr_hdr.mo_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
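
/*
 * The entry points below (synchronize, map, last_unmap, data_reclaim) are
 * never expected to be called for a compressor-backed memory object; each
 * one panics so that a misdirected call is caught immediately.
 */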

kern_return_t
compressor_memory_object_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		flags)
{
	panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	compressor_pager_t	pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_hdr.mo_control;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}

void
compressor_memory_object_reference(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	os_ref_retain_locked_raw(&pager->cpgr_references, NULL);
	compressor_pager_unlock(pager);
}

void
compressor_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;
	unsigned int		num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	compressor_pager_lock(pager);
	if (os_ref_release_locked_raw(&pager->cpgr_references, NULL) > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
		panic("compressor_memory_object_deallocate(): bad request");
	}

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int num_chunks;
	int i;
	compressor_slot_t *chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					0,
					NULL);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kheap_free(KHEAP_DEFAULT, pager->cpgr_slots.cpgr_islots,
		    num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
		pager->cpgr_slots.cpgr_dslots = NULL;
		zfree_slot_array(chunk,
		    (pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0])));
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed =
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			0,
			NULL);
	}

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}

kern_return_t
compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
		panic("compressor_memory_object_data_request(): bad alignment");
	}

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/*  It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case; */
/* the kernel writes back one page at a time through this interface. */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size)
{
	compressor_pager_t	pager;
	memory_object_offset_t	cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	    cur_offset < offset + size;
	    cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}

kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}

kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}

/*
 *	Routine:	compressor_memory_object_create
 *	Purpose:
 *		Handle requests for memory objects from the
 *		kernel.
 *	Notes:
 *		Because we only give out the default memory
 *		manager port to the kernel, we don't have to
 *		be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t	new_size,
	memory_object_t		*new_mem_obj)
{
	compressor_pager_t	pager;
	int			num_chunks;

	if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	os_ref_init_raw(&pager->cpgr_references, NULL);
	pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE);
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kheap_alloc(KHEAP_DEFAULT,
		    num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]),
		    Z_WAITOK | Z_ZERO);
	} else if (pager->cpgr_num_slots > 2) {
		pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots *
		    sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO);
	} else {
		pager->cpgr_slots.cpgr_eslots[0] = 0;
		pager->cpgr_slots.cpgr_eslots[1] = 0;
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */
	pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops;
	pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}

unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t	*chunk,
	int			num_slots,
	int			flags,
	int			*failures)
{
	int		i;
	int		retval;
	unsigned int	num_slots_freed;

	if (failures) {
		*failures = 0;
	}
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i] != 0) {
			retval = vm_compressor_free(&chunk[i], flags);

			if (retval == 0) {
				num_slots_freed++;
			} else {
				if (retval == -2) {
					assert(flags & C_DONT_BLOCK);
				}

				if (failures) {
					(*failures)++;
				}
			}
		}
	}
	return num_slots_freed;
}
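
/*
 * compressor_pager_slot_lookup() returns, through *slot_pp, a pointer to
 * the slot covering "offset": NULL if the offset is out of range or if the
 * covering chunk has not been allocated yet and do_alloc is FALSE; with
 * do_alloc TRUE a missing chunk is allocated on the fly.
 */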
void
compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp)
{
	unsigned int		num_chunks;
	uint32_t		page_num;
	uint32_t		chunk_idx;
	uint32_t		slot_idx;
	compressor_slot_t	*chunk;
	compressor_slot_t	*t_chunk;

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE,
			    Z_WAITOK | Z_ZERO);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				/*
				 * On some platforms, the memory stores from
				 * the bzero(t_chunk) above might not have been
				 * made visible and another thread might see
				 * the contents of this new chunk before it's
				 * been fully zero-filled.
				 * This memory barrier should take care of this
				 * according to the platform requirements.
				 */
				os_atomic_thread_fence(release);

				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk != NULL) {
				/* another thread already allocated this chunk */
				zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else if (pager->cpgr_num_slots > 2) {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx];
	}
}

void
vm_compressor_pager_init(void)
{
	/* embedded slot pointers in compressor_pager get packed, so VA restricted */
	compressor_pager_zone = zone_create_ext("compressor_pager",
	    sizeof(struct compressor_pager), ZC_NOENCRYPT,
	    ZONE_ID_ANY, ^(zone_t z){
#if defined(__LP64__)
		zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
#else
		(void)z;
#endif /* defined(__LP64__) */
	});

#if defined(__LP64__)
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		compressor_slots_zones[idx] = zone_create_ext(
			compressor_slots_zones_names[idx],
			compressor_slots_zones_sizes[idx], ZC_NONE,
			ZONE_ID_ANY, ^(zone_t z){
			zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED);
		});
	}
#endif /* defined(__LP64__) */

	vm_compressor_init();
}
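
/*
 * Slot arrays are carved out of the three compressor_slots zones on LP64;
 * the helpers below pick the first zone whose element size can hold the
 * request, so an array must be freed with the same size it was allocated
 * with in order to land back in the same zone.
 */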
static compressor_slot_t *
zalloc_slot_array(size_t size, zalloc_flags_t flags)
{
#if defined(__LP64__)
	compressor_slot_t *slots = NULL;

	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		if (size > compressor_slots_zones_sizes[idx]) {
			continue;
		}
		slots = zalloc_flags(compressor_slots_zones[idx], flags);
		break;
	}
	return slots;
#else /* defined(__LP64__) */
	return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags);
#endif /* !defined(__LP64__) */
}

static void
zfree_slot_array(compressor_slot_t *slots, size_t size)
{
#if defined(__LP64__)
	assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE);
	for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) {
		if (size > compressor_slots_zones_sizes[idx]) {
			continue;
		}
		zfree(compressor_slots_zones[idx], slots);
		break;
	}
#else /* defined(__LP64__) */
	kheap_free(KHEAP_DATA_BUFFERS, slots, size);
#endif /* !defined(__LP64__) */
}
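
/*
 * vm_compressor_pager_put() compresses one page into this pager's slot for
 * "offset".  *compressed_count_delta_p reports the net change in occupied
 * slots: +1 when a previously empty slot is filled, 0 when an existing
 * compressed copy is replaced, and -1 when an old copy is discarded but
 * the new compression then fails for lack of space.
 */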
kern_return_t
vm_compressor_pager_put(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	void			**current_chead,
	char			*scratch_buf,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout thread */
	/* cannot be blocked by read activities, so the grant of the vs lock */
	/* must be done on a try versus a blocking basis.  The code below    */
	/* relies on the fact that the interface is synchronous.  Should     */
	/* this interface be again async for some type of pager in the       */
	/* future the pages will have to be returned through a separate,     */
	/* asynchronous path. */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		*compressed_count_delta_p -= 1;
	}

	/*
	 * If the compressor operation succeeds, we presumably don't need to
	 * undo any previous WIMG update, as all live mappings should be
	 * gone.
	 */

	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
		return KERN_RESOURCE_SHORTAGE;
	}
	*compressed_count_delta_p += 1;

	return KERN_SUCCESS;
}
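
/*
 * vm_compressor_pager_get() maps vm_compressor_get()'s return value onto
 * the fault type and return code: 1 is reported as a swap-in fault, -2 is
 * only expected when C_DONT_BLOCK was passed, and other failures become
 * KERN_MEMORY_FAILURE.
 */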
kern_return_t
vm_compressor_pager_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	int			*my_fault_type,
	int			flags,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int	retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1) {
			kr = KERN_MEMORY_FAILURE;
		} else if (retval == 1) {
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		} else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}

unsigned int
vm_compressor_pager_state_clr(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;
	unsigned int		num_slots_freed;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
	}

	return num_slots_freed;
}

vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		    __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE >= pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}
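
/*
 * vm_compressor_pager_reap_pages() walks every slot of the object and asks
 * the compressor to free it, honoring "flags" (e.g. C_DONT_BLOCK); a chunk
 * is only returned to its zone if none of its slots failed to free.  The
 * return value is the number of slots actually freed.
 */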
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t		mem_obj,
	int			flags)
{
	compressor_pager_t	pager;
	int			num_chunks;
	int			failures;
	int			i;
	compressor_slot_t	*chunk;
	unsigned int		num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
				    compressor_pager_slots_chunk_free(
					chunk,
					COMPRESSOR_SLOTS_PER_CHUNK,
					flags,
					&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else if (pager->cpgr_num_slots > 2) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	} else {
		chunk = &pager->cpgr_slots.cpgr_eslots[0];
		num_slots_freed +=
		    compressor_pager_slots_chunk_free(
			chunk,
			pager->cpgr_num_slots,
			flags,
			NULL);
	}

	compressor_pager_unlock(pager);

	return num_slots_freed;
}

void
vm_compressor_pager_transfer(
	memory_object_t		dst_mem_obj,
	memory_object_offset_t	dst_offset,
	memory_object_t		src_mem_obj,
	memory_object_offset_t	src_offset)
{
	compressor_pager_t	src_pager, dst_pager;
	compressor_slot_t	*src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
	    &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
	    &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
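
/*
 * vm_compressor_pager_next_compressed() returns the object offset of the
 * first page at or beyond "offset" that has a compressed copy, or
 * (memory_object_offset_t) -1 if there is none; it walks the slot chunks
 * directly rather than probing one page at a time.
 */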
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	uint32_t		num_chunks;
	uint32_t		page_num;
	uint32_t		chunk_idx;
	uint32_t		slot_idx;
	compressor_slot_t	*chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset / PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num >= pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
	    COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		if (pager->cpgr_num_slots > 2) {
			chunk = pager->cpgr_slots.cpgr_dslots;
		} else {
			chunk = &pager->cpgr_slots.cpgr_eslots[0];
		}
		for (slot_idx = page_num;
		    slot_idx < pager->cpgr_num_slots;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
				       PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	    slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	    chunk_idx < num_chunks;
	    chunk_idx++, slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		    slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		    slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
				    COMPRESSOR_SLOTS_PER_CHUNK) +
				    slot_idx);
				if (next_slot >= pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
				       PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}

unsigned int
vm_compressor_pager_get_count(
	memory_object_t mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return 0;
	}

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}
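
/*
 * vm_compressor_pager_count() adjusts cpgr_num_slots_occupied by the
 * caller-supplied delta.  With only the shared VM object lock held the
 * update must be atomic (OSAddAtomic); with the exclusive lock a plain
 * add is sufficient.
 */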
void
vm_compressor_pager_count(
	memory_object_t	mem_obj,
	int		compressed_count_delta,
	boolean_t	shared_lock,
	vm_object_t	object __unused)
{
	compressor_pager_t	pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL) {
		return;
	}

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		    (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
		    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}

#if CONFIG_FREEZE
kern_return_t
vm_compressor_pager_relocate(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	void			**current_chead)
{
	/*
	 * Has the page at this offset been compressed?
	 */

	compressor_slot_t *slot_p;
	compressor_pager_t dst_pager;

	compressor_pager_lookup(mem_obj, dst_pager);
	if (dst_pager == NULL) {
		return KERN_FAILURE;
	}

	compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p);
	return vm_compressor_relocate(current_chead, slot_p);
}
#endif /* CONFIG_FREEZE */

#if DEVELOPMENT || DEBUG

kern_return_t
vm_compressor_pager_inject_error(memory_object_t mem_obj,
    memory_object_offset_t offset)
{
	kern_return_t result = KERN_FAILURE;
	compressor_slot_t *slot_p;
	compressor_pager_t pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager != NULL) {
		compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
		if (slot_p != NULL && *slot_p != 0) {
			vm_compressor_inject_error(slot_p);
			result = KERN_SUCCESS;
		}
	}

	return result;
}

#endif /* DEVELOPMENT || DEBUG */