/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Memory Object Management.
 */
#include <kern/host_statistics.h>
#include <kern/kalloc.h>

#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>

#include <vm/memory_object.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_external.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/* memory_object interfaces */
void compressor_memory_object_reference(memory_object_t mem_obj);
void compressor_memory_object_deallocate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	memory_object_cluster_size_t pager_page_size);
kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	memory_object_fault_info_t	fault_info);
kern_return_t compressor_memory_object_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags);
kern_return_t compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size);
kern_return_t compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access);
kern_return_t compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags);
kern_return_t compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot);
kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj);
kern_return_t compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store);
const struct memory_object_pager_ops compressor_pager_ops = {
	compressor_memory_object_reference,
	compressor_memory_object_deallocate,
	compressor_memory_object_init,
	compressor_memory_object_terminate,
	compressor_memory_object_data_request,
	compressor_memory_object_data_return,
	compressor_memory_object_data_initialize,
	compressor_memory_object_data_unlock,
	compressor_memory_object_synchronize,
	compressor_memory_object_map,
	compressor_memory_object_last_unmap,
	compressor_memory_object_data_reclaim,
	"compressor pager"
};
/* internal data structures */

struct {
	uint64_t	data_returns;
	uint64_t	data_requests;
	uint64_t	put;
	uint64_t	get;
	uint64_t	state_clr;
	uint64_t	state_get;
	uint64_t	transfer;
} compressor_pager_stats;
typedef int compressor_slot_t;
typedef struct compressor_pager {
	struct ipc_object_header	cpgr_pager_header; /* fake ip_kotype */
	memory_object_pager_ops_t	cpgr_pager_ops;	/* == &compressor_pager_ops */
	memory_object_control_t		cpgr_control;
	lck_mtx_t			cpgr_lock;

	unsigned int			cpgr_references;
	unsigned int			cpgr_num_slots;
	unsigned int			cpgr_num_slots_occupied_pager;
	unsigned int			cpgr_num_slots_occupied;
	union {
		compressor_slot_t	*cpgr_dslots;
		compressor_slot_t	**cpgr_islots;
	} cpgr_slots;
} *compressor_pager_t;
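
/*
 * The slot store is two-level only when it needs to be: an object small
 * enough to fit in one chunk keeps its slots in a single directly-indexed
 * array (cpgr_dslots); larger objects use an array of chunk pointers
 * (cpgr_islots) whose chunks are allocated lazily, on first use, by
 * compressor_pager_slot_lookup().
 */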
#define compressor_pager_lookup(_mem_obj_, _cpgr_)			\
	MACRO_BEGIN							\
	if (_mem_obj_ == NULL ||					\
	    _mem_obj_->mo_pager_ops != &compressor_pager_ops) {		\
		_cpgr_ = NULL;						\
	} else {							\
		_cpgr_ = (compressor_pager_t) _mem_obj_;		\
	}								\
	MACRO_END
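
/*
 * The mo_pager_ops comparison doubles as a type check: a memory_object_t is
 * only downcast to compressor_pager_t after verifying that it is really
 * backed by this pager; otherwise the macro yields NULL and callers are
 * expected to cope with that.
 */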
zone_t		compressor_pager_zone;

lck_grp_t	compressor_pager_lck_grp;
lck_grp_attr_t	compressor_pager_lck_grp_attr;
lck_attr_t	compressor_pager_lck_attr;

#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
#define COMPRESSOR_SLOTS_CHUNK_SIZE	(512)
#define COMPRESSOR_SLOTS_PER_CHUNK	(COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t))
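
/*
 * Worked example (assuming the usual 4-byte compressor_slot_t and 4KB
 * pages): 512 / 4 = 128 slots per chunk, so one chunk covers
 * 128 * 4KB = 512KB of object space, and any object larger than that
 * switches to the indirect cpgr_islots scheme.
 */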
/* forward declarations */
unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk,
					       int num_slots,
					       int flags,
					       int *failures);
void compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp);
kern_return_t
compressor_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	compressor_pager_t	pager;

	assert(pager_page_size == PAGE_SIZE);

	memory_object_control_reference(control);

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_init: bad request");
	pager->cpgr_control = control;

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
kern_return_t
compressor_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);

	memory_object_synchronize_completed(pager->cpgr_control, offset, length);

	return KERN_SUCCESS;
}
kern_return_t
compressor_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("compressor_memory_object_map");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("compressor_memory_object_last_unmap");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_data_reclaim(
	__unused memory_object_t	mem_obj,
	__unused boolean_t		reclaim_backing_store)
{
	panic("compressor_memory_object_data_reclaim");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	compressor_pager_t	pager;

	/*
	 * control port is a receive right, not a send right.
	 */

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = pager->cpgr_control;
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;

	compressor_pager_unlock(pager);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}
void
compressor_memory_object_reference(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	assert(pager->cpgr_references > 0);
	pager->cpgr_references++;
	compressor_pager_unlock(pager);
}
void
compressor_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	compressor_pager_t	pager;
	unsigned int		num_slots_freed;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	compressor_pager_lock(pager);
	if (--pager->cpgr_references > 0) {
		compressor_pager_unlock(pager);
		return;
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (pager->cpgr_control != MEMORY_OBJECT_CONTROL_NULL)
		panic("compressor_memory_object_deallocate(): bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	compressor_pager_unlock(pager);

	/* free the compressor slots */
	int			num_chunks;
	int			i;
	compressor_slot_t	*chunk;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed =
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						0,
						NULL);
				assert(pager->cpgr_num_slots_occupied_pager >=
				       num_slots_freed);
				OSAddAtomic(-num_slots_freed,
					    &pager->cpgr_num_slots_occupied_pager);
				assert(pager->cpgr_num_slots_occupied_pager >= 0);
				pager->cpgr_slots.cpgr_islots[i] = NULL;
				kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		kfree(pager->cpgr_slots.cpgr_islots,
		      num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		pager->cpgr_slots.cpgr_islots = NULL;
	} else {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed =
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				0,
				NULL);
		assert(pager->cpgr_num_slots_occupied_pager >= num_slots_freed);
		OSAddAtomic(-num_slots_freed,
			    &pager->cpgr_num_slots_occupied_pager);
		assert(pager->cpgr_num_slots_occupied_pager >= 0);
		pager->cpgr_slots.cpgr_dslots = NULL;
		kfree(chunk,
		      (pager->cpgr_num_slots *
		       sizeof (pager->cpgr_slots.cpgr_dslots[0])));
	}
	assert(pager->cpgr_num_slots_occupied_pager == 0);

	compressor_pager_lock_destroy(pager);
	zfree(compressor_pager_zone, pager);
}
kern_return_t
compressor_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.data_requests++;

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
		panic("compressor_memory_object_data_request(): bad alignment");

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_FAILURE;
	}

	compressor_pager_lookup(mem_obj, pager);

	if (length == 0) {
		/* we're only querying the pager for this page */
	} else {
		panic("compressor: data_request");
	}

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_FAILURE;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	return kr;
}
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */
kern_return_t
compressor_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	size)
{
	compressor_pager_t	pager;
	memory_object_offset_t	cur_offset;

	compressor_pager_lookup(mem_obj, pager);
	compressor_pager_lock(pager);

	for (cur_offset = offset;
	     cur_offset < offset + size;
	     cur_offset += PAGE_SIZE) {
		panic("do a data_return() if slot for this page is empty");
	}

	compressor_pager_unlock(pager);

	return KERN_SUCCESS;
}
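
/*
 * Note: the loop body above is effectively a placeholder.  As the panic
 * string suggests, a full implementation would issue a data_return() for
 * each page whose compressor slot is still empty; this path is not
 * expected to be exercised with the compressor pager.
 */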
kern_return_t
compressor_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	panic("compressor_memory_object_data_unlock()");
	return KERN_FAILURE;
}
kern_return_t
compressor_memory_object_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	panic("compressor: data_return");
	return KERN_FAILURE;
}
/*
 * Routine:	compressor_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
compressor_memory_object_create(
	memory_object_size_t	new_size,
	memory_object_t		*new_mem_obj)
{
	compressor_pager_t	pager;
	int			num_chunks;

	if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
		/* 32-bit overflow for number of pages */
		panic("%s: size 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) new_size);
		return KERN_INVALID_ARGUMENT;
	}

	pager = (compressor_pager_t) zalloc(compressor_pager_zone);
	if (pager == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_lock_init(pager);
	pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->cpgr_references = 1;
	pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);
	pager->cpgr_num_slots_occupied_pager = 0;
	pager->cpgr_num_slots_occupied = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
		bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0]));
	} else {
		pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
		bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0]));
	}

	/*
	 * Set up associations between this memory object
	 * and this compressor_pager structure
	 */

	pager->cpgr_pager_ops = &compressor_pager_ops;
	pager->cpgr_pager_header.io_bits = IKOT_MEMORY_OBJECT;

	*new_mem_obj = (memory_object_t) pager;
	return KERN_SUCCESS;
}
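
/*
 * Rough call sequence, as a sketch rather than a verbatim trace (the
 * variable names here are illustrative only): once the VM layer decides an
 * object's pages should live in the compressor, it does roughly
 *
 *	memory_object_t mo;
 *	compressor_memory_object_create(object_size, &mo);
 *	...
 *	vm_compressor_pager_put(mo, offset, ppnum, &chead, scratch, &delta);
 *
 * and later brings pages back with vm_compressor_pager_get().
 */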
unsigned int
compressor_pager_slots_chunk_free(
	compressor_slot_t	*chunk,
	int			num_slots,
	int			flags,
	int			*failures)
{
	int		i;
	unsigned int	num_slots_freed;

	if (failures)
		*failures = 0;
	num_slots_freed = 0;
	for (i = 0; i < num_slots; i++) {
		if (chunk[i]) {
			if (vm_compressor_free(&chunk[i], flags) == 0)
				num_slots_freed++;
			else {
				assert(flags & C_DONT_BLOCK);

				if (failures)
					*failures += 1;
			}
		}
	}
	return num_slots_freed;
}
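
/*
 * Returns the number of slots actually freed.  With C_DONT_BLOCK set,
 * vm_compressor_free() may decline to free a slot rather than block;
 * (assuming the reconstructed signature above) such slots are tallied in
 * *failures when the caller passes a non-NULL pointer, so the caller can
 * tell the chunk is not empty yet.
 */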
void
compressor_pager_slot_lookup(
	compressor_pager_t	pager,
	boolean_t		do_alloc,
	memory_object_offset_t	offset,
	compressor_slot_t	**slot_pp)
{
	int			num_chunks;
	uint32_t		page_num;
	int			chunk_idx;
	int			slot_idx;
	compressor_slot_t	*chunk;
	compressor_slot_t	*t_chunk;

	page_num = (uint32_t)(offset/PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		*slot_pp = NULL;
		return;
	}
	if (page_num > pager->cpgr_num_slots) {
		/* out of range */
		*slot_pp = NULL;
		return;
	}

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK;
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];

		if (chunk == NULL && do_alloc) {
			t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE);
			bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);

			compressor_pager_lock(pager);

			if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) {
				chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk;
				t_chunk = NULL;
			}
			compressor_pager_unlock(pager);

			if (t_chunk) {
				kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
			}
		}
		if (chunk == NULL) {
			*slot_pp = NULL;
		} else {
			slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
			*slot_pp = &chunk[slot_idx];
		}
	} else {
		slot_idx = page_num;
		*slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx];
	}
}
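
/*
 * Chunk allocation above is optimistic: the new chunk is kalloc'ed and
 * zeroed outside the pager lock, then installed under the lock only if no
 * other thread raced us to it; the loser of the race simply kfree()s its
 * copy.  This keeps the lock hold time short on the common,
 * already-allocated path.
 */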
void
vm_compressor_pager_init(void)
{
	lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr);
	lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
	lck_attr_setdefault(&compressor_pager_lck_attr);

	compressor_pager_zone = zinit(sizeof (struct compressor_pager),
				      10000 * sizeof (struct compressor_pager),
				      8192, "compressor_pager");
	zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE);

	vm_compressor_init();
}
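
/*
 * For reference, zinit() takes the element size, the maximum zone size
 * (sized here for 10000 pagers), the allocation chunk size (8192 bytes),
 * and the zone name.  Z_CALLERACCT FALSE keeps these allocations out of
 * the calling task's ledger; Z_NOENCRYPT TRUE exempts the zone from
 * hibernation-time encryption.
 */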
kern_return_t
vm_compressor_pager_put(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	void			**current_chead,
	char			*scratch_buf,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.put++;

	*compressed_count_delta_p = 0;

	/* This routine is called by the pageout thread.  The pageout thread */
	/* cannot be blocked by read activities, so the grant of the vs lock */
	/* must be done on a try versus a blocking basis.  The code below    */
	/* relies on the fact that the interface is synchronous.  Should     */
	/* this interface be again async for some type of pager in the       */
	/* future, the pages will have to be returned through a separate,    */
	/* asynchronous path.                                                */

	compressor_pager_lookup(mem_obj, pager);

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_RESOURCE_SHORTAGE;
	}

	compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);

	if (slot_p == NULL) {
		/* out of range ? */
		panic("vm_compressor_pager_put: out of range");
	}
	if (*slot_p != 0) {
		/*
		 * Already compressed: forget about the old one.
		 *
		 * This can happen after a vm_object_do_collapse() when
		 * the "backing_object" had some pages paged out and the
		 * "object" had an equivalent page resident.
		 */
		vm_compressor_free(slot_p, 0);
		assert(pager->cpgr_num_slots_occupied_pager >= 1);
		OSAddAtomic(-1, &pager->cpgr_num_slots_occupied_pager);
		assert(pager->cpgr_num_slots_occupied_pager >= 0);
		*compressed_count_delta_p -= 1;
	}
	if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf))
		return (KERN_RESOURCE_SHORTAGE);
	assert(pager->cpgr_num_slots_occupied_pager >= 0);
	OSAddAtomic(+1, &pager->cpgr_num_slots_occupied_pager);
	assert(pager->cpgr_num_slots_occupied_pager > 0);
	*compressed_count_delta_p += 1;

	return (KERN_SUCCESS);
}
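
/*
 * The *compressed_count_delta_p out-parameter reports the net change in
 * occupied slots (-1 for a replaced slot, +1 for the new one).  The caller
 * is expected to fold that delta into cpgr_num_slots_occupied through
 * vm_compressor_pager_count() while holding the VM object lock.
 */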
kern_return_t
vm_compressor_pager_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	ppnum_t			ppnum,
	int			*my_fault_type,
	int			flags,
	int			*compressed_count_delta_p)
{
	compressor_pager_t	pager;
	kern_return_t		kr;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.get++;

	*compressed_count_delta_p = 0;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return KERN_MEMORY_ERROR;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
		/* out of range */
		kr = KERN_MEMORY_FAILURE;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		kr = KERN_MEMORY_ERROR;
	} else {
		/* compressor does have this page */
		kr = KERN_SUCCESS;
	}
	*my_fault_type = DBG_COMPRESSOR_FAULT;

	if (kr == KERN_SUCCESS) {
		int	retval;

		/* get the page from the compressor */
		retval = vm_compressor_get(ppnum, slot_p, flags);
		if (retval == -1)
			kr = KERN_MEMORY_FAILURE;
		else if (retval == 1)
			*my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		else if (retval == -2) {
			assert((flags & C_DONT_BLOCK));
			kr = KERN_FAILURE;
		}
	}

	if (kr == KERN_SUCCESS) {
		assert(slot_p != NULL);
		if (*slot_p != 0) {
			/*
			 * We got the page for a copy-on-write fault
			 * and we kept the original in place.  Slot
			 * is still occupied.
			 */
		} else {
			assert(pager->cpgr_num_slots_occupied_pager >= 1);
			OSAddAtomic(-1, &pager->cpgr_num_slots_occupied_pager);
			assert(pager->cpgr_num_slots_occupied_pager >= 0);
			*compressed_count_delta_p -= 1;
		}
	}

	return kr;
}
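
/*
 * vm_compressor_get() return codes as used above (reconstructed from the
 * checks, so treat this as a sketch): 0 means the page was decompressed
 * from memory, 1 means it first had to be swapped in (hence the SWAPIN
 * fault type), -1 is a hard failure, and -2 means the call would have
 * blocked while C_DONT_BLOCK was set.
 */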
unsigned int
vm_compressor_pager_state_clr(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;
	unsigned int		num_slots_freed;

	compressor_pager_stats.state_clr++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return 0;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	num_slots_freed = 0;
	if (slot_p && *slot_p != 0) {
		vm_compressor_free(slot_p, 0);
		num_slots_freed++;
		assert(*slot_p == 0);
		assert(pager->cpgr_num_slots_occupied_pager >= 1);
		OSAddAtomic(-1, &pager->cpgr_num_slots_occupied_pager);
		assert(pager->cpgr_num_slots_occupied_pager >= 0);
	}

	return num_slots_freed;
}
vm_external_state_t
vm_compressor_pager_state_get(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	compressor_slot_t	*slot_p;

	compressor_pager_stats.state_get++;

	if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
		/* overflow */
		panic("%s: offset 0x%llx overflow\n",
		      __FUNCTION__, (uint64_t) offset);
		return VM_EXTERNAL_STATE_ABSENT;
	}

	compressor_pager_lookup(mem_obj, pager);

	/* find the compressor slot for that page */
	compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);

	if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
		/* out of range */
		return VM_EXTERNAL_STATE_ABSENT;
	} else if (slot_p == NULL || *slot_p == 0) {
		/* compressor does not have this page */
		return VM_EXTERNAL_STATE_ABSENT;
	} else {
		/* compressor does have this page */
		return VM_EXTERNAL_STATE_EXISTS;
	}
}
unsigned int
vm_compressor_pager_reap_pages(
	memory_object_t		mem_obj,
	int			flags)
{
	compressor_pager_t	pager;
	int			num_chunks;
	int			failures;
	int			i;
	compressor_slot_t	*chunk;
	unsigned int		num_slots_freed;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	compressor_pager_lock(pager);

	/* reap the compressor slots */
	num_slots_freed = 0;

	num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
	if (num_chunks > 1) {
		/* we have an array of chunks */
		for (i = 0; i < num_chunks; i++) {
			chunk = pager->cpgr_slots.cpgr_islots[i];
			if (chunk != NULL) {
				num_slots_freed +=
					compressor_pager_slots_chunk_free(
						chunk,
						COMPRESSOR_SLOTS_PER_CHUNK,
						flags,
						&failures);
				if (failures == 0) {
					pager->cpgr_slots.cpgr_islots[i] = NULL;
					kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE);
				}
			}
		}
	} else {
		chunk = pager->cpgr_slots.cpgr_dslots;
		num_slots_freed +=
			compressor_pager_slots_chunk_free(
				chunk,
				pager->cpgr_num_slots,
				flags,
				NULL);
	}
	OSAddAtomic(-num_slots_freed, &pager->cpgr_num_slots_occupied_pager);

	compressor_pager_unlock(pager);

	return num_slots_freed;
}
unsigned int
vm_compressor_pager_get_slots_occupied(
	memory_object_t	mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	assert(pager->cpgr_num_slots_occupied_pager >= 0);

	return pager->cpgr_num_slots_occupied_pager;
}
void
vm_compressor_pager_transfer(
	memory_object_t		dst_mem_obj,
	memory_object_offset_t	dst_offset,
	memory_object_t		src_mem_obj,
	memory_object_offset_t	src_offset)
{
	compressor_pager_t	src_pager, dst_pager;
	compressor_slot_t	*src_slot_p, *dst_slot_p;

	compressor_pager_stats.transfer++;

	/* find the compressor slot for the destination */
	assert((uint32_t) dst_offset == dst_offset);
	compressor_pager_lookup(dst_mem_obj, dst_pager);
	assert(dst_offset / PAGE_SIZE <= dst_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset,
				     &dst_slot_p);
	assert(dst_slot_p != NULL);
	assert(*dst_slot_p == 0);

	/* find the compressor slot for the source */
	assert((uint32_t) src_offset == src_offset);
	compressor_pager_lookup(src_mem_obj, src_pager);
	assert(src_offset / PAGE_SIZE <= src_pager->cpgr_num_slots);
	compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset,
				     &src_slot_p);
	assert(src_slot_p != NULL);
	assert(*src_slot_p != 0);

	/* transfer the slot from source to destination */
	vm_compressor_transfer(dst_slot_p, src_slot_p);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied_pager);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied_pager);
	OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied);
	OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied);
}
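
/*
 * Both per-pager counters move together here: cpgr_num_slots_occupied_pager
 * (maintained privately by this file) and cpgr_num_slots_occupied (normally
 * maintained through vm_compressor_pager_count()), because a transfer
 * bypasses that path.
 */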
memory_object_offset_t
vm_compressor_pager_next_compressed(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset)
{
	compressor_pager_t	pager;
	uint32_t		num_chunks;
	uint32_t		page_num;
	uint32_t		chunk_idx;
	uint32_t		slot_idx;
	compressor_slot_t	*chunk;

	compressor_pager_lookup(mem_obj, pager);

	page_num = (uint32_t)(offset / PAGE_SIZE);
	if (page_num != (offset/PAGE_SIZE)) {
		/* overflow */
		return (memory_object_offset_t) -1;
	}
	if (page_num > pager->cpgr_num_slots) {
		/* out of range */
		return (memory_object_offset_t) -1;
	}

	num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) /
		      COMPRESSOR_SLOTS_PER_CHUNK);

	if (num_chunks == 1) {
		chunk = pager->cpgr_slots.cpgr_dslots;
		for (slot_idx = page_num;
		     slot_idx < pager->cpgr_num_slots;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found a non-NULL slot in this chunk */
				return (memory_object_offset_t) (slot_idx *
								 PAGE_SIZE);
			}
		}
		return (memory_object_offset_t) -1;
	}

	/* we have an array of chunks; find the next non-NULL chunk */
	chunk = NULL;
	for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK,
	     slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK;
	     chunk_idx < num_chunks;
	     chunk_idx++, slot_idx = 0) {
		chunk = pager->cpgr_slots.cpgr_islots[chunk_idx];
		if (chunk == NULL) {
			/* no chunk here: try the next one */
			continue;
		}
		/* search for an occupied slot in this chunk */
		for (;
		     slot_idx < COMPRESSOR_SLOTS_PER_CHUNK;
		     slot_idx++) {
			if (chunk[slot_idx] != 0) {
				/* found an occupied slot in this chunk */
				uint32_t next_slot;

				next_slot = ((chunk_idx *
					      COMPRESSOR_SLOTS_PER_CHUNK) +
					     slot_idx);
				if (next_slot > pager->cpgr_num_slots) {
					/* went beyond end of object */
					return (memory_object_offset_t) -1;
				}
				return (memory_object_offset_t) (next_slot *
								 PAGE_SIZE);
			}
		}
	}
	return (memory_object_offset_t) -1;
}
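
/*
 * Indexing example (again assuming 128 slots per chunk): page 300 lives at
 * chunk_idx = 300 / 128 = 2 and slot_idx = 300 % 128 = 44, and a hit at
 * that position reports back offset (2 * 128 + 44) * PAGE_SIZE.
 */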
unsigned int
vm_compressor_pager_get_count(
	memory_object_t	mem_obj)
{
	compressor_pager_t	pager;

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return 0;

	/*
	 * The caller should have the VM object locked and one
	 * needs that lock to do a page-in or page-out, so no
	 * need to lock the pager here.
	 */
	assert(pager->cpgr_num_slots_occupied >= 0);

	return pager->cpgr_num_slots_occupied;
}
void
vm_compressor_pager_count(
	memory_object_t	mem_obj,
	int		compressed_count_delta,
	boolean_t	shared_lock,
	vm_object_t	object __unused)
{
	compressor_pager_t	pager;

	if (compressed_count_delta == 0) {
		return;
	}

	compressor_pager_lookup(mem_obj, pager);
	if (pager == NULL)
		return;

	if (compressed_count_delta < 0) {
		assert(pager->cpgr_num_slots_occupied >=
		       (unsigned int) -compressed_count_delta);
	}

	/*
	 * The caller should have the VM object locked,
	 * shared or exclusive.
	 */
	if (shared_lock) {
		vm_object_lock_assert_shared(object);
		OSAddAtomic(compressed_count_delta,
			    &pager->cpgr_num_slots_occupied);
	} else {
		vm_object_lock_assert_exclusive(object);
		pager->cpgr_num_slots_occupied += compressed_count_delta;
	}
}