/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_algorithms.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
#include <mach/mach_host.h>		/* for host_info() */
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>
#include <san/kasan.h>

#if !CONFIG_EMBEDDED
#include <i386/misc_protos.h>
#endif

#include <IOKit/IOHibernatePrivate.h>
#if POPCOUNT_THE_COMPRESSED_DATA
boolean_t popcount_c_segs = TRUE;

static inline uint32_t vmc_pop(uintptr_t ins, int sz) {
	uint32_t rv = 0;

	if (__probable(popcount_c_segs == FALSE)) {
		return 0xDEAD707C;
	}

	while (sz >= 16) {
		uint32_t rv1, rv2;
		uint64_t *ins64 = (uint64_t *) ins;
		uint64_t *ins642 = (uint64_t *) (ins + 8);
		rv1 = __builtin_popcountll(*ins64);
		rv2 = __builtin_popcountll(*ins642);
		rv += rv1 + rv2;
		sz -= 16;
		ins += 16;
	}

	while (sz >= 4) {
		uint32_t *ins32 = (uint32_t *) ins;
		rv += __builtin_popcount(*ins32);
		sz -= 4;
		ins += 4;
	}

	while (sz > 0) {
		char *ins8 = (char *)ins;
		rv += __builtin_popcount(*ins8);
		sz--;
		ins++;
	}
	return rv;
}
#endif
/*
 * vm_compressor_mode has a hierarchy of control to set its value.
 * boot-args are checked first, then device-tree, and finally
 * the default value that is defined below. See vm_fault_init() for
 * the boot-arg & device-tree code.
 */
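/*
 * Illustrative sketch only (not part of this file): the override order
 * described above could look roughly like the fragment below, using the
 * real PE_parse_boot_argn() interface but a hypothetical boot-arg name
 * and local variable.  The actual lookup lives in vm_fault_init().
 */
#if 0
	int vmc_override;				/* hypothetical temporary */

	if (PE_parse_boot_argn("vm_compressor", &vmc_override, sizeof(vmc_override)))
		vm_compressor_mode = vmc_override;	/* 1) a boot-arg wins...              */
	/* 2) ...otherwise vm_fault_init() consults the device-tree, and              */
	/* 3) failing that, the compile-time defaults below are kept.                 */
#endif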
#if CONFIG_EMBEDDED

#if CONFIG_FREEZE
int		vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;

void		*freezer_chead; /* The chead used to track c_segs allocated for the exclusive use of holding just one task's compressed memory.*/
char		*freezer_compressor_scratch_buf = NULL;

#define		VM_MAX_FREEZER_CSEG_SWAP_COUNT	64 /* The maximum number of c_segs holding just one task's compressed memory that can be swapped out to disk.*/
extern int	c_freezer_swapout_count;	   /* This count keeps track of the # of c_segs holding just one task's compressed memory on the swapout queue. This count is used during each freeze i.e. on a per-task basis.*/

#else /* CONFIG_FREEZE */
int		vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
#endif /* CONFIG_FREEZE */

#else /* CONFIG_EMBEDDED */
int		vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;

#endif /* CONFIG_EMBEDDED */

int		vm_compressor_is_active = 0;
int		vm_compression_limit = 0;
int		vm_compressor_available = 0;

extern void vm_pageout_io_throttle(void);
#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
static unsigned int vmc_hash(char *, int);
boolean_t checksum_c_segs = TRUE;

unsigned int vmc_hash(char *cp, int len) {
	if (__probable(checksum_c_segs == FALSE)) {
		return 0xDEAD7A37;
	}
	return hash_string(cp, len);
}
#endif

#define	UNPACK_C_SIZE(cs)	((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define	PACK_C_SIZE(cs, size)	(cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
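/*
 * Illustrative sketch only: the c_size bit-field in a c_slot is too
 * narrow to hold a full PAGE_SIZE value, so an incompressible,
 * page-sized slot is encoded as PAGE_SIZE - 1 and decoded back by the
 * two macros above.  A round trip looks like this (cs is hypothetical):
 */
#if 0
	struct c_slot *cs;

	PACK_C_SIZE(cs, PAGE_SIZE);		/* stored as PAGE_SIZE - 1 */
	assert(UNPACK_C_SIZE(cs) == PAGE_SIZE);	/* decoded back to PAGE_SIZE */

	PACK_C_SIZE(cs, 812);			/* ordinary sizes are stored verbatim */
	assert(UNPACK_C_SIZE(cs) == 812);
#endif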
struct c_sv_hash_entry {
	union {
		struct	{
			uint32_t	c_sv_he_ref;
			uint32_t	c_sv_he_data;
		} c_sv_he;
		uint64_t	c_sv_he_record;

	} c_sv_he_un;
};

#define	he_ref	c_sv_he_un.c_sv_he.c_sv_he_ref
#define	he_data	c_sv_he_un.c_sv_he.c_sv_he_data
#define	he_record c_sv_he_un.c_sv_he_record

#define	C_SV_HASH_MAX_MISS	32
#define	C_SV_HASH_SIZE		((1 << 10))
#define	C_SV_HASH_MASK		((1 << 10) - 1)
#define	C_SV_CSEG_ID		((1 << 22) - 1)
#define	C_SLOT_PACK_PTR(ptr)		(((uintptr_t)ptr - (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) >> 2)
#define	C_SLOT_UNPACK_PTR(cslot)	((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START)
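/*
 * Illustrative sketch only: slot back-pointers are 4-byte aligned and
 * always lie above KERNEL_PMAP_HEAP_RANGE_START, so they can be packed
 * into the narrow c_packed_ptr field by subtracting the range start and
 * shifting out the two alignment bits.  vm_compressor_init() performs
 * essentially this round trip as a sanity check:
 */
#if 0
	struct c_slot cs_check;			/* hypothetical scratch slot */

	cs_check.c_packed_ptr = C_SLOT_PACK_PTR(zone_map_min_address);
	assert(C_SLOT_UNPACK_PTR(&cs_check) == (uintptr_t)zone_map_min_address);
#endif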
uint32_t	c_segment_count = 0;
uint32_t	c_segment_count_max = 0;

uint64_t	c_generation_id = 0;
uint64_t	c_generation_id_flush_barrier;


#define		HIBERNATE_FLUSHING_SECS_TO_COMPLETE	120

boolean_t	hibernate_no_swapspace = FALSE;
clock_sec_t	hibernate_flushing_deadline = 0;


#if RECORD_THE_COMPRESSED_DATA
char	*c_compressed_record_sbuf;
char	*c_compressed_record_ebuf;
char	*c_compressed_record_cptr;
#endif


queue_head_t	c_age_list_head;
queue_head_t	c_swapout_list_head;
queue_head_t	c_swappedin_list_head;
queue_head_t	c_swappedout_list_head;
queue_head_t	c_swappedout_sparse_list_head;
queue_head_t	c_major_list_head;
queue_head_t	c_filling_list_head;
queue_head_t	c_bad_list_head;
uint32_t	c_age_count = 0;
uint32_t	c_swapout_count = 0;
uint32_t	c_swappedin_count = 0;
uint32_t	c_swappedout_count = 0;
uint32_t	c_swappedout_sparse_count = 0;
uint32_t	c_major_count = 0;
uint32_t	c_filling_count = 0;
uint32_t	c_empty_count = 0;
uint32_t	c_bad_count = 0;


queue_head_t	c_minor_list_head;
uint32_t	c_minor_count = 0;

int		c_overage_swapped_count = 0;
int		c_overage_swapped_limit = 0;

int		c_seg_fixed_array_len;
union c_segu	*c_segments;
vm_offset_t	c_buffers;
vm_size_t	c_buffers_size;
caddr_t		c_segments_next_page;
boolean_t	c_segments_busy;
uint32_t	c_segments_available;
uint32_t	c_segments_limit;
uint32_t	c_segments_nearing_limit;

uint32_t	c_segment_svp_in_hash;
uint32_t	c_segment_svp_hash_succeeded;
uint32_t	c_segment_svp_hash_failed;
uint32_t	c_segment_svp_zero_compressions;
uint32_t	c_segment_svp_nonzero_compressions;
uint32_t	c_segment_svp_zero_decompressions;
uint32_t	c_segment_svp_nonzero_decompressions;

uint32_t	c_segment_noncompressible_pages;

uint32_t	c_segment_pages_compressed;
uint32_t	c_segment_pages_compressed_limit;
uint32_t	c_segment_pages_compressed_nearing_limit;
uint32_t	c_free_segno_head = (uint32_t)-1;

uint32_t	vm_compressor_minorcompact_threshold_divisor = 10;
uint32_t	vm_compressor_majorcompact_threshold_divisor = 10;
uint32_t	vm_compressor_unthrottle_threshold_divisor = 10;
uint32_t	vm_compressor_catchup_threshold_divisor = 10;


#define		C_SEGMENTS_PER_PAGE	(PAGE_SIZE / sizeof(union c_segu))
lck_grp_attr_t	vm_compressor_lck_grp_attr;
lck_attr_t	vm_compressor_lck_attr;
lck_grp_t	vm_compressor_lck_grp;
lck_mtx_t	*c_list_lock;
lck_rw_t	c_master_lock;
boolean_t	decompressions_blocked = FALSE;

zone_t		compressor_segment_zone;
int		c_compressor_swap_trigger = 0;

uint32_t	compressor_cpus;
char		*compressor_scratch_bufs;
char		*kdp_compressor_scratch_buf;
char		*kdp_compressor_decompressed_page;
addr64_t	kdp_compressor_decompressed_page_paddr;
ppnum_t		kdp_compressor_decompressed_page_ppnum;

clock_sec_t	start_of_sample_period_sec = 0;
clock_nsec_t	start_of_sample_period_nsec = 0;
clock_sec_t	start_of_eval_period_sec = 0;
clock_nsec_t	start_of_eval_period_nsec = 0;
uint32_t	sample_period_decompression_count = 0;
uint32_t	sample_period_compression_count = 0;
uint32_t	last_eval_decompression_count = 0;
uint32_t	last_eval_compression_count = 0;

#define		DECOMPRESSION_SAMPLE_MAX_AGE		(60 * 30)

boolean_t	vm_swapout_ripe_segments = FALSE;
uint32_t	vm_ripe_target_age = (60 * 60 * 48);

uint32_t	swapout_target_age = 0;
uint32_t	age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t	overage_decompressions_during_sample_period = 0;
uint32_t	vm_compressor_pages_grabbed = 0;


void		do_fastwake_warmup(queue_head_t *, boolean_t);
boolean_t	fastwake_warmup = FALSE;
boolean_t	fastwake_recording_in_progress = FALSE;
clock_sec_t	dont_trim_until_ts = 0;

uint64_t	c_segment_warmup_count;
uint64_t	first_c_segment_to_warm_generation_id = 0;
uint64_t	last_c_segment_to_warm_generation_id = 0;
boolean_t	hibernate_flushing = FALSE;

int64_t		c_segment_input_bytes __attribute__((aligned(8))) = 0;
int64_t		c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
int64_t		compressor_bytes_used __attribute__((aligned(8))) = 0;


struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned (8)));
static boolean_t compressor_needs_to_swap(void);
static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_age_swapped_in_segments(boolean_t);

static void vm_compressor_take_paging_space_action(void);

void compute_swapout_target_age(void);

boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);

int c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);

void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);

uint64_t vm_available_memory(void);
uint64_t vm_compressor_pages_compressed(void);
/*
 * indicate the need to do a major compaction if
 * the overall set of in-use compression segments
 * becomes sparse... on systems that support pressure
 * driven swapping, this will also cause swapouts to
 * be initiated.
 */
static inline boolean_t	vm_compressor_needs_to_major_compact()
{
	uint32_t	incore_seg_count;

	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;

	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
	    ((incore_seg_count * C_SEG_MAX_PAGES) - VM_PAGE_COMPRESSOR_COUNT) >
	    ((incore_seg_count / 8) * C_SEG_MAX_PAGES))
		return (TRUE);

	return (FALSE);
}


uint64_t
vm_available_memory(void)
{
	return (((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64);
}


uint64_t
vm_compressor_pages_compressed(void)
{
	return (c_segment_pages_compressed * PAGE_SIZE_64);
}
boolean_t
vm_compressor_low_on_space(void)
{
	if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
	    (c_segment_count > c_segments_nearing_limit))
		return (TRUE);

	return (FALSE);
}


boolean_t
vm_compressor_out_of_space(void)
{
	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
	    (c_segment_count >= c_segments_limit))
		return (TRUE);

	return (FALSE);
}


int
vm_wants_task_throttled(task_t task)
{
	if (task == kernel_task)
		return (0);

	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) &&
		    (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4))
			return (1);
	}
	return (0);
}
#if DEVELOPMENT || DEBUG
boolean_t kill_on_no_paging_space = FALSE; /* On compressor/swap exhaustion, kill the largest process regardless of
					    * its chosen process policy. Controlled by a boot-arg of the same name. */
#endif /* DEVELOPMENT || DEBUG */

#if !CONFIG_EMBEDDED

static uint32_t no_paging_space_action_in_progress = 0;
extern void memorystatus_send_low_swap_note(void);

static void
vm_compressor_take_paging_space_action(void)
{
	if (no_paging_space_action_in_progress == 0) {

		if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {

			if (no_paging_space_action()) {
#if DEVELOPMENT || DEBUG
				if (kill_on_no_paging_space == TRUE) {
					/*
					 * Since we are choosing to always kill a process, we don't need the
					 * "out of application memory" dialog box in this mode. And, hence we won't
					 * send the knote.
					 */
					no_paging_space_action_in_progress = 0;
					return;
				}
#endif /* DEVELOPMENT || DEBUG */
				memorystatus_send_low_swap_note();
			}

			no_paging_space_action_in_progress = 0;
		}
	}
}
#endif /* !CONFIG_EMBEDDED */
void
vm_compressor_init_locks(void)
{
	lck_grp_attr_setdefault(&vm_compressor_lck_grp_attr);
	lck_grp_init(&vm_compressor_lck_grp, "vm_compressor", &vm_compressor_lck_grp_attr);
	lck_attr_setdefault(&vm_compressor_lck_attr);

	lck_rw_init(&c_master_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
}


void
vm_decompressor_lock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = TRUE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

void
vm_decompressor_unlock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = FALSE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

	thread_wakeup((event_t)&decompressions_blocked);
}
static inline void cslot_copy(c_slot_t cdst, c_slot_t csrc) {
#if CHECKSUM_THE_DATA
	cdst->c_hash_data = csrc->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	cdst->c_pop_cdata = csrc->c_pop_cdata;
#endif
	cdst->c_size = csrc->c_size;
	cdst->c_packed_ptr = csrc->c_packed_ptr;
#if defined(__arm__) || defined(__arm64__)
	cdst->c_codec = csrc->c_codec;
#endif
}
vm_map_t compressor_map;
uint64_t compressor_pool_max_size;
uint64_t compressor_pool_size;
uint32_t compressor_pool_multiplier;

#if DEVELOPMENT || DEBUG
/*
 * Compressor segments are write-protected in development/debug
 * kernels to help debug memory corruption.
 * In cases where performance is a concern, this can be disabled
 * via the boot-arg "-disable_cseg_write_protection".
 */
boolean_t write_protect_c_segs = TRUE;
int vm_compressor_test_seg_wp;
uint32_t vm_ktrace_enabled;
#endif /* DEVELOPMENT || DEBUG */
513 vm_compressor_init(void)
516 struct c_slot cs_dummy
;
517 c_slot_t cs
= &cs_dummy
;
518 int c_segment_min_size
;
519 int c_segment_padded_size
;
521 kern_return_t retval
= KERN_SUCCESS
;
522 vm_offset_t start_addr
= 0;
523 vm_size_t c_segments_arr_size
= 0, compressor_submap_size
= 0;
524 vm_map_kernel_flags_t vmk_flags
;
525 #if RECORD_THE_COMPRESSED_DATA
526 vm_size_t c_compressed_record_sbuf_size
= 0;
527 #endif /* RECORD_THE_COMPRESSED_DATA */
529 #if DEVELOPMENT || DEBUG
530 char bootarg_name
[32];
531 if (PE_parse_boot_argn("-kill_on_no_paging_space", bootarg_name
, sizeof (bootarg_name
))) {
532 kill_on_no_paging_space
= TRUE
;
534 if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name
, sizeof (bootarg_name
))) {
535 write_protect_c_segs
= FALSE
;
538 PE_parse_boot_argn("vm_compressor_validation", &vmcval
, sizeof(vmcval
));
540 if (kern_feature_override(KF_COMPRSV_OVRD
)) {
544 #if POPCOUNT_THE_COMPRESSED_DATA
545 popcount_c_segs
= FALSE
;
547 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
548 checksum_c_segs
= FALSE
;
550 write_protect_c_segs
= FALSE
;
552 #endif /* DEVELOPMENT || DEBUG */
555 * ensure that any pointer that gets created from
556 * the vm_page zone can be packed properly
558 cs
->c_packed_ptr
= C_SLOT_PACK_PTR(zone_map_min_address
);
560 if (C_SLOT_UNPACK_PTR(cs
) != (uintptr_t)zone_map_min_address
)
561 panic("C_SLOT_UNPACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address
);
563 cs
->c_packed_ptr
= C_SLOT_PACK_PTR(zone_map_max_address
);
565 if (C_SLOT_UNPACK_PTR(cs
) != (uintptr_t)zone_map_max_address
)
566 panic("C_SLOT_UNPACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address
);
569 assert((C_SEGMENTS_PER_PAGE
* sizeof(union c_segu
)) == PAGE_SIZE
);
571 PE_parse_boot_argn("vm_compression_limit", &vm_compression_limit
, sizeof (vm_compression_limit
));
573 #ifdef CONFIG_EMBEDDED
574 vm_compressor_minorcompact_threshold_divisor
= 20;
575 vm_compressor_majorcompact_threshold_divisor
= 30;
576 vm_compressor_unthrottle_threshold_divisor
= 40;
577 vm_compressor_catchup_threshold_divisor
= 60;
579 if (max_mem
<= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
580 vm_compressor_minorcompact_threshold_divisor
= 11;
581 vm_compressor_majorcompact_threshold_divisor
= 13;
582 vm_compressor_unthrottle_threshold_divisor
= 20;
583 vm_compressor_catchup_threshold_divisor
= 35;
585 vm_compressor_minorcompact_threshold_divisor
= 20;
586 vm_compressor_majorcompact_threshold_divisor
= 25;
587 vm_compressor_unthrottle_threshold_divisor
= 35;
588 vm_compressor_catchup_threshold_divisor
= 50;
592 * vm_page_init_lck_grp is now responsible for calling vm_compressor_init_locks
593 * c_master_lock needs to be available early so that "vm_page_find_contiguous" can
594 * use PAGE_REPLACEMENT_ALLOWED to coordinate with the compressor.
597 c_list_lock
= lck_mtx_alloc_init(&vm_compressor_lck_grp
, &vm_compressor_lck_attr
);
599 queue_init(&c_bad_list_head
);
600 queue_init(&c_age_list_head
);
601 queue_init(&c_minor_list_head
);
602 queue_init(&c_major_list_head
);
603 queue_init(&c_filling_list_head
);
604 queue_init(&c_swapout_list_head
);
605 queue_init(&c_swappedin_list_head
);
606 queue_init(&c_swappedout_list_head
);
607 queue_init(&c_swappedout_sparse_list_head
);
609 c_free_segno_head
= -1;
610 c_segments_available
= 0;
612 if (vm_compression_limit
)
613 compressor_pool_size
= (uint64_t)vm_compression_limit
* PAGE_SIZE_64
;
615 compressor_pool_max_size
= C_SEG_MAX_LIMIT
;
616 compressor_pool_max_size
*= C_SEG_BUFSIZE
;
618 #if defined(__x86_64__)
620 if (vm_compression_limit
== 0) {
622 if (max_mem
<= (4ULL * 1024ULL * 1024ULL * 1024ULL))
623 compressor_pool_size
= 16ULL * max_mem
;
624 else if (max_mem
<= (8ULL * 1024ULL * 1024ULL * 1024ULL))
625 compressor_pool_size
= 8ULL * max_mem
;
626 else if (max_mem
<= (32ULL * 1024ULL * 1024ULL * 1024ULL))
627 compressor_pool_size
= 4ULL * max_mem
;
629 compressor_pool_size
= 2ULL * max_mem
;
631 if (max_mem
<= (8ULL * 1024ULL * 1024ULL * 1024ULL))
632 compressor_pool_multiplier
= 1;
633 else if (max_mem
<= (32ULL * 1024ULL * 1024ULL * 1024ULL))
634 compressor_pool_multiplier
= 2;
636 compressor_pool_multiplier
= 4;
638 #elif defined(__arm__)
640 #define VM_RESERVE_SIZE (1024 * 1024 * 256)
641 #define MAX_COMPRESSOR_POOL_SIZE (1024 * 1024 * 450)
643 if (compressor_pool_max_size
> MAX_COMPRESSOR_POOL_SIZE
)
644 compressor_pool_max_size
= MAX_COMPRESSOR_POOL_SIZE
;
646 if (vm_compression_limit
== 0)
647 compressor_pool_size
= ((kernel_map
->max_offset
- kernel_map
->min_offset
) - kernel_map
->size
) - VM_RESERVE_SIZE
;
648 compressor_pool_multiplier
= 1;
650 if (compressor_pool_max_size
> max_mem
)
651 compressor_pool_max_size
= max_mem
;
653 if (vm_compression_limit
== 0)
654 compressor_pool_size
= max_mem
;
655 compressor_pool_multiplier
= 1;
657 if (compressor_pool_size
> compressor_pool_max_size
)
658 compressor_pool_size
= compressor_pool_max_size
;
661 c_segments_limit
= (uint32_t)(compressor_pool_size
/ (vm_size_t
)(C_SEG_ALLOCSIZE
));
662 c_segments_nearing_limit
= (uint32_t)(((uint64_t)c_segments_limit
* 98ULL) / 100ULL);
664 c_segment_pages_compressed_limit
= (c_segments_limit
* (C_SEG_BUFSIZE
/ PAGE_SIZE
) * compressor_pool_multiplier
);
666 if (c_segment_pages_compressed_limit
< (uint32_t)(max_mem
/ PAGE_SIZE
))
667 c_segment_pages_compressed_limit
= (uint32_t)(max_mem
/ PAGE_SIZE
);
669 c_segment_pages_compressed_nearing_limit
= (uint32_t)(((uint64_t)c_segment_pages_compressed_limit
* 98ULL) / 100ULL);
672 * Submap needs space for:
675 * - swap reclaimations -- C_SEG_BUFSIZE
677 c_segments_arr_size
= vm_map_round_page((sizeof(union c_segu
) * c_segments_limit
), VM_MAP_PAGE_MASK(kernel_map
));
678 c_buffers_size
= vm_map_round_page(((vm_size_t
)C_SEG_ALLOCSIZE
* (vm_size_t
)c_segments_limit
), VM_MAP_PAGE_MASK(kernel_map
));
680 compressor_submap_size
= c_segments_arr_size
+ c_buffers_size
+ C_SEG_BUFSIZE
;
682 #if RECORD_THE_COMPRESSED_DATA
683 c_compressed_record_sbuf_size
= (vm_size_t
)C_SEG_ALLOCSIZE
+ (PAGE_SIZE
* 2);
684 compressor_submap_size
+= c_compressed_record_sbuf_size
;
685 #endif /* RECORD_THE_COMPRESSED_DATA */
687 vmk_flags
= VM_MAP_KERNEL_FLAGS_NONE
;
688 vmk_flags
.vmkf_permanent
= TRUE
;
689 retval
= kmem_suballoc(kernel_map
, &start_addr
, compressor_submap_size
,
690 FALSE
, VM_FLAGS_ANYWHERE
, vmk_flags
, VM_KERN_MEMORY_COMPRESSOR
,
693 if (retval
!= KERN_SUCCESS
) {
695 panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size
);
697 compressor_pool_size
= compressor_pool_size
/ 2;
699 kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size
);
702 if (kernel_memory_allocate(compressor_map
, (vm_offset_t
*)(&c_segments
), (sizeof(union c_segu
) * c_segments_limit
), 0, KMA_KOBJECT
| KMA_VAONLY
| KMA_PERMANENT
, VM_KERN_MEMORY_COMPRESSOR
) != KERN_SUCCESS
)
703 panic("vm_compressor_init: kernel_memory_allocate failed - c_segments\n");
704 if (kernel_memory_allocate(compressor_map
, &c_buffers
, c_buffers_size
, 0, KMA_COMPRESSOR
| KMA_VAONLY
| KMA_PERMANENT
, VM_KERN_MEMORY_COMPRESSOR
) != KERN_SUCCESS
)
705 panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers\n");
708 c_segment_min_size
= sizeof(struct c_segment
) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN
* sizeof(struct c_slot
));
710 for (c_segment_padded_size
= 128; c_segment_padded_size
< c_segment_min_size
; c_segment_padded_size
= c_segment_padded_size
<< 1);
712 compressor_segment_zone
= zinit(c_segment_padded_size
, c_segments_limit
* c_segment_padded_size
, PAGE_SIZE
, "compressor_segment");
713 zone_change(compressor_segment_zone
, Z_CALLERACCT
, FALSE
);
714 zone_change(compressor_segment_zone
, Z_NOENCRYPT
, TRUE
);
716 c_seg_fixed_array_len
= (c_segment_padded_size
- sizeof(struct c_segment
)) / sizeof(struct c_slot
);
718 c_segments_busy
= FALSE
;
720 c_segments_next_page
= (caddr_t
)c_segments
;
721 vm_compressor_algorithm_init();
724 host_basic_info_data_t hinfo
;
725 mach_msg_type_number_t count
= HOST_BASIC_INFO_COUNT
;
728 host_info((host_t
)BSD_HOST
, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
);
730 compressor_cpus
= hinfo
.max_cpus
;
731 compressor_scratch_bufs
= kalloc_tag(compressor_cpus
* vm_compressor_get_decode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR
);
733 kdp_compressor_scratch_buf
= kalloc_tag(vm_compressor_get_decode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR
);
734 kdp_compressor_decompressed_page
= kalloc_tag(PAGE_SIZE
, VM_KERN_MEMORY_COMPRESSOR
);
735 kdp_compressor_decompressed_page_paddr
= kvtophys((vm_offset_t
)kdp_compressor_decompressed_page
);
736 kdp_compressor_decompressed_page_ppnum
= (ppnum_t
) atop(kdp_compressor_decompressed_page_paddr
);
739 freezer_compressor_scratch_buf
= kalloc_tag(vm_compressor_get_encode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR
);
742 #if RECORD_THE_COMPRESSED_DATA
743 if (kernel_memory_allocate(compressor_map
, (vm_offset_t
*)&c_compressed_record_sbuf
, c_compressed_record_sbuf_size
, 0, KMA_KOBJECT
, VM_KERN_MEMORY_COMPRESSOR
) != KERN_SUCCESS
)
744 panic("vm_compressor_init: kernel_memory_allocate failed - c_compressed_record_sbuf\n");
746 c_compressed_record_cptr
= c_compressed_record_sbuf
;
747 c_compressed_record_ebuf
= c_compressed_record_sbuf
+ c_compressed_record_sbuf_size
;
750 if (kernel_thread_start_priority((thread_continue_t
)vm_compressor_swap_trigger_thread
, NULL
,
751 BASEPRI_VM
, &thread
) != KERN_SUCCESS
) {
752 panic("vm_compressor_swap_trigger_thread: create failed");
754 thread_deallocate(thread
);
756 if (vm_pageout_internal_start() != KERN_SUCCESS
) {
757 panic("vm_compressor_init: Failed to start the internal pageout thread.\n");
759 if (VM_CONFIG_SWAP_IS_PRESENT
)
760 vm_compressor_swap_init();
762 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE
)
763 vm_compressor_is_active
= 1;
766 memorystatus_freeze_enabled
= TRUE
;
767 #endif /* CONFIG_FREEZE */
769 vm_compressor_available
= 1;
771 vm_page_reactivate_all_throttled();
775 #if VALIDATE_C_SEGMENTS
778 c_seg_validate(c_segment_t c_seg
, boolean_t must_be_compact
)
782 uint32_t c_rounded_size
;
786 if (c_seg
->c_firstemptyslot
< c_seg
->c_nextslot
) {
787 c_indx
= c_seg
->c_firstemptyslot
;
788 cs
= C_SEG_SLOT_FROM_INDEX(c_seg
, c_indx
);
791 panic("c_seg_validate: no slot backing c_firstemptyslot");
794 panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs
->c_size
);
798 for (c_indx
= 0; c_indx
< c_seg
->c_nextslot
; c_indx
++) {
800 cs
= C_SEG_SLOT_FROM_INDEX(c_seg
, c_indx
);
802 c_size
= UNPACK_C_SIZE(cs
);
804 c_rounded_size
= (c_size
+ C_SEG_OFFSET_ALIGNMENT_MASK
) & ~C_SEG_OFFSET_ALIGNMENT_MASK
;
806 bytes_used
+= c_rounded_size
;
808 #if CHECKSUM_THE_COMPRESSED_DATA
810 if (c_size
&& cs
->c_hash_compressed_data
!= (csvhash
= vmc_hash((char *)&c_seg
->c_store
.c_buffer
[cs
->c_offset
], c_size
))) {
811 addr64_t csvphys
= kvtophys((vm_offset_t
)&c_seg
->c_store
.c_buffer
[cs
->c_offset
]);
812 panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg
, csvphys
, cs
->c_offset
, cs
, c_indx
, c_size
, cs
->c_hash_compressed_data
, csvhash
);
817 if (bytes_used
!= c_seg
->c_bytes_used
)
818 panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d\n", bytes_used
, c_seg
->c_bytes_used
);
820 if (c_seg
->c_bytes_used
> C_SEG_OFFSET_TO_BYTES((int32_t)c_seg
->c_nextoffset
))
821 panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
822 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg
->c_nextoffset
), c_seg
->c_bytes_used
);
824 if (must_be_compact
) {
825 if (c_seg
->c_bytes_used
!= C_SEG_OFFSET_TO_BYTES((int32_t)c_seg
->c_nextoffset
))
826 panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
827 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg
->c_nextoffset
), c_seg
->c_bytes_used
);
835 c_seg_need_delayed_compaction(c_segment_t c_seg
, boolean_t c_list_lock_held
)
837 boolean_t clear_busy
= FALSE
;
839 if (c_list_lock_held
== FALSE
) {
840 if ( !lck_mtx_try_lock_spin_always(c_list_lock
)) {
843 lck_mtx_unlock_always(&c_seg
->c_lock
);
844 lck_mtx_lock_spin_always(c_list_lock
);
845 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
850 assert(c_seg
->c_state
!= C_IS_FILLING
);
852 if (!c_seg
->c_on_minorcompact_q
&& !(C_SEG_IS_ONDISK(c_seg
))) {
853 queue_enter(&c_minor_list_head
, c_seg
, c_segment_t
, c_list
);
854 c_seg
->c_on_minorcompact_q
= 1;
857 if (c_list_lock_held
== FALSE
)
858 lck_mtx_unlock_always(c_list_lock
);
860 if (clear_busy
== TRUE
)
861 C_SEG_WAKEUP_DONE(c_seg
);
865 unsigned int c_seg_moved_to_sparse_list
= 0;
868 c_seg_move_to_sparse_list(c_segment_t c_seg
)
870 boolean_t clear_busy
= FALSE
;
872 if ( !lck_mtx_try_lock_spin_always(c_list_lock
)) {
875 lck_mtx_unlock_always(&c_seg
->c_lock
);
876 lck_mtx_lock_spin_always(c_list_lock
);
877 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
881 c_seg_switch_state(c_seg
, C_ON_SWAPPEDOUTSPARSE_Q
, FALSE
);
883 c_seg_moved_to_sparse_list
++;
885 lck_mtx_unlock_always(c_list_lock
);
887 if (clear_busy
== TRUE
)
888 C_SEG_WAKEUP_DONE(c_seg
);
893 c_seg_insert_into_q(queue_head_t
*qhead
, c_segment_t c_seg
)
895 c_segment_t c_seg_next
;
897 if (queue_empty(qhead
)) {
898 queue_enter(qhead
, c_seg
, c_segment_t
, c_age_list
);
900 c_seg_next
= (c_segment_t
)queue_first(qhead
);
904 if (c_seg
->c_generation_id
< c_seg_next
->c_generation_id
) {
905 queue_insert_before(qhead
, c_seg
, c_seg_next
, c_segment_t
, c_age_list
);
908 c_seg_next
= (c_segment_t
) queue_next(&c_seg_next
->c_age_list
);
910 if (queue_end(qhead
, (queue_entry_t
) c_seg_next
)) {
911 queue_enter(qhead
, c_seg
, c_segment_t
, c_age_list
);
919 int try_minor_compaction_failed
= 0;
920 int try_minor_compaction_succeeded
= 0;
923 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg
)
926 assert(c_seg
->c_on_minorcompact_q
);
928 * c_seg is currently on the delayed minor compaction
929 * queue and we have c_seg locked... if we can get the
930 * c_list_lock w/o blocking (if we blocked we could deadlock
931 * because the lock order is c_list_lock then c_seg's lock)
932 * we'll pull it from the delayed list and free it directly
934 if ( !lck_mtx_try_lock_spin_always(c_list_lock
)) {
936 * c_list_lock is held, we need to bail
938 try_minor_compaction_failed
++;
940 lck_mtx_unlock_always(&c_seg
->c_lock
);
942 try_minor_compaction_succeeded
++;
945 c_seg_do_minor_compaction_and_unlock(c_seg
, TRUE
, FALSE
, FALSE
);
951 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg
, boolean_t clear_busy
, boolean_t need_list_lock
, boolean_t disallow_page_replacement
)
955 assert(c_seg
->c_busy
);
958 * check for the case that can occur when we are not swapping
959 * and this segment has been major compacted in the past
960 * and moved to the majorcompact q to remove it from further
961 * consideration... if the occupancy falls too low we need
962 * to put it back on the age_q so that it will be considered
963 * in the next major compaction sweep... if we don't do this
964 * we will eventually run into the c_segments_limit
966 if (c_seg
->c_state
== C_ON_MAJORCOMPACT_Q
&& C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg
)) {
968 c_seg_switch_state(c_seg
, C_ON_AGE_Q
, FALSE
);
970 if (!c_seg
->c_on_minorcompact_q
) {
971 if (clear_busy
== TRUE
)
972 C_SEG_WAKEUP_DONE(c_seg
);
974 lck_mtx_unlock_always(&c_seg
->c_lock
);
978 queue_remove(&c_minor_list_head
, c_seg
, c_segment_t
, c_list
);
979 c_seg
->c_on_minorcompact_q
= 0;
982 lck_mtx_unlock_always(c_list_lock
);
984 if (disallow_page_replacement
== TRUE
) {
985 lck_mtx_unlock_always(&c_seg
->c_lock
);
987 PAGE_REPLACEMENT_DISALLOWED(TRUE
);
989 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
991 c_seg_freed
= c_seg_minor_compaction_and_unlock(c_seg
, clear_busy
);
993 if (disallow_page_replacement
== TRUE
)
994 PAGE_REPLACEMENT_DISALLOWED(FALSE
);
996 if (need_list_lock
== TRUE
)
997 lck_mtx_lock_spin_always(c_list_lock
);
999 return (c_seg_freed
);
1004 c_seg_wait_on_busy(c_segment_t c_seg
)
1006 c_seg
->c_wanted
= 1;
1007 assert_wait((event_t
) (c_seg
), THREAD_UNINT
);
1009 lck_mtx_unlock_always(&c_seg
->c_lock
);
1010 thread_block(THREAD_CONTINUE_NULL
);
1015 c_seg_switch_state(c_segment_t c_seg
, int new_state
, boolean_t insert_head
)
1017 int old_state
= c_seg
->c_state
;
1019 #if __i386__ || __x86_64__
1020 if (new_state
!= C_IS_FILLING
)
1021 LCK_MTX_ASSERT(&c_seg
->c_lock
, LCK_MTX_ASSERT_OWNED
);
1022 LCK_MTX_ASSERT(c_list_lock
, LCK_MTX_ASSERT_OWNED
);
1024 switch (old_state
) {
1027 assert(new_state
== C_IS_FILLING
|| new_state
== C_IS_FREE
);
1033 assert(new_state
== C_ON_AGE_Q
|| new_state
== C_ON_SWAPOUT_Q
);
1035 queue_remove(&c_filling_list_head
, c_seg
, c_segment_t
, c_age_list
);
1040 assert(new_state
== C_ON_SWAPOUT_Q
|| new_state
== C_ON_MAJORCOMPACT_Q
||
1041 new_state
== C_IS_FREE
);
1043 queue_remove(&c_age_list_head
, c_seg
, c_segment_t
, c_age_list
);
1047 case C_ON_SWAPPEDIN_Q
:
1048 assert(new_state
== C_ON_AGE_Q
|| new_state
== C_IS_FREE
);
1050 queue_remove(&c_swappedin_list_head
, c_seg
, c_segment_t
, c_age_list
);
1051 c_swappedin_count
--;
1054 case C_ON_SWAPOUT_Q
:
1055 assert(new_state
== C_ON_SWAPPEDOUT_Q
|| new_state
== C_ON_SWAPPEDOUTSPARSE_Q
||
1056 new_state
== C_ON_AGE_Q
|| new_state
== C_IS_FREE
|| new_state
== C_IS_EMPTY
);
1058 queue_remove(&c_swapout_list_head
, c_seg
, c_segment_t
, c_age_list
);
1059 thread_wakeup((event_t
)&compaction_swapper_running
);
1063 case C_ON_SWAPPEDOUT_Q
:
1064 assert(new_state
== C_ON_SWAPPEDIN_Q
|| new_state
== C_ON_AGE_Q
||
1065 new_state
== C_ON_SWAPPEDOUTSPARSE_Q
||
1066 new_state
== C_ON_BAD_Q
|| new_state
== C_IS_EMPTY
|| new_state
== C_IS_FREE
);
1068 queue_remove(&c_swappedout_list_head
, c_seg
, c_segment_t
, c_age_list
);
1069 c_swappedout_count
--;
1072 case C_ON_SWAPPEDOUTSPARSE_Q
:
1073 assert(new_state
== C_ON_SWAPPEDIN_Q
|| new_state
== C_ON_AGE_Q
||
1074 new_state
== C_ON_BAD_Q
|| new_state
== C_IS_EMPTY
|| new_state
== C_IS_FREE
);
1076 queue_remove(&c_swappedout_sparse_list_head
, c_seg
, c_segment_t
, c_age_list
);
1077 c_swappedout_sparse_count
--;
1080 case C_ON_MAJORCOMPACT_Q
:
1081 assert(new_state
== C_ON_AGE_Q
|| new_state
== C_IS_FREE
);
1083 queue_remove(&c_major_list_head
, c_seg
, c_segment_t
, c_age_list
);
1088 assert(new_state
== C_IS_FREE
);
1090 queue_remove(&c_bad_list_head
, c_seg
, c_segment_t
, c_age_list
);
1095 panic("c_seg %p has bad c_state = %d\n", c_seg
, old_state
);
1100 assert(old_state
!= C_IS_FILLING
);
1105 assert(old_state
== C_ON_SWAPOUT_Q
|| old_state
== C_ON_SWAPPEDOUT_Q
|| old_state
== C_ON_SWAPPEDOUTSPARSE_Q
);
1111 assert(old_state
== C_IS_EMPTY
);
1113 queue_enter(&c_filling_list_head
, c_seg
, c_segment_t
, c_age_list
);
1118 assert(old_state
== C_IS_FILLING
|| old_state
== C_ON_SWAPPEDIN_Q
|| old_state
== C_ON_SWAPOUT_Q
||
1119 old_state
== C_ON_MAJORCOMPACT_Q
|| old_state
== C_ON_SWAPPEDOUT_Q
|| old_state
== C_ON_SWAPPEDOUTSPARSE_Q
);
1121 if (old_state
== C_IS_FILLING
)
1122 queue_enter(&c_age_list_head
, c_seg
, c_segment_t
, c_age_list
);
1124 if (!queue_empty(&c_age_list_head
)) {
1125 c_segment_t c_first
;
1127 c_first
= (c_segment_t
)queue_first(&c_age_list_head
);
1128 c_seg
->c_creation_ts
= c_first
->c_creation_ts
;
1130 queue_enter_first(&c_age_list_head
, c_seg
, c_segment_t
, c_age_list
);
1135 case C_ON_SWAPPEDIN_Q
:
1136 assert(c_seg
->c_state
== C_ON_SWAPPEDOUT_Q
|| c_seg
->c_state
== C_ON_SWAPPEDOUTSPARSE_Q
);
1138 if (insert_head
== TRUE
)
1139 queue_enter_first(&c_swappedin_list_head
, c_seg
, c_segment_t
, c_age_list
);
1141 queue_enter(&c_swappedin_list_head
, c_seg
, c_segment_t
, c_age_list
);
1142 c_swappedin_count
++;
1145 case C_ON_SWAPOUT_Q
:
1146 assert(old_state
== C_ON_AGE_Q
|| old_state
== C_IS_FILLING
);
1148 if (insert_head
== TRUE
)
1149 queue_enter_first(&c_swapout_list_head
, c_seg
, c_segment_t
, c_age_list
);
1151 queue_enter(&c_swapout_list_head
, c_seg
, c_segment_t
, c_age_list
);
1155 case C_ON_SWAPPEDOUT_Q
:
1156 assert(c_seg
->c_state
== C_ON_SWAPOUT_Q
);
1158 if (insert_head
== TRUE
)
1159 queue_enter_first(&c_swappedout_list_head
, c_seg
, c_segment_t
, c_age_list
);
1161 queue_enter(&c_swappedout_list_head
, c_seg
, c_segment_t
, c_age_list
);
1162 c_swappedout_count
++;
1165 case C_ON_SWAPPEDOUTSPARSE_Q
:
1166 assert(c_seg
->c_state
== C_ON_SWAPOUT_Q
|| c_seg
->c_state
== C_ON_SWAPPEDOUT_Q
);
1168 if (insert_head
== TRUE
)
1169 queue_enter_first(&c_swappedout_sparse_list_head
, c_seg
, c_segment_t
, c_age_list
);
1171 queue_enter(&c_swappedout_sparse_list_head
, c_seg
, c_segment_t
, c_age_list
);
1173 c_swappedout_sparse_count
++;
1176 case C_ON_MAJORCOMPACT_Q
:
1177 assert(c_seg
->c_state
== C_ON_AGE_Q
);
1179 if (insert_head
== TRUE
)
1180 queue_enter_first(&c_major_list_head
, c_seg
, c_segment_t
, c_age_list
);
1182 queue_enter(&c_major_list_head
, c_seg
, c_segment_t
, c_age_list
);
1187 assert(c_seg
->c_state
== C_ON_SWAPPEDOUT_Q
|| c_seg
->c_state
== C_ON_SWAPPEDOUTSPARSE_Q
);
1189 if (insert_head
== TRUE
)
1190 queue_enter_first(&c_bad_list_head
, c_seg
, c_segment_t
, c_age_list
);
1192 queue_enter(&c_bad_list_head
, c_seg
, c_segment_t
, c_age_list
);
1197 panic("c_seg %p requesting bad c_state = %d\n", c_seg
, new_state
);
1199 c_seg
->c_state
= new_state
;
1205 c_seg_free(c_segment_t c_seg
)
1207 assert(c_seg
->c_busy
);
1209 lck_mtx_unlock_always(&c_seg
->c_lock
);
1210 lck_mtx_lock_spin_always(c_list_lock
);
1211 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
1213 c_seg_free_locked(c_seg
);
1218 c_seg_free_locked(c_segment_t c_seg
)
1221 int pages_populated
= 0;
1222 int32_t *c_buffer
= NULL
;
1223 uint64_t c_swap_handle
= 0;
1225 assert(c_seg
->c_busy
);
1226 assert(c_seg
->c_slots_used
== 0);
1227 assert(!c_seg
->c_on_minorcompact_q
);
1228 assert(!c_seg
->c_busy_swapping
);
1230 if (c_seg
->c_overage_swap
== TRUE
) {
1231 c_overage_swapped_count
--;
1232 c_seg
->c_overage_swap
= FALSE
;
1234 if ( !(C_SEG_IS_ONDISK(c_seg
)))
1235 c_buffer
= c_seg
->c_store
.c_buffer
;
1237 c_swap_handle
= c_seg
->c_store
.c_swap_handle
;
1239 c_seg_switch_state(c_seg
, C_IS_FREE
, FALSE
);
1241 lck_mtx_unlock_always(c_list_lock
);
1244 pages_populated
= (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg
->c_populated_offset
))) / PAGE_SIZE
;
1245 c_seg
->c_store
.c_buffer
= NULL
;
1247 c_seg
->c_store
.c_swap_handle
= (uint64_t)-1;
1249 lck_mtx_unlock_always(&c_seg
->c_lock
);
1252 if (pages_populated
)
1253 kernel_memory_depopulate(compressor_map
, (vm_offset_t
) c_buffer
, pages_populated
* PAGE_SIZE
, KMA_COMPRESSOR
);
1255 } else if (c_swap_handle
) {
1257 * Free swap space on disk.
1259 vm_swap_free(c_swap_handle
);
1261 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
1263 * c_seg must remain busy until
1264 * after the call to vm_swap_free
1266 C_SEG_WAKEUP_DONE(c_seg
);
1267 lck_mtx_unlock_always(&c_seg
->c_lock
);
1269 segno
= c_seg
->c_mysegno
;
1271 lck_mtx_lock_spin_always(c_list_lock
);
1273 * because the c_buffer is now associated with the segno,
1274 * we can't put the segno back on the free list until
1275 * after we have depopulated the c_buffer range, or
1276 * we run the risk of depopulating a range that is
1277 * now being used in one of the compressor heads
1279 c_segments
[segno
].c_segno
= c_free_segno_head
;
1280 c_free_segno_head
= segno
;
1283 lck_mtx_unlock_always(c_list_lock
);
1285 lck_mtx_destroy(&c_seg
->c_lock
, &vm_compressor_lck_grp
);
1287 if (c_seg
->c_slot_var_array_len
)
1288 kfree(c_seg
->c_slot_var_array
, sizeof(struct c_slot
) * c_seg
->c_slot_var_array_len
);
1290 zfree(compressor_segment_zone
, c_seg
);
1293 #if DEVELOPMENT || DEBUG
1294 int c_seg_trim_page_count
= 0;
1298 c_seg_trim_tail(c_segment_t c_seg
)
1303 uint32_t c_rounded_size
;
1304 uint16_t current_nextslot
;
1305 uint32_t current_populated_offset
;
1307 if (c_seg
->c_bytes_used
== 0)
1309 current_nextslot
= c_seg
->c_nextslot
;
1310 current_populated_offset
= c_seg
->c_populated_offset
;
1312 while (c_seg
->c_nextslot
) {
1314 cs
= C_SEG_SLOT_FROM_INDEX(c_seg
, (c_seg
->c_nextslot
- 1));
1316 c_size
= UNPACK_C_SIZE(cs
);
1319 if (current_nextslot
!= c_seg
->c_nextslot
) {
1320 c_rounded_size
= (c_size
+ C_SEG_OFFSET_ALIGNMENT_MASK
) & ~C_SEG_OFFSET_ALIGNMENT_MASK
;
1321 c_offset
= cs
->c_offset
+ C_SEG_BYTES_TO_OFFSET(c_rounded_size
);
1323 c_seg
->c_nextoffset
= c_offset
;
1324 c_seg
->c_populated_offset
= (c_offset
+ (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE
) - 1)) &
1325 ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE
) - 1);
1327 if (c_seg
->c_firstemptyslot
> c_seg
->c_nextslot
)
1328 c_seg
->c_firstemptyslot
= c_seg
->c_nextslot
;
1329 #if DEVELOPMENT || DEBUG
1330 c_seg_trim_page_count
+= ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset
)) -
1331 round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg
->c_populated_offset
))) /
1337 c_seg
->c_nextslot
--;
1339 assert(c_seg
->c_nextslot
);
1344 c_seg_minor_compaction_and_unlock(c_segment_t c_seg
, boolean_t clear_busy
)
1346 c_slot_mapping_t slot_ptr
;
1347 uint32_t c_offset
= 0;
1348 uint32_t old_populated_offset
;
1349 uint32_t c_rounded_size
;
1356 assert(c_seg
->c_busy
);
1358 #if VALIDATE_C_SEGMENTS
1359 c_seg_validate(c_seg
, FALSE
);
1361 if (c_seg
->c_bytes_used
== 0) {
1365 lck_mtx_unlock_always(&c_seg
->c_lock
);
1367 if (c_seg
->c_firstemptyslot
>= c_seg
->c_nextslot
|| C_SEG_UNUSED_BYTES(c_seg
) < PAGE_SIZE
)
1370 /* TODO: assert first emptyslot's c_size is actually 0 */
1372 #if DEVELOPMENT || DEBUG
1373 C_SEG_MAKE_WRITEABLE(c_seg
);
1376 #if VALIDATE_C_SEGMENTS
1377 c_seg
->c_was_minor_compacted
++;
1379 c_indx
= c_seg
->c_firstemptyslot
;
1380 c_dst
= C_SEG_SLOT_FROM_INDEX(c_seg
, c_indx
);
1382 old_populated_offset
= c_seg
->c_populated_offset
;
1383 c_offset
= c_dst
->c_offset
;
1385 for (i
= c_indx
+ 1; i
< c_seg
->c_nextslot
&& c_offset
< c_seg
->c_nextoffset
; i
++) {
1387 c_src
= C_SEG_SLOT_FROM_INDEX(c_seg
, i
);
1389 c_size
= UNPACK_C_SIZE(c_src
);
1394 c_rounded_size
= (c_size
+ C_SEG_OFFSET_ALIGNMENT_MASK
) & ~C_SEG_OFFSET_ALIGNMENT_MASK
;
1395 /* N.B.: This memcpy may be an overlapping copy */
1396 memcpy(&c_seg
->c_store
.c_buffer
[c_offset
], &c_seg
->c_store
.c_buffer
[c_src
->c_offset
], c_rounded_size
);
1398 cslot_copy(c_dst
, c_src
);
1399 c_dst
->c_offset
= c_offset
;
1401 slot_ptr
= (c_slot_mapping_t
)C_SLOT_UNPACK_PTR(c_dst
);
1402 slot_ptr
->s_cindx
= c_indx
;
1404 c_offset
+= C_SEG_BYTES_TO_OFFSET(c_rounded_size
);
1405 PACK_C_SIZE(c_src
, 0);
1408 c_dst
= C_SEG_SLOT_FROM_INDEX(c_seg
, c_indx
);
1410 c_seg
->c_firstemptyslot
= c_indx
;
1411 c_seg
->c_nextslot
= c_indx
;
1412 c_seg
->c_nextoffset
= c_offset
;
1413 c_seg
->c_populated_offset
= (c_offset
+ (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE
) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE
) - 1);
1414 c_seg
->c_bytes_unused
= 0;
1416 #if VALIDATE_C_SEGMENTS
1417 c_seg_validate(c_seg
, TRUE
);
1419 if (old_populated_offset
> c_seg
->c_populated_offset
) {
1423 gc_size
= C_SEG_OFFSET_TO_BYTES(old_populated_offset
- c_seg
->c_populated_offset
);
1424 gc_ptr
= &c_seg
->c_store
.c_buffer
[c_seg
->c_populated_offset
];
1426 kernel_memory_depopulate(compressor_map
, (vm_offset_t
)gc_ptr
, gc_size
, KMA_COMPRESSOR
);
1429 #if DEVELOPMENT || DEBUG
1430 C_SEG_WRITE_PROTECT(c_seg
);
1434 if (clear_busy
== TRUE
) {
1435 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
1436 C_SEG_WAKEUP_DONE(c_seg
);
1437 lck_mtx_unlock_always(&c_seg
->c_lock
);
1444 c_seg_alloc_nextslot(c_segment_t c_seg
)
1446 struct c_slot
*old_slot_array
= NULL
;
1447 struct c_slot
*new_slot_array
= NULL
;
1451 if (c_seg
->c_nextslot
< c_seg_fixed_array_len
)
1454 if ((c_seg
->c_nextslot
- c_seg_fixed_array_len
) >= c_seg
->c_slot_var_array_len
) {
1456 oldlen
= c_seg
->c_slot_var_array_len
;
1457 old_slot_array
= c_seg
->c_slot_var_array
;
1460 newlen
= C_SEG_SLOT_VAR_ARRAY_MIN_LEN
;
1462 newlen
= oldlen
* 2;
1464 new_slot_array
= (struct c_slot
*)kalloc(sizeof(struct c_slot
) * newlen
);
1466 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
1469 memcpy((char *)new_slot_array
, (char *)old_slot_array
, sizeof(struct c_slot
) * oldlen
);
1471 c_seg
->c_slot_var_array_len
= newlen
;
1472 c_seg
->c_slot_var_array
= new_slot_array
;
1474 lck_mtx_unlock_always(&c_seg
->c_lock
);
1477 kfree(old_slot_array
, sizeof(struct c_slot
) * oldlen
);
1484 uint64_t asked_permission
;
1485 uint64_t compactions
;
1486 uint64_t moved_slots
;
1487 uint64_t moved_bytes
;
1488 uint64_t wasted_space_in_swapouts
;
1489 uint64_t count_of_swapouts
;
1490 uint64_t count_of_freed_segs
;
1491 } c_seg_major_compact_stats
;
1494 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((C_SEG_BUFSIZE * 90) / 100)
1498 c_seg_major_compact_ok(
1499 c_segment_t c_seg_dst
,
1500 c_segment_t c_seg_src
)
1503 c_seg_major_compact_stats
.asked_permission
++;
1505 if (c_seg_src
->c_bytes_used
>= C_MAJOR_COMPACTION_SIZE_APPROPRIATE
&&
1506 c_seg_dst
->c_bytes_used
>= C_MAJOR_COMPACTION_SIZE_APPROPRIATE
)
1509 if (c_seg_dst
->c_nextoffset
>= C_SEG_OFF_LIMIT
|| c_seg_dst
->c_nextslot
>= C_SLOT_MAX_INDEX
) {
1511 * destination segment is full... can't compact
1521 c_seg_major_compact(
1522 c_segment_t c_seg_dst
,
1523 c_segment_t c_seg_src
)
1525 c_slot_mapping_t slot_ptr
;
1526 uint32_t c_rounded_size
;
1532 boolean_t keep_compacting
= TRUE
;
1535 * segments are not locked but they are both marked c_busy
1536 * which keeps c_decompress from working on them...
1537 * we can safely allocate new pages, move compressed data
1538 * from c_seg_src to c_seg_dst and update both c_segment's
1539 * state w/o holding the master lock
1541 #if DEVELOPMENT || DEBUG
1542 C_SEG_MAKE_WRITEABLE(c_seg_dst
);
1545 #if VALIDATE_C_SEGMENTS
1546 c_seg_dst
->c_was_major_compacted
++;
1547 c_seg_src
->c_was_major_donor
++;
1549 c_seg_major_compact_stats
.compactions
++;
1551 dst_slot
= c_seg_dst
->c_nextslot
;
1553 for (i
= 0; i
< c_seg_src
->c_nextslot
; i
++) {
1555 c_src
= C_SEG_SLOT_FROM_INDEX(c_seg_src
, i
);
1557 c_size
= UNPACK_C_SIZE(c_src
);
1560 /* BATCH: move what we have so far; */
1564 if (C_SEG_OFFSET_TO_BYTES(c_seg_dst
->c_populated_offset
- c_seg_dst
->c_nextoffset
) < (unsigned) c_size
) {
1565 int size_to_populate
;
1568 size_to_populate
= C_SEG_BUFSIZE
- C_SEG_OFFSET_TO_BYTES(c_seg_dst
->c_populated_offset
);
1570 if (size_to_populate
== 0) {
1572 keep_compacting
= FALSE
;
1575 if (size_to_populate
> C_SEG_MAX_POPULATE_SIZE
)
1576 size_to_populate
= C_SEG_MAX_POPULATE_SIZE
;
1578 kernel_memory_populate(compressor_map
,
1579 (vm_offset_t
) &c_seg_dst
->c_store
.c_buffer
[c_seg_dst
->c_populated_offset
],
1582 VM_KERN_MEMORY_COMPRESSOR
);
1584 c_seg_dst
->c_populated_offset
+= C_SEG_BYTES_TO_OFFSET(size_to_populate
);
1585 assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst
->c_populated_offset
) <= C_SEG_BUFSIZE
);
1587 c_seg_alloc_nextslot(c_seg_dst
);
1589 c_dst
= C_SEG_SLOT_FROM_INDEX(c_seg_dst
, c_seg_dst
->c_nextslot
);
1591 memcpy(&c_seg_dst
->c_store
.c_buffer
[c_seg_dst
->c_nextoffset
], &c_seg_src
->c_store
.c_buffer
[c_src
->c_offset
], c_size
);
1593 c_rounded_size
= (c_size
+ C_SEG_OFFSET_ALIGNMENT_MASK
) & ~C_SEG_OFFSET_ALIGNMENT_MASK
;
1595 c_seg_major_compact_stats
.moved_slots
++;
1596 c_seg_major_compact_stats
.moved_bytes
+= c_size
;
1598 cslot_copy(c_dst
, c_src
);
1599 c_dst
->c_offset
= c_seg_dst
->c_nextoffset
;
1601 if (c_seg_dst
->c_firstemptyslot
== c_seg_dst
->c_nextslot
)
1602 c_seg_dst
->c_firstemptyslot
++;
1603 c_seg_dst
->c_slots_used
++;
1604 c_seg_dst
->c_nextslot
++;
1605 c_seg_dst
->c_bytes_used
+= c_rounded_size
;
1606 c_seg_dst
->c_nextoffset
+= C_SEG_BYTES_TO_OFFSET(c_rounded_size
);
1608 PACK_C_SIZE(c_src
, 0);
1610 c_seg_src
->c_bytes_used
-= c_rounded_size
;
1611 c_seg_src
->c_bytes_unused
+= c_rounded_size
;
1612 c_seg_src
->c_firstemptyslot
= 0;
1614 assert(c_seg_src
->c_slots_used
);
1615 c_seg_src
->c_slots_used
--;
1617 if (c_seg_dst
->c_nextoffset
>= C_SEG_OFF_LIMIT
|| c_seg_dst
->c_nextslot
>= C_SLOT_MAX_INDEX
) {
1618 /* dest segment is now full */
1619 keep_compacting
= FALSE
;
1623 #if DEVELOPMENT || DEBUG
1624 C_SEG_WRITE_PROTECT(c_seg_dst
);
1626 if (dst_slot
< c_seg_dst
->c_nextslot
) {
1628 PAGE_REPLACEMENT_ALLOWED(TRUE
);
1630 * we've now locked out c_decompress from
1631 * converting the slot passed into it into
1632 * a c_segment_t which allows us to use
1633 * the backptr to change which c_segment and
1634 * index the slot points to
1636 while (dst_slot
< c_seg_dst
->c_nextslot
) {
1638 c_dst
= C_SEG_SLOT_FROM_INDEX(c_seg_dst
, dst_slot
);
1640 slot_ptr
= (c_slot_mapping_t
)C_SLOT_UNPACK_PTR(c_dst
);
1641 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
1642 slot_ptr
->s_cseg
= c_seg_dst
->c_mysegno
+ 1;
1643 slot_ptr
->s_cindx
= dst_slot
++;
1645 PAGE_REPLACEMENT_ALLOWED(FALSE
);
1647 return (keep_compacting
);
1652 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec
, clock_nsec_t end_nsec
, clock_sec_t start_sec
, clock_nsec_t start_nsec
)
1655 uint64_t start_msecs
;
1657 end_msecs
= (end_sec
* 1000) + end_nsec
/ 1000000;
1658 start_msecs
= (start_sec
* 1000) + start_nsec
/ 1000000;
1660 return (end_msecs
- start_msecs
);
1665 uint32_t compressor_eval_period_in_msecs
= 250;
1666 uint32_t compressor_sample_min_in_msecs
= 500;
1667 uint32_t compressor_sample_max_in_msecs
= 10000;
1668 uint32_t compressor_thrashing_threshold_per_10msecs
= 50;
1669 uint32_t compressor_thrashing_min_per_10msecs
= 20;
1671 /* When true, reset sample data next chance we get. */
1672 static boolean_t compressor_need_sample_reset
= FALSE
;
1674 extern uint32_t vm_page_filecache_min
;
1678 compute_swapout_target_age(void)
1680 clock_sec_t cur_ts_sec
;
1681 clock_nsec_t cur_ts_nsec
;
1682 uint32_t min_operations_needed_in_this_sample
;
1683 uint64_t elapsed_msecs_in_eval
;
1684 uint64_t elapsed_msecs_in_sample
;
1685 boolean_t need_eval_reset
= FALSE
;
1687 clock_get_system_nanotime(&cur_ts_sec
, &cur_ts_nsec
);
1689 elapsed_msecs_in_sample
= vm_compressor_compute_elapsed_msecs(cur_ts_sec
, cur_ts_nsec
, start_of_sample_period_sec
, start_of_sample_period_nsec
);
1691 if (compressor_need_sample_reset
||
1692 elapsed_msecs_in_sample
>= compressor_sample_max_in_msecs
) {
1693 compressor_need_sample_reset
= TRUE
;
1694 need_eval_reset
= TRUE
;
1697 elapsed_msecs_in_eval
= vm_compressor_compute_elapsed_msecs(cur_ts_sec
, cur_ts_nsec
, start_of_eval_period_sec
, start_of_eval_period_nsec
);
1699 if (elapsed_msecs_in_eval
< compressor_eval_period_in_msecs
)
1701 need_eval_reset
= TRUE
;
1703 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START
, elapsed_msecs_in_eval
, sample_period_compression_count
, sample_period_decompression_count
, 0, 0);
1705 min_operations_needed_in_this_sample
= (compressor_thrashing_min_per_10msecs
* (uint32_t)elapsed_msecs_in_eval
) / 10;
1707 if ((sample_period_compression_count
- last_eval_compression_count
) < min_operations_needed_in_this_sample
||
1708 (sample_period_decompression_count
- last_eval_decompression_count
) < min_operations_needed_in_this_sample
) {
1710 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, sample_period_compression_count
- last_eval_compression_count
,
1711 sample_period_decompression_count
- last_eval_decompression_count
, 0, 1, 0);
1713 swapout_target_age
= 0;
1715 compressor_need_sample_reset
= TRUE
;
1716 need_eval_reset
= TRUE
;
1719 last_eval_compression_count
= sample_period_compression_count
;
1720 last_eval_decompression_count
= sample_period_decompression_count
;
1722 if (elapsed_msecs_in_sample
< compressor_sample_min_in_msecs
) {
1724 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, swapout_target_age
, 0, 0, 5, 0);
1727 if (sample_period_decompression_count
> ((compressor_thrashing_threshold_per_10msecs
* elapsed_msecs_in_sample
) / 10)) {
1729 uint64_t running_total
;
1730 uint64_t working_target
;
1731 uint64_t aging_target
;
1732 uint32_t oldest_age_of_csegs_sampled
= 0;
1733 uint64_t working_set_approximation
= 0;
1735 swapout_target_age
= 0;
1737 working_target
= (sample_period_decompression_count
/ 100) * 95; /* 95 percent */
1738 aging_target
= (sample_period_decompression_count
/ 100) * 1; /* 1 percent */
1741 for (oldest_age_of_csegs_sampled
= 0; oldest_age_of_csegs_sampled
< DECOMPRESSION_SAMPLE_MAX_AGE
; oldest_age_of_csegs_sampled
++) {
1743 running_total
+= age_of_decompressions_during_sample_period
[oldest_age_of_csegs_sampled
];
1745 working_set_approximation
+= oldest_age_of_csegs_sampled
* age_of_decompressions_during_sample_period
[oldest_age_of_csegs_sampled
];
1747 if (running_total
>= working_target
)
1750 if (oldest_age_of_csegs_sampled
< DECOMPRESSION_SAMPLE_MAX_AGE
) {
1752 working_set_approximation
= (working_set_approximation
* 1000) / elapsed_msecs_in_sample
;
1754 if (working_set_approximation
< VM_PAGE_COMPRESSOR_COUNT
) {
1756 running_total
= overage_decompressions_during_sample_period
;
1758 for (oldest_age_of_csegs_sampled
= DECOMPRESSION_SAMPLE_MAX_AGE
- 1; oldest_age_of_csegs_sampled
; oldest_age_of_csegs_sampled
--) {
1759 running_total
+= age_of_decompressions_during_sample_period
[oldest_age_of_csegs_sampled
];
1761 if (running_total
>= aging_target
)
1764 swapout_target_age
= (uint32_t)cur_ts_sec
- oldest_age_of_csegs_sampled
;
1766 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, swapout_target_age
, working_set_approximation
, VM_PAGE_COMPRESSOR_COUNT
, 2, 0);
1768 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, working_set_approximation
, VM_PAGE_COMPRESSOR_COUNT
, 0, 3, 0);
1771 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, working_target
, running_total
, 0, 4, 0);
1773 compressor_need_sample_reset
= TRUE
;
1774 need_eval_reset
= TRUE
;
1776 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, sample_period_decompression_count
, (compressor_thrashing_threshold_per_10msecs
* elapsed_msecs_in_sample
) / 10, 0, 6, 0);
1778 if (compressor_need_sample_reset
== TRUE
) {
1779 bzero(age_of_decompressions_during_sample_period
, sizeof(age_of_decompressions_during_sample_period
));
1780 overage_decompressions_during_sample_period
= 0;
1782 start_of_sample_period_sec
= cur_ts_sec
;
1783 start_of_sample_period_nsec
= cur_ts_nsec
;
1784 sample_period_decompression_count
= 0;
1785 sample_period_compression_count
= 0;
1786 last_eval_decompression_count
= 0;
1787 last_eval_compression_count
= 0;
1788 compressor_need_sample_reset
= FALSE
;
1790 if (need_eval_reset
== TRUE
) {
1791 start_of_eval_period_sec
= cur_ts_sec
;
1792 start_of_eval_period_nsec
= cur_ts_nsec
;
1797 int compaction_swapper_init_now
= 0;
1798 int compaction_swapper_running
= 0;
1799 int compaction_swapper_awakened
= 0;
1800 int compaction_swapper_abort
= 0;
1804 boolean_t
memorystatus_kill_on_VM_thrashing(boolean_t
);
1805 boolean_t
memorystatus_kill_on_FC_thrashing(boolean_t
);
1806 int compressor_thrashing_induced_jetsam
= 0;
1807 int filecache_thrashing_induced_jetsam
= 0;
1808 static boolean_t vm_compressor_thrashing_detected
= FALSE
;
1809 #endif /* CONFIG_JETSAM */
1812 compressor_needs_to_swap(void)
1814 boolean_t should_swap
= FALSE
;
1816 if (vm_swapout_ripe_segments
== TRUE
&& c_overage_swapped_count
< c_overage_swapped_limit
) {
1822 clock_get_system_nanotime(&now
, &nsec
);
1825 lck_mtx_lock_spin_always(c_list_lock
);
1827 if ( !queue_empty(&c_age_list_head
)) {
1828 c_seg
= (c_segment_t
) queue_first(&c_age_list_head
);
1830 age
= now
- c_seg
->c_creation_ts
;
1832 lck_mtx_unlock_always(c_list_lock
);
1834 if (age
>= vm_ripe_target_age
)
1837 if (VM_CONFIG_SWAP_IS_ACTIVE
) {
1838 if (COMPRESSOR_NEEDS_TO_SWAP()) {
1841 if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external
) && vm_page_anonymous_count
< (vm_page_inactive_count
/ 20)) {
1844 if (vm_page_free_count
< (vm_page_free_reserved
- (COMPRESSOR_FREE_RESERVED_LIMIT
* 2)))
1847 compute_swapout_target_age();
1849 if (swapout_target_age
) {
1852 lck_mtx_lock_spin_always(c_list_lock
);
1854 if (!queue_empty(&c_age_list_head
)) {
1856 c_seg
= (c_segment_t
) queue_first(&c_age_list_head
);
1858 if (c_seg
->c_creation_ts
> swapout_target_age
)
1859 swapout_target_age
= 0;
1861 lck_mtx_unlock_always(c_list_lock
);
1863 #if CONFIG_PHANTOM_CACHE
1864 if (vm_phantom_cache_check_pressure())
1867 if (swapout_target_age
)
1871 if (should_swap
|| vm_compressor_low_on_space() == TRUE
) {
1873 if (vm_compressor_thrashing_detected
== FALSE
) {
1874 vm_compressor_thrashing_detected
= TRUE
;
1876 if (swapout_target_age
|| vm_compressor_low_on_space() == TRUE
) {
1877 memorystatus_kill_on_VM_thrashing(TRUE
/* async */);
1878 compressor_thrashing_induced_jetsam
++;
1880 memorystatus_kill_on_FC_thrashing(TRUE
/* async */);
1881 filecache_thrashing_induced_jetsam
++;
1885 * let the jetsam take precedence over
1886 * any major compactions we might have
1887 * been able to do... otherwise we run
1888 * the risk of doing major compactions
1889 * on segments we're about to free up
1890 * due to the jetsam activity.
1892 should_swap
= FALSE
;
1895 #endif /* CONFIG_JETSAM */
1897 if (should_swap
== FALSE
) {
1899 * vm_compressor_needs_to_major_compact returns true only if we're
1900 * about to run out of available compressor segments... in this
1901 * case, we absolutely need to run a major compaction even if
1902 * we've just kicked off a jetsam or we don't otherwise need to
1903 * swap... terminating objects releases
1904 * pages back to the uncompressed cache, but does not guarantee
1905 * that we will free up even a single compression segment
1907 should_swap
= vm_compressor_needs_to_major_compact();
1911 * returning TRUE when swap_supported == FALSE
1912 * will cause the major compaction engine to
1913 * run, but will not trigger any swapping...
1914 * segments that have been major compacted
1915 * will be moved to the majorcompact queue
1917 return (should_swap
);
1922 * This function is called from the jetsam thread after killing something to
1923 * mitigate thrashing.
1925 * We need to restart our thrashing detection heuristics since memory pressure
1926 * has potentially changed significantly, and we don't want to detect on old
1927 * data from before the jetsam.
1930 vm_thrashing_jetsam_done(void)
1932 vm_compressor_thrashing_detected
= FALSE
;
1934 /* Were we compressor-thrashing or filecache-thrashing? */
1935 if (swapout_target_age
) {
1936 swapout_target_age
= 0;
1937 compressor_need_sample_reset
= TRUE
;
1939 #if CONFIG_PHANTOM_CACHE
1941 vm_phantom_cache_restart_sample();
1945 #endif /* CONFIG_JETSAM */
uint32_t vm_wake_compactor_swapper_calls = 0;
uint32_t vm_run_compactor_already_running = 0;
uint32_t vm_run_compactor_empty_minor_q = 0;
uint32_t vm_run_compactor_did_compact = 0;
uint32_t vm_run_compactor_waited = 0;

void
vm_run_compactor(void)
{
	if (c_segment_count == 0)
		return;

	lck_mtx_lock_spin_always(c_list_lock);

	if (c_minor_count == 0) {
		vm_run_compactor_empty_minor_q++;

		lck_mtx_unlock_always(c_list_lock);
		return;
	}
	if (compaction_swapper_running) {

		if (vm_restricted_to_single_processor == FALSE) {
			vm_run_compactor_already_running++;

			lck_mtx_unlock_always(c_list_lock);
			return;
		}
		vm_run_compactor_waited++;

		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		return;
	}
	vm_run_compactor_did_compact++;

	fastwake_warmup = FALSE;
	compaction_swapper_running = 1;

	vm_compressor_do_delayed_compactions(FALSE);

	compaction_swapper_running = 0;

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);
}
void
vm_wake_compactor_swapper(void)
{
	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0)
		return;

	if (c_minor_count || vm_compressor_needs_to_major_compact()) {

		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {

			vm_wake_compactor_swapper_calls++;

			compaction_swapper_awakened = 1;
			thread_wakeup((event_t)&c_compressor_swap_trigger);
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}
void
vm_consider_swapping()
{
	c_segment_t	c_seg, c_seg_next;
	clock_sec_t	now;
	clock_nsec_t	nsec;

	assert(VM_CONFIG_SWAP_IS_PRESENT);

	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	vm_swapout_ripe_segments = TRUE;

	if (!queue_empty(&c_major_list_head)) {

		clock_get_system_nanotime(&now, &nsec);

		c_seg = (c_segment_t)queue_first(&c_major_list_head);

		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {

			if (c_overage_swapped_count >= c_overage_swapped_limit)
				break;

			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);

			if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {

				lck_mtx_lock_spin_always(&c_seg->c_lock);

				c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);

				lck_mtx_unlock_always(&c_seg->c_lock);
			}
			c_seg = c_seg_next;
		}
	}
	vm_compressor_compact_and_swap(FALSE);

	compaction_swapper_running = 0;

	vm_swapout_ripe_segments = FALSE;

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);
}
void
vm_consider_waking_compactor_swapper(void)
{
	boolean_t	need_wakeup = FALSE;

	if (c_segment_count == 0)
		return;

	if (compaction_swapper_running || compaction_swapper_awakened)
		return;

	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
		compaction_swapper_init_now = 1;
		need_wakeup = TRUE;
	}

	if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {

		need_wakeup = TRUE;

	} else if (compressor_needs_to_swap()) {

		need_wakeup = TRUE;

	} else if (c_minor_count) {
		uint64_t	total_bytes;

		total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;

		if ((total_bytes - compressor_bytes_used) > total_bytes / 10)
			need_wakeup = TRUE;
	}
	if (need_wakeup == TRUE) {

		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
			memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);

			compaction_swapper_awakened = 1;
			thread_wakeup((event_t)&c_compressor_swap_trigger);
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}
#define	C_SWAPOUT_LIMIT			4
#define	DELAYED_COMPACTIONS_PER_PASS	30

void
vm_compressor_do_delayed_compactions(boolean_t flush_all)
{
	c_segment_t	c_seg;
	int		number_compacted = 0;
	boolean_t	needs_to_swap = FALSE;


#if !CONFIG_EMBEDDED
	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
#endif /* !CONFIG_EMBEDDED */

	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {

		c_seg = (c_segment_t)queue_first(&c_minor_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {

			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);

		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {

			if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT)
				needs_to_swap = TRUE;

			number_compacted = 0;
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
}
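/*
 * In other words: this drains the minor-compaction queue in batches of
 * DELAYED_COMPACTIONS_PER_PASS, and when swap is active it periodically
 * re-checks whether a swapout is warranted so that a long minor queue can't
 * starve the swapout path (which is itself bounded by C_SWAPOUT_LIMIT
 * segments in flight).
 */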
#define C_SEGMENT_SWAPPEDIN_AGE_LIMIT	10

void
vm_compressor_age_swapped_in_segments(boolean_t flush_all)
{
	c_segment_t	c_seg;
	clock_sec_t	now;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&now, &nsec);

	while (!queue_empty(&c_swappedin_list_head)) {

		c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);

		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT)
			break;

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);

		lck_mtx_unlock_always(&c_seg->c_lock);
	}
}
extern	int	vm_num_swap_files;
extern	int	vm_num_pinned_swap_files;
extern	int	vm_swappin_enabled;

extern	unsigned int	vm_swapfile_total_segs_used;
extern	unsigned int	vm_swapfile_total_segs_alloced;


void
vm_compressor_flush(void)
{
	uint64_t	vm_swap_put_failures_at_start;
	wait_result_t	wait_result = 0;
	AbsoluteTime	startTime, endTime;
	clock_sec_t	now_sec;
	clock_nsec_t	now_nsec;
	uint64_t	nsec;

	HIBLOG("vm_compressor_flush - starting\n");

	clock_get_uptime(&startTime);

	lck_mtx_lock_spin_always(c_list_lock);

	fastwake_warmup = FALSE;
	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	hibernate_flushing = TRUE;
	hibernate_no_swapspace = FALSE;
	c_generation_id_flush_barrier = c_generation_id + 1000;

	clock_get_system_nanotime(&now_sec, &now_nsec);
	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;

	vm_swap_put_failures_at_start = vm_swap_put_failures;

	vm_compressor_compact_and_swap(TRUE);

	while (!queue_empty(&c_swapout_list_head)) {

		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);

		lck_mtx_unlock_always(c_list_lock);

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);

		if (wait_result == THREAD_TIMED_OUT)
			break;
	}
	hibernate_flushing = FALSE;
	compaction_swapper_running = 0;

	if (vm_swap_put_failures > vm_swap_put_failures_at_start)
		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
		       vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
	       nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
}
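/*
 * Note: vm_compressor_flush is the hibernation-time flush path - it drives a
 * full compact-and-swap pass (flush_all == TRUE) behind a generation-id
 * barrier, then waits for the swapout queue to drain, giving up if the timed
 * wait above expires or if swap puts start failing.
 */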
int		compaction_swap_trigger_thread_awakened = 0;

void
vm_compressor_swap_trigger_thread(void)
{
	current_thread()->options |= TH_OPT_VMPRIV;

	/*
	 * compaction_swapper_init_now is set when the first call to
	 * vm_consider_waking_compactor_swapper is made from
	 * vm_pageout_scan... since this function is called upon
	 * thread creation, we want to make sure to delay adjusting
	 * the tuneables until we are awakened via vm_pageout_scan
	 * so that we are at a point where the vm_swapfile_open will
	 * be operating on the correct directory (in case the default
	 * of /var/vm/ is overridden by the dynamic_pager)
	 */
	if (compaction_swapper_init_now) {
		vm_compaction_swapper_do_init();

		if (vm_restricted_to_single_processor == TRUE)
			thread_vm_bind_group_add();
		thread_set_thread_name(current_thread(), "VM_cswap_trigger");
		compaction_swapper_init_now = 0;
	}
	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swap_trigger_thread_awakened++;
	compaction_swapper_awakened = 0;

	if (compaction_swapper_running == 0) {

		compaction_swapper_running = 1;

		vm_compressor_compact_and_swap(FALSE);

		compaction_swapper_running = 0;
	}
	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);

	if (compaction_swapper_running == 0)
		thread_wakeup((event_t)&compaction_swapper_running);

	lck_mtx_unlock_always(c_list_lock);

	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);

	/* NOTREACHED */
}
void
vm_compressor_record_warmup_start(void)
{
	c_segment_t	c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == 0) {
		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else
			first_c_segment_to_warm_generation_id = 0;

		fastwake_recording_in_progress = TRUE;
	}
	lck_mtx_unlock_always(c_list_lock);
}


void
vm_compressor_record_warmup_end(void)
{
	c_segment_t	c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (fastwake_recording_in_progress == TRUE) {

		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else
			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;

		fastwake_recording_in_progress = FALSE;

		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
	}
	lck_mtx_unlock_always(c_list_lock);
}
#define	DELAY_TRIM_ON_WAKE_SECS		25

void
vm_compressor_delay_trim(void)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&sec, &nsec);
	dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
}


void
vm_compressor_do_warmup(void)
{
	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;

		lck_mtx_unlock_always(c_list_lock);
		return;
	}

	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {

		fastwake_warmup = TRUE;

		compaction_swapper_awakened = 1;
		thread_wakeup((event_t)&c_compressor_swap_trigger);
	}
	lck_mtx_unlock_always(c_list_lock);
}
void
do_fastwake_warmup_all(void)
{

	lck_mtx_lock_spin_always(c_list_lock);

	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {

		lck_mtx_unlock_always(c_list_lock);
		return;
	}

	fastwake_warmup = TRUE;

	do_fastwake_warmup(&c_swappedout_list_head, TRUE);

	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);

	fastwake_warmup = FALSE;

	lck_mtx_unlock_always(c_list_lock);
}
void
do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
{
	c_segment_t	c_seg = NULL;
	AbsoluteTime	startTime, endTime;
	uint64_t	nsec;


	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);

	clock_get_uptime(&startTime);

	lck_mtx_unlock_always(c_list_lock);

	proc_set_thread_policy(current_thread(),
			       TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);

	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {

		c_seg = (c_segment_t) queue_first(c_queue);

		if (consider_all_cseg == FALSE) {
			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
				break;

			if (vm_page_free_count < (AVAILABLE_MEMORY / 4))
				break;
		}

		lck_mtx_lock_spin_always(&c_seg->c_lock);
		lck_mtx_unlock_always(c_list_lock);

		if (c_seg->c_busy) {
			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			c_seg_wait_on_busy(c_seg);
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		} else {
			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0)
				lck_mtx_unlock_always(&c_seg->c_lock);
			c_segment_warmup_count++;

			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			vm_pageout_io_throttle();
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
	lck_mtx_unlock_always(c_list_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	proc_set_thread_policy(current_thread(),
			       TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);

	lck_mtx_lock_spin_always(c_list_lock);

	if (consider_all_cseg == FALSE) {
		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
	}
}
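/*
 * Note: warmup I/O is issued at THROTTLE_LEVEL_COMPRESSOR_TIER2 and paced
 * through vm_pageout_io_throttle() so that prefetching swapped-out segments
 * after wake doesn't compete with foreground I/O; the thread's tier is
 * restored to TIER0 once the pass completes.
 */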
void
vm_compressor_compact_and_swap(boolean_t flush_all)
{
	c_segment_t	c_seg, c_seg_next;
	boolean_t	keep_compacting;
	clock_sec_t	now;
	clock_nsec_t	nsec;


	if (fastwake_warmup == TRUE) {
		uint64_t	starting_warmup_count;

		starting_warmup_count = c_segment_warmup_count;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
				      first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);

		fastwake_warmup = FALSE;
	}

	/*
	 * it's possible for the c_age_list_head to be empty if we
	 * hit our limits for growing the compressor pool and we subsequently
	 * hibernated... on the next hibernation we could see the queue as
	 * empty and not proceeed even though we have a bunch of segments on
	 * the swapped in queue that need to be dealt with.
	 */
	vm_compressor_do_delayed_compactions(flush_all);

	vm_compressor_age_swapped_in_segments(flush_all);

	/*
	 * we only need to grab the timestamp once per
	 * invocation of this function since the
	 * timescale we're interested in is measured
	 * in days
	 */
	clock_get_system_nanotime(&now, &nsec);

	while (!queue_empty(&c_age_list_head) && compaction_swapper_abort == 0) {

		if (hibernate_flushing == TRUE) {
			clock_sec_t	sec;

			if (hibernate_should_abort()) {
				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
				break;
			}
			if (hibernate_no_swapspace == TRUE) {
				HIBLOG("vm_compressor_flush - out of swap space\n");
				break;
			}
			if (vm_swap_files_pinned() == FALSE) {
				HIBLOG("vm_compressor_flush - unpinned swap files\n");
				break;
			}
			if (hibernate_in_progress_with_pinned_swap == TRUE &&
			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
				break;
			}
			clock_get_system_nanotime(&sec, &nsec);

			if (sec > hibernate_flushing_deadline) {
				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
				break;
			}
		}
		if (c_swapout_count >= C_SWAPOUT_LIMIT) {

			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000*NSEC_PER_USEC);

			lck_mtx_unlock_always(c_list_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock_spin_always(c_list_lock);
		}
		/*
		 * Minor compactions
		 */
		vm_compressor_do_delayed_compactions(flush_all);

		vm_compressor_age_swapped_in_segments(flush_all);

		if (c_swapout_count >= C_SWAPOUT_LIMIT) {
			/*
			 * we timed out on the above thread_block
			 * let's loop around and try again
			 * the timeout allows us to continue
			 * to do minor compactions to make
			 * more memory available
			 */
			continue;
		}

		/*
		 * Swap out segments?
		 */
		if (flush_all == FALSE) {
			boolean_t	needs_to_swap;

			lck_mtx_unlock_always(c_list_lock);

			needs_to_swap = compressor_needs_to_swap();

#if !CONFIG_EMBEDDED
			if (needs_to_swap == TRUE && vm_swap_low_on_space())
				vm_compressor_take_paging_space_action();
#endif /* !CONFIG_EMBEDDED */

			lck_mtx_lock_spin_always(c_list_lock);

			if (needs_to_swap == FALSE)
				break;
		}
		if (queue_empty(&c_age_list_head))
			break;
		c_seg = (c_segment_t) queue_first(&c_age_list_head);

		assert(c_seg->c_state == C_ON_AGE_Q);

		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier)
			break;

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {

			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
			/*
			 * found an empty c_segment and freed it
			 * so go grab the next guy in the queue
			 */
			c_seg_major_compact_stats.count_of_freed_segs++;
			continue;
		}
		/*
		 * Major compaction
		 */
		keep_compacting = TRUE;

		while (keep_compacting == TRUE) {

			assert(c_seg->c_busy);

			/* look for another segment to consolidate */

			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);

			if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next))
				break;

			assert(c_seg_next->c_state == C_ON_AGE_Q);

			if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE)
				break;

			lck_mtx_lock_spin_always(&c_seg_next->c_lock);

			if (c_seg_next->c_busy) {

				lck_mtx_unlock_always(c_list_lock);
				c_seg_wait_on_busy(c_seg_next);
				lck_mtx_lock_spin_always(c_list_lock);

				continue;
			}
			/* grab that segment */
			C_SEG_BUSY(c_seg_next);

			if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
				/*
				 * found an empty c_segment and freed it
				 * so we can't continue to use c_seg_next
				 */
				c_seg_major_compact_stats.count_of_freed_segs++;
				continue;
			}

			/* unlock the list ... */
			lck_mtx_unlock_always(c_list_lock);

			/* do the major compaction */

			keep_compacting = c_seg_major_compact(c_seg, c_seg_next);

			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			lck_mtx_lock_spin_always(&c_seg_next->c_lock);
			/*
			 * run a minor compaction on the donor segment
			 * since we pulled at least some of it's
			 * data into our target... if we've emptied
			 * it, now is a good time to free it which
			 * c_seg_minor_compaction_and_unlock also takes care of
			 *
			 * by passing TRUE, we ask for c_busy to be cleared
			 * and c_wanted to be taken care of
			 */
			if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE))
				c_seg_major_compact_stats.count_of_freed_segs++;

			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			/* relock the list */
			lck_mtx_lock_spin_always(c_list_lock);

		} /* major compaction */

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_busy);
		assert(!c_seg->c_on_minorcompact_q);

		if (VM_CONFIG_SWAP_IS_ACTIVE) {
			/*
			 * This mode of putting a generic c_seg on the swapout list is
			 * only supported when we have general swapping enabled
			 */
			c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
		} else {
			if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {

				assert(VM_CONFIG_SWAP_IS_PRESENT);
				/*
				 * we are running compressor sweeps with swap-behind
				 * make sure the c_seg has aged enough before swapping it
				 * out...
				 */
				if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
					c_seg->c_overage_swap = TRUE;
					c_overage_swapped_count++;
					c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
				}
			}
		}
		if (c_seg->c_state == C_ON_AGE_Q) {
			/*
			 * this c_seg didn't get moved to the swapout queue
			 * so we need to move it out of the way...
			 * we just did a major compaction on it so put it
			 * on that queue
			 */
			c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
		} else {
			c_seg_major_compact_stats.wasted_space_in_swapouts += C_SEG_BUFSIZE - c_seg->c_bytes_used;
			c_seg_major_compact_stats.count_of_swapouts++;
		}
		C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);

		if (c_swapout_count) {
			lck_mtx_unlock_always(c_list_lock);

			thread_wakeup((event_t)&c_swapout_list_head);

			lck_mtx_lock_spin_always(c_list_lock);
		}
	}
}
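/*
 * Each pass of the loop above roughly: (1) honors hibernate abort/deadline
 * conditions, (2) throttles on C_SWAPOUT_LIMIT, (3) runs delayed minor
 * compactions and ages swapped-in segments, (4) takes the oldest segment on
 * the age queue and major-compacts younger neighbors into it, and (5) moves
 * the result to the swapout queue when swap is active, or to the
 * major-compact queue otherwise.
 */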
c_segment_t
c_seg_allocate(c_segment_t *current_chead)
{
	c_segment_t	c_seg;
	int		min_needed;
	int		size_to_populate;

#if !CONFIG_EMBEDDED
	if (vm_compressor_low_on_space())
		vm_compressor_take_paging_space_action();
#endif /* !CONFIG_EMBEDDED */

	if ( (c_seg = *current_chead) == NULL ) {
		int	c_segno;

		lck_mtx_lock_spin_always(c_list_lock);

		while (c_segments_busy == TRUE) {
			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);

			lck_mtx_unlock_always(c_list_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock_spin_always(c_list_lock);
		}
		if (c_free_segno_head == (uint32_t)-1) {
			uint32_t	c_segments_available_new;

			if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit) {
				lck_mtx_unlock_always(c_list_lock);

				return (NULL);
			}
			c_segments_busy = TRUE;
			lck_mtx_unlock_always(c_list_lock);

			kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page,
					       PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
			c_segments_next_page += PAGE_SIZE;

			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;

			if (c_segments_available_new > c_segments_limit)
				c_segments_available_new = c_segments_limit;

			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++)
				c_segments[c_segno - 1].c_segno = c_segno;

			lck_mtx_lock_spin_always(c_list_lock);

			c_segments[c_segno - 1].c_segno = c_free_segno_head;
			c_free_segno_head = c_segments_available;
			c_segments_available = c_segments_available_new;

			c_segments_busy = FALSE;
			thread_wakeup((event_t) (&c_segments_busy));
		}
		c_segno = c_free_segno_head;
		assert(c_segno >= 0 && c_segno < c_segments_limit);

		c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;

		/*
		 * do the rest of the bookkeeping now while we're still behind
		 * the list lock and grab our generation id now into a local
		 * so that we can install it once we have the c_seg allocated
		 */
		c_segment_count++;
		if (c_segment_count > c_segment_count_max)
			c_segment_count_max = c_segment_count;

		lck_mtx_unlock_always(c_list_lock);

		c_seg = (c_segment_t)zalloc(compressor_segment_zone);
		bzero((char *)c_seg, sizeof(struct c_segment));

		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);

		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);

		c_seg->c_state = C_IS_EMPTY;
		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
		c_seg->c_mysegno = c_segno;

		lck_mtx_lock_spin_always(c_list_lock);

		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
		c_segments[c_segno].c_seg = c_seg;
		assert(c_segments[c_segno].c_segno > c_segments_available);
		lck_mtx_unlock_always(c_list_lock);

		*current_chead = c_seg;

#if DEVELOPMENT || DEBUG
		C_SEG_MAKE_WRITEABLE(c_seg);
#endif
	}
	c_seg_alloc_nextslot(c_seg);

	size_to_populate = C_SEG_ALLOCSIZE - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);

	if (size_to_populate) {

		min_needed = PAGE_SIZE + (C_SEG_ALLOCSIZE - C_SEG_BUFSIZE);

		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {

			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE)
				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
			vm_compressor_pages_grabbed += size_to_populate / PAGE_SIZE;

			kernel_memory_populate(compressor_map,
					       (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
					       size_to_populate,
					       KMA_COMPRESSOR,
					       VM_KERN_MEMORY_COMPRESSOR);
		} else
			size_to_populate = 0;
	}
	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(&c_seg->c_lock);

	if (size_to_populate)
		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);

	return (c_seg);
}
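/*
 * Sketch of the populate math above (exact values depend on the platform's
 * C_SEG_BUFSIZE / C_SEG_ALLOCSIZE configuration): a segment's buffer is
 * populated lazily - kernel_memory_populate() is only called when less than
 * roughly a page of populated-but-unused space remains ahead of c_nextoffset,
 * and at most C_SEG_MAX_POPULATE_SIZE is grabbed at a time so a single
 * compression doesn't wire an entire segment up front.
 */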
void
c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
{
	uint32_t	unused_bytes;
	uint32_t	offset_to_depopulate;
	int		new_state = C_ON_AGE_Q;
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));

#ifndef _OPEN_SOURCE
	/* TODO: The HW codec can generate, lazily, a '2nd page not mapped'
	 * exception. So on such a platform, or platforms where we're confident
	 * the codec does not require a buffer page to absorb trailing writes,
	 * we can create an unmapped hole at the tail of the segment, rather
	 * than a populated mapping. This will also guarantee that the codec
	 * does not overwrite valid data past the edge of the segment and
	 * thus eliminate the depopulation overhead.
	 */
#endif
	if (unused_bytes) {
		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));

		/*
		 * release the extra physical page(s) at the end of the segment
		 */
		lck_mtx_unlock_always(&c_seg->c_lock);

		kernel_memory_depopulate(
			compressor_map,
			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
			unused_bytes,
			KMA_COMPRESSOR);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg->c_populated_offset = offset_to_depopulate;
	}
	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= C_SEG_BUFSIZE);

#if DEVELOPMENT || DEBUG
	{
		boolean_t	c_seg_was_busy = FALSE;

		if ( !c_seg->c_busy)
			C_SEG_BUSY(c_seg);
		else
			c_seg_was_busy = TRUE;

		lck_mtx_unlock_always(&c_seg->c_lock);

		C_SEG_WRITE_PROTECT(c_seg);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg_was_busy == FALSE)
			C_SEG_WAKEUP_DONE(c_seg);
	}
#endif

#if CONFIG_FREEZE
	if (current_chead == (c_segment_t*)&freezer_chead &&
	    VM_CONFIG_SWAP_IS_PRESENT &&
	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE &&
	    c_freezer_swapout_count < VM_MAX_FREEZER_CSEG_SWAP_COUNT) {
		new_state = C_ON_SWAPOUT_Q;
	}
#endif /* CONFIG_FREEZE */

	clock_get_system_nanotime(&sec, &nsec);
	c_seg->c_creation_ts = (uint32_t)sec;

	lck_mtx_lock_spin_always(c_list_lock);

#if CONFIG_FREEZE
	if (c_seg->c_state == C_ON_SWAPOUT_Q)
		c_freezer_swapout_count++;
#endif /* CONFIG_FREEZE */

	c_seg->c_generation_id = c_generation_id++;
	c_seg_switch_state(c_seg, new_state, FALSE);

	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
		c_seg_need_delayed_compaction(c_seg, TRUE);

	lck_mtx_unlock_always(c_list_lock);

#if CONFIG_FREEZE
	if (c_seg->c_state == C_ON_SWAPOUT_Q)
		thread_wakeup((event_t)&c_swapout_list_head);
#endif /* CONFIG_FREEZE */

	*current_chead = NULL;
}
/*
 * returns with c_seg locked
 */
void
c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&sec, &nsec);

	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	assert(c_seg->c_busy_swapping);
	assert(c_seg->c_busy);

	c_seg->c_busy_swapping = 0;

	if (c_seg->c_overage_swap == TRUE) {
		c_overage_swapped_count--;
		c_seg->c_overage_swap = FALSE;
	}
	if (has_data == TRUE) {
		if (age_on_swapin_q == TRUE)
			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
		else
			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);

		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
			c_seg_need_delayed_compaction(c_seg, TRUE);
	} else {
		c_seg->c_store.c_buffer = (int32_t*) NULL;
		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);

		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
	}
	c_seg->c_swappedin_ts = (uint32_t)sec;

	lck_mtx_unlock_always(c_list_lock);
}
/*
 * c_seg has to be locked and is returned locked if the c_seg isn't freed
 * PAGE_REPLACMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
 * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
 */

int
c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
{
	vm_offset_t	addr = 0;
	uint32_t	io_size = 0;
	uint64_t	f_offset;

	assert(C_SEG_IS_ONDISK(c_seg));

#if !CHECKSUM_THE_SWAP
	c_seg_trim_tail(c_seg);
#endif
	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
	f_offset = c_seg->c_store.c_swap_handle;

	C_SEG_BUSY(c_seg);
	c_seg->c_busy_swapping = 1;

	/*
	 * This thread is likely going to block for I/O.
	 * Make sure it is ready to run when the I/O completes because
	 * it needs to clear the busy bit on the c_seg so that other
	 * waiting threads can make progress too. To do that, boost
	 * the rwlock_count so that the priority is boosted.
	 */
	set_thread_rwlock_boost();
	lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
	c_seg->c_store.c_buffer = (int32_t*) addr;

	kernel_memory_populate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);

	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR);

		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
	} else {
#if ENCRYPTED_SWAP
		vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */

#if CHECKSUM_THE_SWAP
		if (c_seg->cseg_swap_size != io_size)
			panic("swapin size doesn't match swapout size");

		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
			panic("c_seg_swapin - Swap hash mismatch\n");
		}
#endif /* CHECKSUM_THE_SWAP */

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);

		OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);

		if (force_minor_compaction == TRUE) {
			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
				/*
				 * c_seg was completely empty so it was freed,
				 * so be careful not to reference it again
				 *
				 * Drop the rwlock_count so that the thread priority
				 * is returned back to where it is supposed to be.
				 */
				clear_thread_rwlock_boost();
				return (1);
			}

			lck_mtx_lock_spin_always(&c_seg->c_lock);
		}
	}
	C_SEG_WAKEUP_DONE(c_seg);

	/*
	 * Drop the rwlock_count so that the thread priority
	 * is returned back to where it is supposed to be.
	 */
	clear_thread_rwlock_boost();

	return (0);
}
void
c_segment_sv_hash_drop_ref(int hash_indx)
{
	struct c_sv_hash_entry o_sv_he, n_sv_he;

	while (1) {

		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;

		n_sv_he.he_ref = o_sv_he.he_ref - 1;
		n_sv_he.he_data = o_sv_he.he_data;

		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
			if (n_sv_he.he_ref == 0)
				OSAddAtomic(-1, &c_segment_svp_in_hash);
			break;
		}
	}
}


int
c_segment_sv_hash_insert(uint32_t data)
{
	int		hash_sindx;
	int		misses;
	struct c_sv_hash_entry o_sv_he, n_sv_he;
	boolean_t	got_ref = FALSE;

	if (data == 0)
		OSAddAtomic(1, &c_segment_svp_zero_compressions);
	else
		OSAddAtomic(1, &c_segment_svp_nonzero_compressions);

	hash_sindx = data & C_SV_HASH_MASK;

	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++)
	{
		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;

		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
			n_sv_he.he_ref = o_sv_he.he_ref + 1;
			n_sv_he.he_data = data;

			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
				if (n_sv_he.he_ref == 1)
					OSAddAtomic(1, &c_segment_svp_in_hash);
				got_ref = TRUE;
				break;
			}
			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
		}
		if (got_ref == TRUE)
			break;
		hash_sindx++;

		if (hash_sindx == C_SV_HASH_SIZE)
			hash_sindx = 0;
	}
	if (got_ref == FALSE)
		return (-1);

	return (hash_sindx);
}
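/*
 * Usage note: a page consisting of a single repeated 32-bit value is not
 * stored in any c_segment at all.  c_compress_page() records it by setting
 * slot_ptr->s_cseg to C_SV_CSEG_ID and slot_ptr->s_cindx to the index
 * returned here; decompression later rebuilds the page from he_data, and
 * freeing the page simply drops the hash reference.
 */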
#if RECORD_THE_COMPRESSED_DATA

void
c_compressed_record_data(char *src, int c_size)
{
	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf)
		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");

	*(int *)((void *)c_compressed_record_cptr) = c_size;

	c_compressed_record_cptr += 4;

	memcpy(c_compressed_record_cptr, src, c_size);
	c_compressed_record_cptr += c_size;
}
#endif
int
c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
{
	int		c_size;
	int		c_rounded_size = 0;
	int		max_csize;
	c_slot_t	cs;
	c_segment_t	c_seg;

	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
retry:
	if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
		return (1);
	}
	/*
	 * returns with c_seg lock held
	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
	 * c_nextslot has been allocated and
	 * c_store.c_buffer populated
	 */
	assert(c_seg->c_state == C_IS_FILLING);

	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);

	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
	assert(slot_ptr == (c_slot_mapping_t)C_SLOT_UNPACK_PTR(cs));

	cs->c_offset = c_seg->c_nextoffset;

	max_csize = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);

	if (max_csize > PAGE_SIZE)
		max_csize = PAGE_SIZE;

#if CHECKSUM_THE_DATA
	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
#endif
	boolean_t incomp_copy = FALSE;
	int max_csize_adj = (max_csize - 4);

	if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
#if defined(__arm__) || defined(__arm64__)
		uint16_t ccodec = CINVALID;

		if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
			c_size = metacompressor((const uint8_t *) src,
						(uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
						max_csize_adj, &ccodec,
						scratch_buf, &incomp_copy);
#if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
			if (c_size > max_csize_adj) {
				c_size = -1;
			}
#endif
		} else {
			c_size = -1;
		}
		assert(ccodec == CCWK || ccodec == CCLZ4);
		cs->c_codec = ccodec;
#endif
	} else {
#if defined(__arm__) || defined(__arm64__)
		cs->c_codec = CCWK;
#endif
#if defined(__arm64__)
		__unreachable_ok_push
		if (PAGE_SIZE == 4096)
			c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
						  (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
		else
			c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
						   (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
		__unreachable_ok_pop
#else
		c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
					   (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
#endif
	}
	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
		"c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);

	if (c_size == -1) {
		if (max_csize < PAGE_SIZE) {
			c_current_seg_filled(c_seg, current_chead);
			assert(*current_chead == NULL);

			lck_mtx_unlock_always(&c_seg->c_lock);
			/* TODO: it may be worth requiring codecs to distinguish
			 * between incompressible inputs and failures due to
			 * budget exhaustion.
			 */
			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			goto retry;
		}
		c_size = PAGE_SIZE;

		if (incomp_copy == FALSE) {
			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
		}

		OSAddAtomic(1, &c_segment_noncompressible_pages);

	} else if (c_size == 0) {
		int		hash_index;

		/*
		 * special case - this is a page completely full of a single 32 bit value
		 */
		hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);

		if (hash_index != -1) {
			slot_ptr->s_cindx = hash_index;
			slot_ptr->s_cseg = C_SV_CSEG_ID;

			OSAddAtomic(1, &c_segment_svp_hash_succeeded);
#if RECORD_THE_COMPRESSED_DATA
			c_compressed_record_data(src, 4);
#endif
			goto sv_compression;
		}
		c_size = 4;

		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);

		OSAddAtomic(1, &c_segment_svp_hash_failed);
	}

#if RECORD_THE_COMPRESSED_DATA
	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

	PACK_C_SIZE(cs, c_size);
	c_seg->c_bytes_used += c_rounded_size;
	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
	c_seg->c_slots_used++;

	slot_ptr->s_cindx = c_seg->c_nextslot++;
	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
	slot_ptr->s_cseg = c_seg->c_mysegno + 1;

sv_compression:
	if (c_seg->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
		c_current_seg_filled(c_seg, current_chead);
		assert(*current_chead == NULL);
	}
	lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

#if RECORD_THE_COMPRESSED_DATA
	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= C_SEG_ALLOCSIZE) {
		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
		c_compressed_record_cptr = c_compressed_record_sbuf;
	}
#endif
	if (c_size) {
		OSAddAtomic64(c_size, &c_segment_compressed_bytes);
		OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
	}
	OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);

	OSAddAtomic(1, &c_segment_pages_compressed);
	OSAddAtomic(1, &sample_period_compression_count);

	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);

	return (0);
}
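/*
 * c_compress_page returns 0 on success (slot_ptr now describes where the
 * page landed, either a c_segment slot or the single-value hash) and 1 if no
 * compression segment could be allocated, i.e. the compressor pool is
 * exhausted; vm_compressor_put() below simply passes that value back to its
 * caller.
 */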
static inline void sv_decompress(int32_t *ddst, int32_t pattern) {
#if __x86_64__
	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
#else
	size_t		i;

	/* Unroll the pattern fill loop 4x to encourage the
	 * compiler to emit NEON stores, cf.
	 * <rdar://problem/25839866> Loop autovectorization
	 * anomalies.
	 * We use separate loops for each PAGE_SIZE
	 * to allow the autovectorizer to engage, as PAGE_SIZE
	 * is currently not a constant.
	 */

	__unreachable_ok_push
	if (PAGE_SIZE == 4096) {
		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
		}
	} else {
		assert(PAGE_SIZE == 16384);
		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
		}
	}
	__unreachable_ok_pop
#endif
}
int
c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
{
	c_slot_t	cs;
	c_segment_t	c_seg;
	uint32_t	c_segno;
	int		c_indx;
	int		c_rounded_size;
	uint32_t	c_size;
	int		retval = 0;
	boolean_t	need_unlock = TRUE;
	boolean_t	consider_defragmenting = FALSE;
	boolean_t	kdp_mode = FALSE;

	if (__improbable(flags & C_KDP)) {
		if (not_in_kdp) {
			panic("C_KDP passed to decompress page from outside of debugger context");
		}

		assert((flags & C_KEEP) == C_KEEP);
		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);

		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
			return (-2);
		}

		kdp_mode = TRUE;
		*zeroslot = 0;
	}

ReTry:
	if (__probable(!kdp_mode)) {
		PAGE_REPLACEMENT_DISALLOWED(TRUE);
	} else {
		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
			return (-2);
		}
	}

	/*
	 * if hibernation is enabled, it indicates (via a call
	 * to 'vm_decompressor_lock' that no further
	 * decompressions are allowed once it reaches
	 * the point of flushing all of the currently dirty
	 * anonymous memory through the compressor and out
	 * to disk... in this state we allow freeing of compressed
	 * pages and must honor the C_DONT_BLOCK case
	 */
	if (__improbable(dst && decompressions_blocked == TRUE)) {
		if (flags & C_DONT_BLOCK) {

			if (__probable(!kdp_mode)) {
				PAGE_REPLACEMENT_DISALLOWED(FALSE);
			}

			*zeroslot = 0;
			return (-2);
		}
		/*
		 * it's safe to atomically assert and block behind the
		 * lock held in shared mode because "decompressions_blocked" is
		 * only set and cleared and the thread_wakeup done when the lock
		 * is held exclusively
		 */
		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		thread_block(THREAD_CONTINUE_NULL);

		goto ReTry;
	}

	/* s_cseg is actually "segno+1" */
	c_segno = slot_ptr->s_cseg - 1;

	if (__improbable(c_segno >= c_segments_available))
		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
		      c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));

	if (__improbable(c_segments[c_segno].c_segno < c_segments_available))
		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
		      c_segno, slot_ptr, *(int *)((void *)slot_ptr));

	c_seg = c_segments[c_segno].c_seg;

	if (__probable(!kdp_mode)) {
		lck_mtx_lock_spin_always(&c_seg->c_lock);
	} else {
		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
			return (-2);
		}
	}

	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);

	if (dst == NULL && c_seg->c_busy_swapping) {
		assert(c_seg->c_busy);

		goto bypass_busy_check;
	}
	if (flags & C_DONT_BLOCK) {
		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
			*zeroslot = 0;

			retval = -2;
			goto done;
		}
	}
	if (c_seg->c_busy) {

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		c_seg_wait_on_busy(c_seg);

		goto ReTry;
	}
bypass_busy_check:

	c_indx = slot_ptr->s_cindx;

	if (__improbable(c_indx >= c_seg->c_nextslot))
		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
		      c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));

	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	c_size = UNPACK_C_SIZE(cs);

	if (__improbable(c_size == 0))
		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
		      c_seg, slot_ptr, *(int *)((void *)slot_ptr));

	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

	if (dst) {
		uint32_t	age_of_cseg;
		clock_sec_t	cur_ts_sec;
		clock_nsec_t	cur_ts_nsec;

		if (C_SEG_IS_ONDISK(c_seg)) {
			assert(kdp_mode == FALSE);
			retval = c_seg_swapin(c_seg, FALSE, TRUE);
			assert(retval == 0);

			retval = 1;
		}
		if (c_seg->c_state == C_ON_BAD_Q) {
			assert(c_seg->c_store.c_buffer == NULL);
			*zeroslot = 0;

			retval = -1;
			goto done;
		}

#if POPCOUNT_THE_COMPRESSED_DATA
		unsigned csvpop;
		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
		}
#endif

#if CHECKSUM_THE_COMPRESSED_DATA
		unsigned csvhash;

		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
		}
#endif
		if (c_rounded_size == PAGE_SIZE) {
			/*
			 * page wasn't compressible... just copy it out
			 */
			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
		} else if (c_size == 4) {
			int32_t		data;
			int32_t		*dptr;

			/*
			 * page was populated with a single value
			 * that didn't fit into our fast hash
			 * so we packed it in as a single non-compressed value
			 * that we need to populate the page with
			 */
			dptr = (int32_t *)(uintptr_t)dst;
			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
			sv_decompress(dptr, data);
		} else {
			uint32_t	my_cpu_no;
			char		*scratch_buf;

			if (__probable(!kdp_mode)) {
				/*
				 * we're behind the c_seg lock held in spin mode
				 * which means pre-emption is disabled... therefore
				 * the following sequence is atomic and safe
				 */
				my_cpu_no = cpu_number();

				assert(my_cpu_no < compressor_cpus);

				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
			} else {
				scratch_buf = kdp_compressor_scratch_buf;
			}

			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
#if defined(__arm__) || defined(__arm64__)
				uint16_t c_codec = cs->c_codec;
				metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
						 (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf);
#endif
			} else {
#if defined(__arm64__)
				__unreachable_ok_push
				if (PAGE_SIZE == 4096)
					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
							   (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
				else
					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
							    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
				__unreachable_ok_pop
#else
				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
						    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
#endif
			}
		}

#if CHECKSUM_THE_DATA
		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
#if defined(__arm__) || defined(__arm64__)
			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
#else
			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
#endif
		}
#endif
		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {

			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE)
				OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
			else
				OSAddAtomic(1, &overage_decompressions_during_sample_period);

			OSAddAtomic(1, &sample_period_decompression_count);
		}
	}
	if (flags & C_KEEP) {
		*zeroslot = 0;
		goto done;
	}
	assert(kdp_mode == FALSE);

	c_seg->c_bytes_unused += c_rounded_size;
	c_seg->c_bytes_used -= c_rounded_size;

	assert(c_seg->c_slots_used);
	c_seg->c_slots_used--;

	PACK_C_SIZE(cs, 0);

	if (c_indx < c_seg->c_firstemptyslot)
		c_seg->c_firstemptyslot = c_indx;

	OSAddAtomic(-1, &c_segment_pages_compressed);

	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
		/*
		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
		 * free of a compressed page (i.e. dst == NULL)
		 */
		OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
	}
	if (c_seg->c_busy_swapping) {
		/*
		 * bypass case for c_busy_swapping...
		 * let the swapin/swapout paths deal with putting
		 * the c_seg on the minor compaction queue if needed
		 */
		assert(c_seg->c_busy);
		goto done;
	}
	assert(!c_seg->c_busy);

	if (c_seg->c_state != C_IS_FILLING) {
		if (c_seg->c_bytes_used == 0) {
			if ( !(C_SEG_IS_ONDISK(c_seg))) {
				int	pages_populated;

				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);

				if (pages_populated) {

					assert(c_seg->c_state != C_ON_BAD_Q);
					assert(c_seg->c_store.c_buffer != NULL);

					C_SEG_BUSY(c_seg);
					lck_mtx_unlock_always(&c_seg->c_lock);

					kernel_memory_depopulate(compressor_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);

					lck_mtx_lock_spin_always(&c_seg->c_lock);
					C_SEG_WAKEUP_DONE(c_seg);
				}
				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q)
					c_seg_need_delayed_compaction(c_seg, FALSE);
			} else {
				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {

					c_seg_move_to_sparse_list(c_seg);
					consider_defragmenting = TRUE;
				}
			}
		} else if (c_seg->c_on_minorcompact_q) {

			assert(c_seg->c_state != C_ON_BAD_Q);

			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
				c_seg_try_minor_compaction_and_unlock(c_seg);
				need_unlock = FALSE;
			}
		} else if ( !(C_SEG_IS_ONDISK(c_seg))) {

			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
				c_seg_need_delayed_compaction(c_seg, FALSE);
			}
		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {

			c_seg_move_to_sparse_list(c_seg);
			consider_defragmenting = TRUE;
		}
	}
done:
	if (__improbable(kdp_mode)) {
		return (retval);
	}

	if (need_unlock == TRUE)
		lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	if (consider_defragmenting == TRUE)
		vm_swap_consider_defragmenting();

#if CONFIG_EMBEDDED
	if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact())
		vm_wake_compactor_swapper();
#endif

	return (retval);
}
int
vm_compressor_get(ppnum_t pn, int *slot, int flags)
{
	c_slot_mapping_t  slot_ptr;
	char	*dst;
	int	zeroslot = 1;
	int	retval;

#if __x86_64__
	dst = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
#elif __arm__ || __arm64__
	dst = (char *) phystokv((pmap_paddr_t)pn << PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif
	slot_ptr = (c_slot_mapping_t)slot;

	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
		int32_t		data;
		int32_t		*dptr;

		/*
		 * page was populated with a single value
		 * that found a home in our hash table
		 * grab that value from the hash and populate the page
		 * that we need to populate the page with
		 */
		dptr = (int32_t *)(uintptr_t)dst;
		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
#if __x86_64__
		memset_word(dptr, data, PAGE_SIZE / sizeof(int32_t));
#else
		{
		int	i;

		for (i = 0; i < (int)(PAGE_SIZE / sizeof(int32_t)); i++)
			*dptr++ = data;
		}
#endif
		if ( !(flags & C_KEEP)) {
			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);

			OSAddAtomic(-1, &c_segment_pages_compressed);
			*slot = 0;
		}
		if (data)
			OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
		else
			OSAddAtomic(1, &c_segment_svp_zero_decompressions);

		return (0);
	}

	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);

	/*
	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
	 */
	if (zeroslot) {
		*slot = 0;
	}
	/*
	 * returns 0 if we successfully decompressed a page from a segment already in memory
	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
	 * returns -1 if we encountered an error swapping in the segment - decompression failed
	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
	 */
	return (retval);
}
int
vm_compressor_free(int *slot, int flags)
{
	c_slot_mapping_t  slot_ptr;
	int	zeroslot = 1;
	int	retval;

	assert(flags == 0 || flags == C_DONT_BLOCK);

	slot_ptr = (c_slot_mapping_t)slot;

	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {

		c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
		OSAddAtomic(-1, &c_segment_pages_compressed);

		*slot = 0;
		return (0);
	}
	retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
	/*
	 * returns 0 if we successfully freed the specified compressed page
	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
	 */

	if (retval == 0)
		*slot = 0;
	else
		assert(retval == -2);

	return (retval);
}
int
vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
{
	char	*src;
	int	retval;

#if __x86_64__
	src = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
#elif __arm__ || __arm64__
	src = (char *) phystokv((pmap_paddr_t)pn << PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif

	retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);

	return (retval);
}
void
vm_compressor_transfer(
	int	*dst_slot_p,
	int	*src_slot_p)
{
	c_slot_mapping_t  dst_slot, src_slot;
	c_segment_t	c_seg;
	int		c_indx;
	c_slot_t	cs;

	src_slot = (c_slot_mapping_t) src_slot_p;

	if (src_slot->s_cseg == C_SV_CSEG_ID) {
		*dst_slot_p = *src_slot_p;
		*src_slot_p = 0;
		return;
	}
	dst_slot = (c_slot_mapping_t) dst_slot_p;
Retry:
	PAGE_REPLACEMENT_DISALLOWED(TRUE);
	/* get segment for src_slot */
	c_seg = c_segments[src_slot->s_cseg -1].c_seg;
	/* lock segment */
	lck_mtx_lock_spin_always(&c_seg->c_lock);
	/* wait if it's busy */
	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
		c_seg_wait_on_busy(c_seg);
		goto Retry;
	}
	/* find the c_slot */
	c_indx = src_slot->s_cindx;
	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	/* point the c_slot back to dst_slot instead of src_slot */
	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
	/* transfer */
	*dst_slot_p = *src_slot_p;
	*src_slot_p = 0;
	lck_mtx_unlock_always(&c_seg->c_lock);
	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}
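/*
 * Note: vm_compressor_transfer() moves ownership of a compressed slot without
 * copying any compressed data - it repoints the slot's packed back-pointer at
 * the destination mapping and copies the 32-bit slot mapping itself, waiting
 * first if the owning segment is busy (unless it is only busy swapping).
 */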
#if CONFIG_FREEZE

int	freezer_finished_filling = 0;

void
vm_compressor_finished_filling(
	void	**current_chead)
{
	c_segment_t	c_seg;

	if ((c_seg = *(c_segment_t *)current_chead) == NULL)
		return;

	assert(c_seg->c_state == C_IS_FILLING);

	lck_mtx_lock_spin_always(&c_seg->c_lock);

	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);

	lck_mtx_unlock_always(&c_seg->c_lock);

	freezer_finished_filling++;
}
/*
 * This routine is used to transfer the compressed chunks from
 * the c_seg/cindx pointed to by slot_p into a new c_seg headed
 * by the current_chead and a new cindx within that c_seg.
 *
 * Currently, this routine is only used by the "freezer backed by
 * compressor with swap" mode to create a series of c_segs that
 * only contain compressed data belonging to one task. So, we
 * move a task's previously compressed data into a set of new
 * c_segs which will also hold the task's yet to be compressed data.
 */

kern_return_t
vm_compressor_relocate(
	void		**current_chead,
	int		*slot_p)
{
	c_slot_mapping_t	slot_ptr;
	c_slot_mapping_t	src_slot;
	uint32_t		c_rounded_size;
	uint32_t		c_size;
	uint16_t		dst_slot;
	c_slot_t		c_dst;
	c_slot_t		c_src;
	int			c_indx;
	c_segment_t		c_seg_dst = NULL;
	c_segment_t		c_seg_src = NULL;
	kern_return_t		kr = KERN_SUCCESS;


	src_slot = (c_slot_mapping_t) slot_p;

	if (src_slot->s_cseg == C_SV_CSEG_ID) {
		/*
		 * no need to relocate... this is a page full of a single
		 * value which is hashed to a single entry not contained
		 * in a c_segment_t
		 */
		return (kr);
	}

Relookup_dst:
	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
	/*
	 * returns with c_seg lock held
	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
	 * c_nextslot has been allocated and
	 * c_store.c_buffer populated
	 */
	if (c_seg_dst == NULL) {
		/*
		 * Out of compression segments?
		 */
		kr = KERN_RESOURCE_SHORTAGE;
		goto out;
	}

	assert(c_seg_dst->c_busy == 0);

	C_SEG_BUSY(c_seg_dst);

	dst_slot = c_seg_dst->c_nextslot;

	lck_mtx_unlock_always(&c_seg_dst->c_lock);

Relookup_src:
	c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;

	assert(c_seg_dst != c_seg_src);

	lck_mtx_lock_spin_always(&c_seg_src->c_lock);

	if (C_SEG_IS_ONDISK(c_seg_src)) {

		/*
		 * A "thaw" can mark a process as eligible for
		 * another freeze cycle without bringing any of
		 * its swapped out c_segs back from disk (because
		 * that is done on-demand).
		 *
		 * If the src c_seg we find for our pre-compressed
		 * data is already on-disk, then we are dealing
		 * with an app's data that is already packed and
		 * swapped out. Don't do anything.
		 */

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		lck_mtx_unlock_always(&c_seg_src->c_lock);

		c_seg_src = NULL;

		goto out;
	}

	if (c_seg_src->c_busy) {

		PAGE_REPLACEMENT_DISALLOWED(FALSE);
		c_seg_wait_on_busy(c_seg_src);

		c_seg_src = NULL;

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		goto Relookup_src;
	}

	C_SEG_BUSY(c_seg_src);

	lck_mtx_unlock_always(&c_seg_src->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	/* find the c_slot */
	c_indx = src_slot->s_cindx;

	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);

	c_size = UNPACK_C_SIZE(c_src);

	assert(c_size);

	if (c_size > (uint32_t)(C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
		/*
		 * This segment is full. We need a new one.
		 */

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
		C_SEG_WAKEUP_DONE(c_seg_src);
		lck_mtx_unlock_always(&c_seg_src->c_lock);

		c_seg_src = NULL;

		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);

		assert(c_seg_dst->c_busy);
		assert(c_seg_dst->c_state == C_IS_FILLING);
		assert(!c_seg_dst->c_on_minorcompact_q);

		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
		assert(*current_chead == NULL);

		C_SEG_WAKEUP_DONE(c_seg_dst);

		lck_mtx_unlock_always(&c_seg_dst->c_lock);

		c_seg_dst = NULL;

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		goto Relookup_dst;
	}

	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
	//is platform alignment actually necessary since wkdm aligns its output?
	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

	cslot_copy(c_dst, c_src);
	c_dst->c_offset = c_seg_dst->c_nextoffset;

	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot)
		c_seg_dst->c_firstemptyslot++;

	c_seg_dst->c_slots_used++;
	c_seg_dst->c_nextslot++;
	c_seg_dst->c_bytes_used += c_rounded_size;
	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);


	PACK_C_SIZE(c_src, 0);

	c_seg_src->c_bytes_used -= c_rounded_size;
	c_seg_src->c_bytes_unused += c_rounded_size;

	assert(c_seg_src->c_slots_used);
	c_seg_src->c_slots_used--;

	if (c_indx < c_seg_src->c_firstemptyslot) {
		c_seg_src->c_firstemptyslot = c_indx;
	}

	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);

	PAGE_REPLACEMENT_ALLOWED(TRUE);
	slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst);
	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
	slot_ptr->s_cindx = dst_slot;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

out:
	if (c_seg_src) {

		lck_mtx_lock_spin_always(&c_seg_src->c_lock);

		C_SEG_WAKEUP_DONE(c_seg_src);

		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
			if (!c_seg_src->c_on_minorcompact_q)
				c_seg_need_delayed_compaction(c_seg_src, FALSE);
		}

		lck_mtx_unlock_always(&c_seg_src->c_lock);
	}

	if (c_seg_dst) {

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);

		if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
			/*
			 * Nearing or exceeded maximum slot and offset capacity.
			 */
			assert(c_seg_dst->c_busy);
			assert(c_seg_dst->c_state == C_IS_FILLING);
			assert(!c_seg_dst->c_on_minorcompact_q);

			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
			assert(*current_chead == NULL);
		}

		C_SEG_WAKEUP_DONE(c_seg_dst);

		lck_mtx_unlock_always(&c_seg_dst->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(FALSE);
	}

	return kr;
}

#endif /* CONFIG_FREEZE */