/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_compressor_algorithms.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
#include <mach/mach_host.h>             /* for host_info() */
#include <kern/ledger.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/misc_protos.h>
#endif
#if defined(__arm64__)
#include <arm/machine_routines.h>
#endif

#include <IOKit/IOHibernatePrivate.h>
extern boolean_t vm_darkwake_mode;
extern zone_t vm_page_zone;

#if DEVELOPMENT || DEBUG
/* sysctl defined in bsd/dev/arm64/sysctl.c */
int do_cseg_wedge_thread(void);
int do_cseg_unwedge_thread(void);
static event_t debug_cseg_wait_event = NULL;
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
void task_disown_frozen_csegs(task_t owner_task);
#endif /* CONFIG_FREEZE */
#if POPCOUNT_THE_COMPRESSED_DATA
boolean_t popcount_c_segs = TRUE;

static inline uint32_t
vmc_pop(uintptr_t ins, int sz)
{
	uint32_t rv = 0;

	if (__probable(popcount_c_segs == FALSE)) {
		/* popcount validation disabled; both store and check take this path */
		return 0;
	}

	while (sz >= 16) {
		uint32_t rv1, rv2;
		uint64_t *ins64 = (uint64_t *) ins;
		uint64_t *ins642 = (uint64_t *) (ins + 8);
		rv1 = __builtin_popcountll(*ins64);
		rv2 = __builtin_popcountll(*ins642);
		rv += rv1 + rv2;
		sz -= 16;
		ins += 16;
	}

	while (sz >= 4) {
		uint32_t *ins32 = (uint32_t *) ins;
		rv += __builtin_popcount(*ins32);
		sz -= 4;
		ins += 4;
	}

	while (sz > 0) {
		char *ins8 = (char *)ins;
		rv += __builtin_popcount(*ins8);
		sz--;
		ins++;
	}
	return rv;
}
#endif /* POPCOUNT_THE_COMPRESSED_DATA */
#if VALIDATE_C_SEGMENTS
boolean_t validate_c_segs = TRUE;
#endif
/*
 * vm_compressor_mode has a hierarchy of control to set its value.
 * boot-args are checked first, then device-tree, and finally
 * the default value that is defined below. See vm_fault_init() for
 * the boot-arg & device-tree code.
 */

#if CONFIG_EMBEDDED

#if CONFIG_FREEZE
int             vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
struct freezer_context freezer_context_global;
#else /* CONFIG_FREEZE */
int             vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
#endif /* CONFIG_FREEZE */

#else /* CONFIG_EMBEDDED */
int             vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
#endif /* CONFIG_EMBEDDED */
TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
int             vm_compressor_is_active = 0;
int             vm_compressor_available = 0;

extern uint64_t vm_swap_get_max_configured_space(void);
extern void     vm_pageout_io_throttle(void);
#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
static unsigned int vmc_hash(char *, int);
boolean_t checksum_c_segs = TRUE;

static unsigned int
vmc_hash(char *cp, int len)
{
	if (__probable(checksum_c_segs == FALSE)) {
		/* checksum validation disabled; both store and check take this path */
		return 0;
	}
	return hash_string(cp, len);
}
#endif
#define UNPACK_C_SIZE(cs)       ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define PACK_C_SIZE(cs, size)   (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
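
/*
 * Worked example for the macros above (assuming 4K pages): the c_size
 * field is too narrow to hold the value PAGE_SIZE itself, so a fully
 * incompressible page is stored as PAGE_SIZE - 1 and mapped back on
 * unpack:
 *
 *	PACK_C_SIZE(cs, PAGE_SIZE);	// stores 4095 in cs->c_size
 *	UNPACK_C_SIZE(cs);		// yields 4096 again
 *	PACK_C_SIZE(cs, 3000);		// any smaller size is stored as-is
 *	UNPACK_C_SIZE(cs);		// yields 3000
 */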
struct c_sv_hash_entry {
	union {
		struct {
			uint32_t c_sv_he_ref;
			uint32_t c_sv_he_data;
		} c_sv_he;
		uint64_t c_sv_he_record;
	} c_sv_he_un;
};

#define he_ref          c_sv_he_un.c_sv_he.c_sv_he_ref
#define he_data         c_sv_he_un.c_sv_he.c_sv_he_data
#define he_record       c_sv_he_un.c_sv_he_record

#define C_SV_HASH_MAX_MISS      32
#define C_SV_HASH_SIZE          ((1 << 10))
#define C_SV_HASH_MASK          ((1 << 10) - 1)
#define C_SV_CSEG_ID            ((1 << 22) - 1)
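
/*
 * How these constants combine (an illustration, not code from this
 * file): C_SV_HASH_SIZE is a power of two, so a bucket index for a
 * single-value page's 32-bit pattern can be formed with
 * "hash & C_SV_HASH_MASK" rather than a modulo, and C_SV_CSEG_ID is
 * the reserved all-ones 22-bit segment number marking a slot whose
 * page is represented by a hash entry instead of a real c_segment.
 */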
#define C_SLOT_ASSERT_PACKABLE(ptr) \
	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);

#define C_SLOT_PACK_PTR(ptr) \
	VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)

#define C_SLOT_UNPACK_PTR(cslot) \
	(c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)

/* for debugging purposes */
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
    VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
uint32_t c_segment_count = 0;
uint32_t c_segment_count_max = 0;

uint64_t c_generation_id = 0;
uint64_t c_generation_id_flush_barrier;

#define HIBERNATE_FLUSHING_SECS_TO_COMPLETE     120

boolean_t hibernate_no_swapspace = FALSE;
clock_sec_t hibernate_flushing_deadline = 0;
#if RECORD_THE_COMPRESSED_DATA
char    *c_compressed_record_sbuf;
char    *c_compressed_record_ebuf;
char    *c_compressed_record_cptr;
#endif
queue_head_t c_age_list_head;
queue_head_t c_swappedin_list_head;
queue_head_t c_swapout_list_head;
queue_head_t c_swapio_list_head;
queue_head_t c_swappedout_list_head;
queue_head_t c_swappedout_sparse_list_head;
queue_head_t c_major_list_head;
queue_head_t c_filling_list_head;
queue_head_t c_bad_list_head;

uint32_t c_age_count = 0;
uint32_t c_swappedin_count = 0;
uint32_t c_swapout_count = 0;
uint32_t c_swapio_count = 0;
uint32_t c_swappedout_count = 0;
uint32_t c_swappedout_sparse_count = 0;
uint32_t c_major_count = 0;
uint32_t c_filling_count = 0;
uint32_t c_empty_count = 0;
uint32_t c_bad_count = 0;

queue_head_t c_minor_list_head;
uint32_t c_minor_count = 0;

int c_overage_swapped_count = 0;
int c_overage_swapped_limit = 0;
int             c_seg_fixed_array_len;
union c_segu    *c_segments;
vm_offset_t     c_buffers;
vm_size_t       c_buffers_size;
caddr_t         c_segments_next_page;
boolean_t       c_segments_busy;
uint32_t        c_segments_available;
uint32_t        c_segments_limit;
uint32_t        c_segments_nearing_limit;

uint32_t        c_segment_svp_in_hash;
uint32_t        c_segment_svp_hash_succeeded;
uint32_t        c_segment_svp_hash_failed;
uint32_t        c_segment_svp_zero_compressions;
uint32_t        c_segment_svp_nonzero_compressions;
uint32_t        c_segment_svp_zero_decompressions;
uint32_t        c_segment_svp_nonzero_decompressions;

uint32_t        c_segment_noncompressible_pages;
uint32_t        c_segment_pages_compressed = 0;         /* Tracks # of uncompressed pages fed into the compressor */
#if CONFIG_FREEZE
int32_t         c_segment_pages_compressed_incore = 0;  /* Tracks # of uncompressed pages fed into the compressor that are in memory */
uint32_t        c_segments_incore_limit = 0;            /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
#endif /* CONFIG_FREEZE */

uint32_t        c_segment_pages_compressed_limit;
uint32_t        c_segment_pages_compressed_nearing_limit;
uint32_t        c_free_segno_head = (uint32_t)-1;

uint32_t        vm_compressor_minorcompact_threshold_divisor = 10;
uint32_t        vm_compressor_majorcompact_threshold_divisor = 10;
uint32_t        vm_compressor_unthrottle_threshold_divisor = 10;
uint32_t        vm_compressor_catchup_threshold_divisor = 10;

uint32_t        vm_compressor_minorcompact_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_majorcompact_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_unthrottle_threshold_divisor_overridden = 0;
uint32_t        vm_compressor_catchup_threshold_divisor_overridden = 0;
#define C_SEGMENTS_PER_PAGE     (PAGE_SIZE / sizeof(union c_segu))
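
/*
 * Illustration (assuming a 4K page and an 8-byte union c_segu):
 * C_SEGMENTS_PER_PAGE == 4096 / 8 == 512, i.e. each page of the
 * c_segments array tracks 512 segments.  vm_compressor_init() asserts
 * that this division is exact.
 */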
LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);

boolean_t decompressions_blocked = FALSE;
zone_t compressor_segment_zone;
int    c_compressor_swap_trigger = 0;

uint32_t compressor_cpus;
char     *compressor_scratch_bufs;
char     *kdp_compressor_scratch_buf;
char     *kdp_compressor_decompressed_page;
addr64_t kdp_compressor_decompressed_page_paddr;
ppnum_t  kdp_compressor_decompressed_page_ppnum;
clock_sec_t  start_of_sample_period_sec = 0;
clock_nsec_t start_of_sample_period_nsec = 0;
clock_sec_t  start_of_eval_period_sec = 0;
clock_nsec_t start_of_eval_period_nsec = 0;
uint32_t     sample_period_decompression_count = 0;
uint32_t     sample_period_compression_count = 0;
uint32_t     last_eval_decompression_count = 0;
uint32_t     last_eval_compression_count = 0;

#define DECOMPRESSION_SAMPLE_MAX_AGE            (60 * 30)

boolean_t vm_swapout_ripe_segments = FALSE;
uint32_t  vm_ripe_target_age = (60 * 60 * 48);

uint32_t swapout_target_age = 0;
uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t overage_decompressions_during_sample_period = 0;
void      do_fastwake_warmup(queue_head_t *, boolean_t);
boolean_t fastwake_warmup = FALSE;
boolean_t fastwake_recording_in_progress = FALSE;
clock_sec_t dont_trim_until_ts = 0;

uint64_t  c_segment_warmup_count;
uint64_t  first_c_segment_to_warm_generation_id = 0;
uint64_t  last_c_segment_to_warm_generation_id = 0;
boolean_t hibernate_flushing = FALSE;

int64_t c_segment_input_bytes __attribute__((aligned(8))) = 0;
int64_t c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
int64_t compressor_bytes_used __attribute__((aligned(8))) = 0;

struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned(8)));
static boolean_t compressor_needs_to_swap(void);
static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_age_swapped_in_segments(boolean_t);

static void vm_compressor_take_paging_space_action(void);

void compute_swapout_target_age(void);

boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);

int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);

void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);

uint64_t vm_available_memory(void);
uint64_t vm_compressor_pages_compressed(void);
/*
 * indicate the need to do a major compaction if
 * the overall set of in-use compression segments
 * becomes sparse... on systems that support pressure
 * driven swapping, this will also cause swapouts to
 * be initiated.
 */
static inline boolean_t
vm_compressor_needs_to_major_compact()
{
	uint32_t incore_seg_count;

	incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;

	if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
	    ((incore_seg_count * C_SEG_MAX_PAGES) - VM_PAGE_COMPRESSOR_COUNT) >
	    ((incore_seg_count / 8) * C_SEG_MAX_PAGES)) {
		return TRUE;
	}
	return FALSE;
}
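
/*
 * Worked example for the test above (illustrative numbers, with a
 * hypothetical C_SEG_MAX_PAGES of 64): 1000 in-core segments can hold
 * 64000 compressed pages.  If VM_PAGE_COMPRESSOR_COUNT reports only
 * 48000 pages resident, the unused capacity (16000 pages) exceeds
 * 1/8th of the segments' total capacity (125 * 64 == 8000), so the
 * in-use set is considered sparse and a major compaction is indicated
 * (provided c_segment_count is at least 1/8th of
 * c_segments_nearing_limit).
 */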
uint64_t
vm_available_memory(void)
{
	return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
}


uint64_t
vm_compressor_pages_compressed(void)
{
	return c_segment_pages_compressed * PAGE_SIZE_64;
}
boolean_t
vm_compressor_low_on_space(void)
{
#if CONFIG_FREEZE
	uint64_t incore_seg_count;
	uint32_t incore_compressed_pages;

	if (freezer_incore_cseg_acct) {
		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
		incore_compressed_pages = c_segment_pages_compressed_incore;
	} else {
		incore_seg_count = c_segment_count;
		incore_compressed_pages = c_segment_pages_compressed;
	}

	if ((incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ||
	    (incore_seg_count > c_segments_nearing_limit)) {
		return TRUE;
	}
#else /* CONFIG_FREEZE */
	if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
	    (c_segment_count > c_segments_nearing_limit)) {
		return TRUE;
	}
#endif /* CONFIG_FREEZE */

	return FALSE;
}
boolean_t
vm_compressor_out_of_space(void)
{
#if CONFIG_FREEZE
	uint64_t incore_seg_count;
	uint32_t incore_compressed_pages;

	if (freezer_incore_cseg_acct) {
		incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
		incore_compressed_pages = c_segment_pages_compressed_incore;
	} else {
		incore_seg_count = c_segment_count;
		incore_compressed_pages = c_segment_pages_compressed;
	}

	if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
	    (incore_seg_count > c_segments_incore_limit)) {
		return TRUE;
	}
#else /* CONFIG_FREEZE */
	if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
	    (c_segment_count >= c_segments_limit)) {
		return TRUE;
	}
#endif /* CONFIG_FREEZE */

	return FALSE;
}
int
vm_wants_task_throttled(task_t task)
{
	if (task == kernel_task) {
		return 0;
	}

	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) &&
		    (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4)) {
			return 1;
		}
	}
	return 0;
}
#if DEVELOPMENT || DEBUG
/*
 * On compressor/swap exhaustion, kill the largest process regardless of
 * its chosen process policy.
 */
TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
#endif /* DEVELOPMENT || DEBUG */

#if !CONFIG_EMBEDDED

static uint32_t no_paging_space_action_in_progress = 0;
extern void memorystatus_send_low_swap_note(void);
static void
vm_compressor_take_paging_space_action(void)
{
	if (no_paging_space_action_in_progress == 0) {
		if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {
			if (no_paging_space_action()) {
#if DEVELOPMENT || DEBUG
				if (kill_on_no_paging_space) {
					/*
					 * Since we are choosing to always kill a process, we don't need the
					 * "out of application memory" dialog box in this mode. And, hence we won't
					 * send the knote.
					 */
					no_paging_space_action_in_progress = 0;
					return;
				}
#endif /* DEVELOPMENT || DEBUG */
				memorystatus_send_low_swap_note();
			}

			no_paging_space_action_in_progress = 0;
		}
	}
}
#endif /* !CONFIG_EMBEDDED */
void
vm_decompressor_lock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = TRUE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

void
vm_decompressor_unlock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = FALSE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

	thread_wakeup((event_t)&decompressions_blocked);
}
static inline void
cslot_copy(c_slot_t cdst, c_slot_t csrc)
{
#if CHECKSUM_THE_DATA
	cdst->c_hash_data = csrc->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	cdst->c_pop_cdata = csrc->c_pop_cdata;
#endif
	cdst->c_size = csrc->c_size;
	cdst->c_packed_ptr = csrc->c_packed_ptr;
#if defined(__arm__) || defined(__arm64__)
	cdst->c_codec = csrc->c_codec;
#endif
#if __ARM_WKDM_POPCNT__
	cdst->c_inline_popcount = csrc->c_inline_popcount;
#endif
}
vm_map_t compressor_map;
uint64_t compressor_pool_max_size;
uint64_t compressor_pool_size;
uint32_t compressor_pool_multiplier;

#if DEVELOPMENT || DEBUG
/*
 * Compressor segments are write-protected in development/debug
 * kernels to help debug memory corruption.
 * In cases where performance is a concern, this can be disabled
 * via the boot-arg "-disable_cseg_write_protection".
 */
boolean_t write_protect_c_segs = TRUE;
int vm_compressor_test_seg_wp;
uint32_t vm_ktrace_enabled;
#endif /* DEVELOPMENT || DEBUG */
void
vm_compressor_init(void)
{
	thread_t        thread;
	int             attempts = 1;
	kern_return_t   retval = KERN_SUCCESS;
	vm_offset_t     start_addr = 0;
	vm_size_t       c_segments_arr_size = 0, compressor_submap_size = 0;
	vm_map_kernel_flags_t vmk_flags;
#if RECORD_THE_COMPRESSED_DATA
	vm_size_t       c_compressed_record_sbuf_size = 0;
#endif /* RECORD_THE_COMPRESSED_DATA */

#if DEVELOPMENT || DEBUG || CONFIG_FREEZE
	char bootarg_name[32];
#endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */

#if DEVELOPMENT || DEBUG
	if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
		write_protect_c_segs = FALSE;
	}

	int vmcval = 1;
	PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));

	if (kern_feature_override(KF_COMPRSV_OVRD)) {
		vmcval = 0;
	}
	if (vmcval == 0) {
#if POPCOUNT_THE_COMPRESSED_DATA
		popcount_c_segs = FALSE;
#endif
#if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
		checksum_c_segs = FALSE;
#endif
#if VALIDATE_C_SEGMENTS
		validate_c_segs = FALSE;
#endif
		write_protect_c_segs = FALSE;
	}
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
	if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
		freezer_incore_cseg_acct = FALSE;
	}
#endif /* CONFIG_FREEZE */

	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
#ifdef CONFIG_EMBEDDED
	vm_compressor_minorcompact_threshold_divisor = 20;
	vm_compressor_majorcompact_threshold_divisor = 30;
	vm_compressor_unthrottle_threshold_divisor = 40;
	vm_compressor_catchup_threshold_divisor = 60;
#else
	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
		vm_compressor_minorcompact_threshold_divisor = 11;
		vm_compressor_majorcompact_threshold_divisor = 13;
		vm_compressor_unthrottle_threshold_divisor = 20;
		vm_compressor_catchup_threshold_divisor = 35;
	} else {
		vm_compressor_minorcompact_threshold_divisor = 20;
		vm_compressor_majorcompact_threshold_divisor = 25;
		vm_compressor_unthrottle_threshold_divisor = 35;
		vm_compressor_catchup_threshold_divisor = 50;
	}
#endif
	queue_init(&c_bad_list_head);
	queue_init(&c_age_list_head);
	queue_init(&c_minor_list_head);
	queue_init(&c_major_list_head);
	queue_init(&c_filling_list_head);
	queue_init(&c_swapout_list_head);
	queue_init(&c_swapio_list_head);
	queue_init(&c_swappedin_list_head);
	queue_init(&c_swappedout_list_head);
	queue_init(&c_swappedout_sparse_list_head);

	c_free_segno_head = -1;
	c_segments_available = 0;
	if (vm_compression_limit) {
		compressor_pool_size = ptoa_64(vm_compression_limit);
	}

	compressor_pool_max_size = C_SEG_MAX_LIMIT;
	compressor_pool_max_size *= C_SEG_BUFSIZE;

#if defined(__x86_64__)

	if (vm_compression_limit == 0) {
		if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 16ULL * max_mem;
		} else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 8ULL * max_mem;
		} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
			compressor_pool_size = 4ULL * max_mem;
		} else {
			compressor_pool_size = 2ULL * max_mem;
		}
	}
	if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 1;
	} else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
		compressor_pool_multiplier = 2;
	} else {
		compressor_pool_multiplier = 4;
	}
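
	/*
	 * Worked example of the x86_64 sizing above (illustrative): on a
	 * 16GB machine, max_mem falls in the 8GB..32GB bucket, so the pool
	 * is sized at 4 * 16GB = 64GB of compressor VA and
	 * compressor_pool_multiplier is 2, which later scales up
	 * c_segment_pages_compressed_limit relative to the pool's nominal
	 * capacity.
	 */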
#elif defined(__arm__)

#define VM_RESERVE_SIZE                 (1024 * 1024 * 256)
#define MAX_COMPRESSOR_POOL_SIZE        (1024 * 1024 * 450)

	if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE) {
		compressor_pool_max_size = MAX_COMPRESSOR_POOL_SIZE;
	}

	if (vm_compression_limit == 0) {
		compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE;
	}
	compressor_pool_multiplier = 1;

#elif defined(__arm64__) && defined(XNU_TARGET_OS_WATCH)

	/*
	 * On M9 watches the compressor can become big and can lead to
	 * churn in workingset resulting in audio drops. Setting a cap
	 * on the compressor size favors reclaiming unused memory
	 * sitting in idle band via jetsams
	 */

#define COMPRESSOR_CAP_PERCENTAGE       37ULL

	if (compressor_pool_max_size > max_mem) {
		compressor_pool_max_size = max_mem;
	}

	if (vm_compression_limit == 0) {
		compressor_pool_size = (max_mem * COMPRESSOR_CAP_PERCENTAGE) / 100ULL;
	}
	compressor_pool_multiplier = 1;

#else

	if (compressor_pool_max_size > max_mem) {
		compressor_pool_max_size = max_mem;
	}

	if (vm_compression_limit == 0) {
		compressor_pool_size = max_mem;
	}
	compressor_pool_multiplier = 1;
#endif

	if (compressor_pool_size > compressor_pool_max_size) {
		compressor_pool_size = compressor_pool_max_size;
	}
try_again:
	c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(C_SEG_ALLOCSIZE));
	c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);

	c_segment_pages_compressed_limit = (c_segments_limit * (C_SEG_BUFSIZE / PAGE_SIZE) * compressor_pool_multiplier);

	if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
		if (!vm_compression_limit) {
			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
		}
	}

	c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);

#if CONFIG_FREEZE
	/*
	 * Our in-core limits are based on the size of the compressor pool.
	 * The c_segments_nearing_limit is also based on the compressor pool
	 * size and calculated above.
	 */
	c_segments_incore_limit = c_segments_limit;

	if (freezer_incore_cseg_acct) {
		/*
		 * Add enough segments to track all frozen c_segs that can be stored in swap.
		 */
		c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(C_SEG_ALLOCSIZE));
	}
#endif /* CONFIG_FREEZE */
	/*
	 * Submap needs space for:
	 * - c_segments
	 * - c_buffers
	 * - swap reclamations -- C_SEG_BUFSIZE
	 */
	c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
	c_buffers_size = vm_map_round_page(((vm_size_t)C_SEG_ALLOCSIZE * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));

	compressor_submap_size = c_segments_arr_size + c_buffers_size + C_SEG_BUFSIZE;

#if RECORD_THE_COMPRESSED_DATA
	c_compressed_record_sbuf_size = (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2);
	compressor_submap_size += c_compressed_record_sbuf_size;
#endif /* RECORD_THE_COMPRESSED_DATA */
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;
	retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size,
	    FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR,
	    &compressor_map);

	if (retval != KERN_SUCCESS) {
		if (++attempts > 3) {
			panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size);
		}
		compressor_pool_size = compressor_pool_size / 2;

		kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size);
		goto try_again;
	}
	if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments),
	    (sizeof(union c_segu) * c_segments_limit), 0,
	    KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
		panic("vm_compressor_init: kernel_memory_allocate failed - c_segments\n");
	}
	if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0,
	    KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
		panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers\n");
	}
	/*
	 * Pick a good size that will minimize fragmentation in zalloc
	 * by minimizing the fragmentation in a 16k run.
	 *
	 * C_SEG_SLOT_VAR_ARRAY_MIN_LEN is larger on 4k systems than 16k ones,
	 * making the fragmentation in a 4k page terrible. Using 16k for all
	 * systems matches zalloc() and will minimize fragmentation.
	 */
	uint32_t c_segment_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot));
	uint32_t cnt  = (16 << 10) / c_segment_size;
	uint32_t frag = (16 << 10) % c_segment_size;

	c_seg_fixed_array_len = C_SEG_SLOT_VAR_ARRAY_MIN_LEN;

	while (cnt * sizeof(struct c_slot) < frag) {
		c_segment_size += sizeof(struct c_slot);
		c_seg_fixed_array_len++;
		frag -= cnt * sizeof(struct c_slot);
	}
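
	/*
	 * Worked example of the sizing loop above (hypothetical sizes,
	 * assuming an 8-byte struct c_slot): if the base c_segment_size
	 * were 340 bytes, a 16K run fits cnt = 16384 / 340 = 48 segments
	 * with frag = 64 bytes left over.  Growing every segment by one
	 * slot would cost cnt * 8 = 384 bytes, which exceeds the 64-byte
	 * remainder, so the loop stops: the fixed slot array only grows
	 * while doing so consumes space zalloc would waste anyway.
	 */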
	compressor_segment_zone = zone_create("compressor_segment",
	    c_segment_size, ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);

	c_segments_busy = FALSE;

	c_segments_next_page = (caddr_t)c_segments;
	vm_compressor_algorithm_init();
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

#define BSD_HOST 1
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	compressor_cpus = hinfo.max_cpus;

	vm_size_t bufsize = PAGE_SIZE;
	bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
	bufsize += vm_compressor_get_decode_scratch_size();
#if CONFIG_FREEZE
	bufsize += vm_compressor_get_encode_scratch_size();
#endif
#if RECORD_THE_COMPRESSED_DATA
	bufsize += c_compressed_record_sbuf_size;
#endif
	char *buf;
	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buf, bufsize,
	    PAGE_MASK, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
		panic("vm_compressor_init: Unable to allocate %zd bytes", bufsize);
	}

	/*
	 * kdp_compressor_decompressed_page must be page aligned because we access
	 * it through the physical aperture by page number.
	 */
	kdp_compressor_decompressed_page = buf;
	kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page);
	kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr);
	buf += PAGE_SIZE;
	bufsize -= PAGE_SIZE;

	compressor_scratch_bufs = buf;
	buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
	bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();

	kdp_compressor_scratch_buf = buf;
	buf += vm_compressor_get_decode_scratch_size();
	bufsize -= vm_compressor_get_decode_scratch_size();

#if CONFIG_FREEZE
	freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
	buf += vm_compressor_get_encode_scratch_size();
	bufsize -= vm_compressor_get_encode_scratch_size();
#endif

#if RECORD_THE_COMPRESSED_DATA
	c_compressed_record_sbuf = buf;
	c_compressed_record_cptr = buf;
	c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
	buf += c_compressed_record_sbuf_size;
	bufsize -= c_compressed_record_sbuf_size;
#endif

	assert(bufsize == 0);
	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
	    BASEPRI_VM, &thread) != KERN_SUCCESS) {
		panic("vm_compressor_swap_trigger_thread: create failed");
	}
	thread_deallocate(thread);

	if (vm_pageout_internal_start() != KERN_SUCCESS) {
		panic("vm_compressor_init: Failed to start the internal pageout thread.\n");
	}
	if (VM_CONFIG_SWAP_IS_PRESENT) {
		vm_compressor_swap_init();
	}

	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		vm_compressor_is_active = 1;
	}

#if CONFIG_FREEZE
	memorystatus_freeze_enabled = TRUE;
#endif /* CONFIG_FREEZE */

	vm_compressor_available = 1;

	vm_page_reactivate_all_throttled();
}
#if VALIDATE_C_SEGMENTS

static void
c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
{
	uint16_t        c_indx;
	int32_t         bytes_used;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	c_slot_t        cs;

	if (__probable(validate_c_segs == FALSE)) {
		return;
	}
	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
		c_indx = c_seg->c_firstemptyslot;
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		if (cs == NULL) {
			panic("c_seg_validate: no slot backing c_firstemptyslot");
		}

		if (cs->c_size) {
			panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs->c_size);
		}
	}
	bytes_used = 0;

	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		c_size = UNPACK_C_SIZE(cs);

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		bytes_used += c_rounded_size;

#if CHECKSUM_THE_COMPRESSED_DATA
		unsigned csvhash;
		if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
			addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
			panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
		}
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
		unsigned csvpop;
		if (c_size) {
			uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
			if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
				panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
			}
		}
#endif
	}

	if (bytes_used != c_seg->c_bytes_used) {
		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d\n", bytes_used, c_seg->c_bytes_used);
	}

	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
		    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
	}

	if (must_be_compact) {
		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
			    (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
		}
	}
}

#endif /* VALIDATE_C_SEGMENTS */
void
c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
{
	boolean_t clear_busy = FALSE;

	if (c_list_lock_held == FALSE) {
		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
			C_SEG_BUSY(c_seg);

			lck_mtx_unlock_always(&c_seg->c_lock);
			lck_mtx_lock_spin_always(c_list_lock);
			lck_mtx_lock_spin_always(&c_seg->c_lock);

			clear_busy = TRUE;
		}
	}
	assert(c_seg->c_state != C_IS_FILLING);

	if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg))) {
		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
		c_seg->c_on_minorcompact_q = 1;
		c_minor_count++;
	}
	if (c_list_lock_held == FALSE) {
		lck_mtx_unlock_always(c_list_lock);
	}

	if (clear_busy == TRUE) {
		C_SEG_WAKEUP_DONE(c_seg);
	}
}
unsigned int c_seg_moved_to_sparse_list = 0;

void
c_seg_move_to_sparse_list(c_segment_t c_seg)
{
	boolean_t clear_busy = FALSE;

	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);

	c_seg_moved_to_sparse_list++;

	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE) {
		C_SEG_WAKEUP_DONE(c_seg);
	}
}
void
c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
{
	c_segment_t c_seg_next;

	if (queue_empty(qhead)) {
		queue_enter(qhead, c_seg, c_segment_t, c_age_list);
	} else {
		c_seg_next = (c_segment_t)queue_first(qhead);

		while (TRUE) {
			if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
				queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
				break;
			}
			c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);

			if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
				queue_enter(qhead, c_seg, c_segment_t, c_age_list);
				break;
			}
		}
	}
}
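
/*
 * Illustration of c_seg_insert_into_q() above: the queue is kept
 * sorted by ascending c_generation_id.  Inserting a segment with
 * generation 57 into a queue holding [12, 40, 88] walks past 12 and
 * 40 and lands before 88 via queue_insert_before(); if no larger
 * generation exists, the segment is appended at the tail.
 */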
int try_minor_compaction_failed = 0;
int try_minor_compaction_succeeded = 0;

void
c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
{
	assert(c_seg->c_on_minorcompact_q);
	/*
	 * c_seg is currently on the delayed minor compaction
	 * queue and we have c_seg locked... if we can get the
	 * c_list_lock w/o blocking (if we blocked we could deadlock
	 * because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the delayed list and free it directly
	 */
	if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_minor_compaction_failed++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	} else {
		try_minor_compaction_succeeded++;

		C_SEG_BUSY(c_seg);
		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
	}
}
int
c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
{
	int c_seg_freed;

	assert(c_seg->c_busy);
	assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));

	/*
	 * check for the case that can occur when we are not swapping
	 * and this segment has been major compacted in the past
	 * and moved to the majorcompact q to remove it from further
	 * consideration... if the occupancy falls too low we need
	 * to put it back on the age_q so that it will be considered
	 * in the next major compaction sweep... if we don't do this
	 * we will eventually run into the c_segments_limit
	 */
	if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
	}
	if (!c_seg->c_on_minorcompact_q) {
		if (clear_busy == TRUE) {
			C_SEG_WAKEUP_DONE(c_seg);
		}

		lck_mtx_unlock_always(&c_seg->c_lock);

		return 0;
	}
	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
	c_seg->c_on_minorcompact_q = 0;
	c_minor_count--;

	lck_mtx_unlock_always(c_list_lock);

	if (disallow_page_replacement == TRUE) {
		lck_mtx_unlock_always(&c_seg->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}
	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);

	if (disallow_page_replacement == TRUE) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
	}

	if (need_list_lock == TRUE) {
		lck_mtx_lock_spin_always(c_list_lock);
	}

	return c_seg_freed;
}
void
kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
{
	c_segment_t c_seg = (c_segment_t) wait_event;

	waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
}
#if DEVELOPMENT || DEBUG
int
do_cseg_wedge_thread(void)
{
	struct c_segment c_seg;
	c_seg.c_busy_for_thread = current_thread();

	debug_cseg_wait_event = (event_t) &c_seg;

	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
	assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);

	thread_block(THREAD_CONTINUE_NULL);

	return 0;
}

int
do_cseg_unwedge_thread(void)
{
	thread_wakeup(debug_cseg_wait_event);
	debug_cseg_wait_event = NULL;

	return 0;
}
#endif /* DEVELOPMENT || DEBUG */
void
c_seg_wait_on_busy(c_segment_t c_seg)
{
	c_seg->c_wanted = 1;

	thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
	assert_wait((event_t) (c_seg), THREAD_UNINT);

	lck_mtx_unlock_always(&c_seg->c_lock);
	thread_block(THREAD_CONTINUE_NULL);
}
#if CONFIG_FREEZE
/*
 * We don't have the task lock held while updating the task's
 * c_seg queues. We can do that because of the following restrictions:
 *
 * - SINGLE FREEZER CONTEXT:
 *   We 'insert' c_segs into the task list on the task_freeze path.
 *   There can only be one such freeze in progress and the task
 *   isn't disappearing because we have the VM map lock held throughout
 *   and we have a reference on the proc too.
 *
 * - SINGLE TASK DISOWN CONTEXT:
 *   We 'disown' c_segs of a task ONLY from the task_terminate context. So
 *   we don't need the task lock but we need the c_list_lock and the
 *   compressor master lock (shared). We also hold the individual
 *   c_seg locks (exclusive).
 *
 *   If we either:
 *   - can't get the c_seg lock on a try, then we start again because maybe
 *   the c_seg is part of a compaction and might get freed. So we can't trust
 *   that linkage and need to restart our queue traversal.
 *   - OR, we run into a busy c_seg (say being swapped in or free-ing) we
 *   drop all locks again and wait and restart our queue traversal.
 *
 * - The new_owner_task below is currently only the kernel or NULL.
 */
void
c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
{
	task_t          owner_task = c_seg->c_task_owner;
	uint64_t        uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);

	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);

	if (owner_task) {
		task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
		queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
		    c_segment_t, c_task_list_next_cseg);
	}

	if (new_owner_task) {
		queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
		    c_segment_t, c_task_list_next_cseg);
		task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
	}

	c_seg->c_task_owner = new_owner_task;
}
void
task_disown_frozen_csegs(task_t owner_task)
{
	c_segment_t c_seg = NULL, next_cseg = NULL;

again:
	PAGE_REPLACEMENT_DISALLOWED(TRUE);
	lck_mtx_lock_spin_always(c_list_lock);

	for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
	    !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
	    c_seg = next_cseg) {
		next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);

		if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
			lck_mtx_unlock(c_list_lock);
			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			goto again;
		}

		if (c_seg->c_busy) {
			lck_mtx_unlock(c_list_lock);
			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			c_seg_wait_on_busy(c_seg);

			goto again;
		}
		assert(c_seg->c_task_owner == owner_task);
		c_seg_update_task_owner(c_seg, kernel_task);
		lck_mtx_unlock_always(&c_seg->c_lock);
	}

	lck_mtx_unlock(c_list_lock);
	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}
#endif /* CONFIG_FREEZE */
void
c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
{
	int old_state = c_seg->c_state;

#if !CONFIG_EMBEDDED
#if DEVELOPMENT || DEBUG
	if (new_state != C_IS_FILLING) {
		LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
	}
	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
#endif
#endif /* !CONFIG_EMBEDDED */
	switch (old_state) {
	case C_IS_EMPTY:
		assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);

		c_empty_count--;
		break;

	case C_IS_FILLING:
		assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);

		queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
		c_filling_count--;
		break;

	case C_ON_AGE_Q:
		assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
		    new_state == C_IS_FREE);

		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		c_age_count--;
		break;

	case C_ON_SWAPPEDIN_Q:
		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);

		queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		c_swappedin_count--;
		break;

	case C_ON_SWAPOUT_Q:
		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);

#if CONFIG_FREEZE
		if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
			c_seg_update_task_owner(c_seg, NULL);
		}
#endif /* CONFIG_FREEZE */

		queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
		thread_wakeup((event_t)&compaction_swapper_running);
		c_swapout_count--;
		break;

	case C_ON_SWAPIO_Q:
		assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);

		queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
		c_swapio_count--;
		break;

	case C_ON_SWAPPEDOUT_Q:
		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
		    new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);

		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		c_swappedout_count--;
		break;

	case C_ON_SWAPPEDOUTSPARSE_Q:
		assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
		    new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);

		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		c_swappedout_sparse_count--;
		break;

	case C_ON_MAJORCOMPACT_Q:
		assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);

		queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
		c_major_count--;
		break;

	case C_ON_BAD_Q:
		assert(new_state == C_IS_FREE);

		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		c_bad_count--;
		break;

	default:
		panic("c_seg %p has bad c_state = %d\n", c_seg, old_state);
	}

	switch (new_state) {
	case C_IS_FREE:
		assert(old_state != C_IS_FILLING);

		break;

	case C_IS_EMPTY:
		assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		c_empty_count++;
		break;

	case C_IS_FILLING:
		assert(old_state == C_IS_EMPTY);

		queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
		c_filling_count++;
		break;

	case C_ON_AGE_Q:
		assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
		    old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
		    old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		if (old_state == C_IS_FILLING) {
			queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			if (!queue_empty(&c_age_list_head)) {
				c_segment_t c_first;

				c_first = (c_segment_t)queue_first(&c_age_list_head);
				c_seg->c_creation_ts = c_first->c_creation_ts;
			}
			queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_age_count++;
		break;

	case C_ON_SWAPPEDIN_Q:
		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swappedin_count++;
		break;

	case C_ON_SWAPOUT_Q:
		assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swapout_count++;
		break;

	case C_ON_SWAPIO_Q:
		assert(old_state == C_ON_SWAPOUT_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swapio_count++;
		break;

	case C_ON_SWAPPEDOUT_Q:
		assert(old_state == C_ON_SWAPIO_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swappedout_count++;
		break;

	case C_ON_SWAPPEDOUTSPARSE_Q:
		assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_swappedout_sparse_count++;
		break;

	case C_ON_MAJORCOMPACT_Q:
		assert(old_state == C_ON_AGE_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_major_count++;
		break;

	case C_ON_BAD_Q:
		assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);

		if (insert_head == TRUE) {
			queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		} else {
			queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		}
		c_bad_count++;
		break;

	default:
		panic("c_seg %p requesting bad c_state = %d\n", c_seg, new_state);
	}
	c_seg->c_state = new_state;
}
void
c_seg_free(c_segment_t c_seg)
{
	assert(c_seg->c_busy);

	lck_mtx_unlock_always(&c_seg->c_lock);
	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	c_seg_free_locked(c_seg);
}
)
1546 int pages_populated
= 0;
1547 int32_t *c_buffer
= NULL
;
1548 uint64_t c_swap_handle
= 0;
1550 assert(c_seg
->c_busy
);
1551 assert(c_seg
->c_slots_used
== 0);
1552 assert(!c_seg
->c_on_minorcompact_q
);
1553 assert(!c_seg
->c_busy_swapping
);
1555 if (c_seg
->c_overage_swap
== TRUE
) {
1556 c_overage_swapped_count
--;
1557 c_seg
->c_overage_swap
= FALSE
;
1559 if (!(C_SEG_IS_ONDISK(c_seg
))) {
1560 c_buffer
= c_seg
->c_store
.c_buffer
;
1562 c_swap_handle
= c_seg
->c_store
.c_swap_handle
;
1565 c_seg_switch_state(c_seg
, C_IS_FREE
, FALSE
);
1568 pages_populated
= (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg
->c_populated_offset
))) / PAGE_SIZE
;
1569 c_seg
->c_store
.c_buffer
= NULL
;
1572 c_seg_update_task_owner(c_seg
, NULL
);
1573 #endif /* CONFIG_FREEZE */
1575 c_seg
->c_store
.c_swap_handle
= (uint64_t)-1;
1578 lck_mtx_unlock_always(&c_seg
->c_lock
);
1580 lck_mtx_unlock_always(c_list_lock
);
1583 if (pages_populated
) {
1584 kernel_memory_depopulate(compressor_map
, (vm_offset_t
)c_buffer
,
1585 pages_populated
* PAGE_SIZE
, KMA_COMPRESSOR
, VM_KERN_MEMORY_COMPRESSOR
);
1587 } else if (c_swap_handle
) {
1589 * Free swap space on disk.
1591 vm_swap_free(c_swap_handle
);
1593 lck_mtx_lock_spin_always(&c_seg
->c_lock
);
1595 * c_seg must remain busy until
1596 * after the call to vm_swap_free
1598 C_SEG_WAKEUP_DONE(c_seg
);
1599 lck_mtx_unlock_always(&c_seg
->c_lock
);
1601 segno
= c_seg
->c_mysegno
;
1603 lck_mtx_lock_spin_always(c_list_lock
);
1605 * because the c_buffer is now associated with the segno,
1606 * we can't put the segno back on the free list until
1607 * after we have depopulated the c_buffer range, or
1608 * we run the risk of depopulating a range that is
1609 * now being used in one of the compressor heads
1611 c_segments
[segno
].c_segno
= c_free_segno_head
;
1612 c_free_segno_head
= segno
;
1615 lck_mtx_unlock_always(c_list_lock
);
1617 lck_mtx_destroy(&c_seg
->c_lock
, &vm_compressor_lck_grp
);
1619 if (c_seg
->c_slot_var_array_len
) {
1620 kheap_free(KHEAP_DATA_BUFFERS
, c_seg
->c_slot_var_array
,
1621 sizeof(struct c_slot
) * c_seg
->c_slot_var_array_len
);
1624 zfree(compressor_segment_zone
, c_seg
);
#if DEVELOPMENT || DEBUG
int c_seg_trim_page_count = 0;
#endif

void
c_seg_trim_tail(c_segment_t c_seg)
{
	c_slot_t        cs;
	uint32_t        c_size;
	uint32_t        c_offset;
	uint32_t        c_rounded_size;
	uint16_t        current_nextslot;
	uint32_t        current_populated_offset;

	if (c_seg->c_bytes_used == 0) {
		return;
	}
	current_nextslot = c_seg->c_nextslot;
	current_populated_offset = c_seg->c_populated_offset;

	while (c_seg->c_nextslot) {
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));

		c_size = UNPACK_C_SIZE(cs);

		if (c_size) {
			if (current_nextslot != c_seg->c_nextslot) {
				c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);

				c_seg->c_nextoffset = c_offset;
				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
				    ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);

				if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
					c_seg->c_firstemptyslot = c_seg->c_nextslot;
				}
#if DEVELOPMENT || DEBUG
				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
				    round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
				    PAGE_SIZE);
#endif
			}
			break;
		}
		c_seg->c_nextslot--;
	}
	assert(c_seg->c_nextslot);
}
int
c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
{
	c_slot_mapping_t slot_ptr;
	uint32_t        c_offset = 0;
	uint32_t        old_populated_offset;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	uint16_t        c_indx = 0;
	int             i;
	c_slot_t        c_dst;
	c_slot_t        c_src;

	assert(c_seg->c_busy);

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, FALSE);
#endif
	if (c_seg->c_bytes_used == 0) {
		c_seg_free(c_seg);
		return 1;
	}
	lck_mtx_unlock_always(&c_seg->c_lock);

	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
		goto done;
	}

/* TODO: assert first emptyslot's c_size is actually 0 */

#if DEVELOPMENT || DEBUG
	C_SEG_MAKE_WRITEABLE(c_seg);
#endif

#if VALIDATE_C_SEGMENTS
	c_seg->c_was_minor_compacted++;
#endif
	c_indx = c_seg->c_firstemptyslot;
	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	old_populated_offset = c_seg->c_populated_offset;
	c_offset = c_dst->c_offset;

	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0) {
			continue;
		}

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
/* N.B.: This memcpy may be an overlapping copy */
		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);

		cslot_copy(c_dst, c_src);
		c_dst->c_offset = c_offset;

		slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
		slot_ptr->s_cindx = c_indx;

		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
		PACK_C_SIZE(c_src, 0);
		c_indx++;

		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	}
	c_seg->c_firstemptyslot = c_indx;
	c_seg->c_nextslot = c_indx;
	c_seg->c_nextoffset = c_offset;
	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
	c_seg->c_bytes_unused = 0;

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, TRUE);
#endif
	if (old_populated_offset > c_seg->c_populated_offset) {
		uint32_t        gc_size;
		int32_t         *gc_ptr;

		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];

		kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size,
		    KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
	}

#if DEVELOPMENT || DEBUG
	C_SEG_WRITE_PROTECT(c_seg);
#endif

done:
	if (clear_busy == TRUE) {
		lck_mtx_lock_spin_always(&c_seg->c_lock);
		C_SEG_WAKEUP_DONE(c_seg);
		lck_mtx_unlock_always(&c_seg->c_lock);
	}
	return 0;
}
void
c_seg_alloc_nextslot(c_segment_t c_seg)
{
	struct c_slot   *old_slot_array = NULL;
	struct c_slot   *new_slot_array = NULL;
	int             newlen;
	int             oldlen;

	if (c_seg->c_nextslot < c_seg_fixed_array_len) {
		return;
	}

	if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
		oldlen = c_seg->c_slot_var_array_len;
		old_slot_array = c_seg->c_slot_var_array;

		if (oldlen == 0) {
			newlen = C_SEG_SLOT_VAR_ARRAY_MIN_LEN;
		} else {
			newlen = oldlen * 2;
		}

		new_slot_array = kheap_alloc(KHEAP_DATA_BUFFERS,
		    sizeof(struct c_slot) * newlen, Z_WAITOK);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (old_slot_array) {
			memcpy(new_slot_array, old_slot_array,
			    sizeof(struct c_slot) * oldlen);
		}

		c_seg->c_slot_var_array_len = newlen;
		c_seg->c_slot_var_array = new_slot_array;

		lck_mtx_unlock_always(&c_seg->c_lock);

		if (old_slot_array) {
			kheap_free(KHEAP_DATA_BUFFERS, old_slot_array,
			    sizeof(struct c_slot) * oldlen);
		}
	}
}
#define C_SEG_MAJOR_COMPACT_STATS_MAX   (30)

struct {
	uint64_t asked_permission;
	uint64_t compactions;
	uint64_t moved_slots;
	uint64_t moved_bytes;
	uint64_t wasted_space_in_swapouts;
	uint64_t count_of_swapouts;
	uint64_t count_of_freed_segs;
	uint64_t bailed_compactions;
	uint64_t bytes_freed_rate_us;
} c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];

int c_seg_major_compact_stats_now = 0;


#define C_MAJOR_COMPACTION_SIZE_APPROPRIATE     ((C_SEG_BUFSIZE * 90) / 100)
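
/*
 * Worked example (hypothetical 4MB C_SEG_BUFSIZE): the threshold is
 * 90% of the buffer, ~3.6MB.  c_seg_major_compact_ok() below refuses
 * to merge two segments that are both at or above this occupancy,
 * since combining them could not empty either one.
 */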
boolean_t
c_seg_major_compact_ok(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{
	c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;

	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
		return FALSE;
	}

	if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
		/*
		 * destination segment is full... can't compact
		 */
		return FALSE;
	}

	return TRUE;
}
boolean_t
c_seg_major_compact(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{
	c_slot_mapping_t slot_ptr;
	uint32_t        c_rounded_size;
	uint32_t        c_size;
	uint16_t        dst_slot;
	int             i;
	c_slot_t        c_dst;
	c_slot_t        c_src;
	boolean_t       keep_compacting = TRUE;

	/*
	 * segments are not locked but they are both marked c_busy
	 * which keeps c_decompress from working on them...
	 * we can safely allocate new pages, move compressed data
	 * from c_seg_src to c_seg_dst and update both c_segment's
	 * state w/o holding the master lock
	 */

#if DEVELOPMENT || DEBUG
	C_SEG_MAKE_WRITEABLE(c_seg_dst);
#endif

#if VALIDATE_C_SEGMENTS
	c_seg_dst->c_was_major_compacted++;
	c_seg_src->c_was_major_donor++;
#endif
	c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;

	dst_slot = c_seg_dst->c_nextslot;

	for (i = 0; i < c_seg_src->c_nextslot; i++) {
		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0) {
			/* BATCH: move what we have so far; */
			continue;
		}

		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {
			int     size_to_populate;

			size_to_populate = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);

			if (size_to_populate == 0) {
				keep_compacting = FALSE;
				break;
			}
			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
			}

			kernel_memory_populate(compressor_map,
			    (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
			    size_to_populate,
			    KMA_COMPRESSOR,
			    VM_KERN_MEMORY_COMPRESSOR);

			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= C_SEG_BUFSIZE);
		}
		c_seg_alloc_nextslot(c_seg_dst);

		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
		c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += c_size;

		cslot_copy(c_dst, c_src);
		c_dst->c_offset = c_seg_dst->c_nextoffset;

		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
			c_seg_dst->c_firstemptyslot++;
		}
		c_seg_dst->c_slots_used++;
		c_seg_dst->c_nextslot++;
		c_seg_dst->c_bytes_used += c_rounded_size;
		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

		PACK_C_SIZE(c_src, 0);

		c_seg_src->c_bytes_used -= c_rounded_size;
		c_seg_src->c_bytes_unused += c_rounded_size;
		c_seg_src->c_firstemptyslot = 0;

		assert(c_seg_src->c_slots_used);
		c_seg_src->c_slots_used--;

		if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
			/* dest segment is now full */
			keep_compacting = FALSE;
			break;
		}
	}
#if DEVELOPMENT || DEBUG
	C_SEG_WRITE_PROTECT(c_seg_dst);
#endif
	if (dst_slot < c_seg_dst->c_nextslot) {
		PAGE_REPLACEMENT_ALLOWED(TRUE);
		/*
		 * we've now locked out c_decompress from
		 * converting the slot passed into it into
		 * a c_segment_t which allows us to use
		 * the backptr to change which c_segment and
		 * index the slot points to
		 */
		while (dst_slot < c_seg_dst->c_nextslot) {
			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);

			slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
			slot_ptr->s_cindx = dst_slot++;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return keep_compacting;
}
uint64_t
vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
{
	uint64_t end_msecs;
	uint64_t start_msecs;

	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
	start_msecs = (start_sec * 1000) + start_nsec / 1000000;

	return end_msecs - start_msecs;
}
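
/*
 * Example of the conversion above: end = 5s + 500,000,000ns and
 * start = 3s + 250,000,000ns give
 * (5 * 1000 + 500) - (3 * 1000 + 250) = 5500 - 3250 = 2250 msecs.
 */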
uint32_t compressor_eval_period_in_msecs = 250;
uint32_t compressor_sample_min_in_msecs = 500;
uint32_t compressor_sample_max_in_msecs = 10000;
uint32_t compressor_thrashing_threshold_per_10msecs = 50;
uint32_t compressor_thrashing_min_per_10msecs = 20;

/* When true, reset sample data next chance we get. */
static boolean_t compressor_need_sample_reset = FALSE;
void
compute_swapout_target_age(void)
{
	clock_sec_t     cur_ts_sec;
	clock_nsec_t    cur_ts_nsec;
	uint32_t        min_operations_needed_in_this_sample;
	uint64_t        elapsed_msecs_in_eval;
	uint64_t        elapsed_msecs_in_sample;
	boolean_t       need_eval_reset = FALSE;

	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);

	if (compressor_need_sample_reset ||
	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);

	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
		goto done;
	}
	need_eval_reset = TRUE;

	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);

	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;

	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
		    sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);

		swapout_target_age = 0;

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	last_eval_compression_count = sample_period_compression_count;
	last_eval_decompression_count = sample_period_decompression_count;

	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
		goto done;
	}
	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
		uint64_t        running_total;
		uint64_t        working_target;
		uint64_t        aging_target;
		uint32_t        oldest_age_of_csegs_sampled = 0;
		uint64_t        working_set_approximation = 0;

		swapout_target_age = 0;

		working_target = (sample_period_decompression_count / 100) * 95;        /* 95 percent */
		aging_target = (sample_period_decompression_count / 100) * 1;           /* 1 percent */
2084 for (oldest_age_of_csegs_sampled
= 0; oldest_age_of_csegs_sampled
< DECOMPRESSION_SAMPLE_MAX_AGE
; oldest_age_of_csegs_sampled
++) {
2085 running_total
+= age_of_decompressions_during_sample_period
[oldest_age_of_csegs_sampled
];
2087 working_set_approximation
+= oldest_age_of_csegs_sampled
* age_of_decompressions_during_sample_period
[oldest_age_of_csegs_sampled
];
2089 if (running_total
>= working_target
) {
2093 if (oldest_age_of_csegs_sampled
< DECOMPRESSION_SAMPLE_MAX_AGE
) {
2094 working_set_approximation
= (working_set_approximation
* 1000) / elapsed_msecs_in_sample
;
2096 if (working_set_approximation
< VM_PAGE_COMPRESSOR_COUNT
) {
2097 running_total
= overage_decompressions_during_sample_period
;
2099 for (oldest_age_of_csegs_sampled
= DECOMPRESSION_SAMPLE_MAX_AGE
- 1; oldest_age_of_csegs_sampled
; oldest_age_of_csegs_sampled
--) {
2100 running_total
+= age_of_decompressions_during_sample_period
[oldest_age_of_csegs_sampled
];
2102 if (running_total
>= aging_target
) {
2106 swapout_target_age
= (uint32_t)cur_ts_sec
- oldest_age_of_csegs_sampled
;
2108 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, swapout_target_age
, working_set_approximation
, VM_PAGE_COMPRESSOR_COUNT
, 2, 0);
2110 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, working_set_approximation
, VM_PAGE_COMPRESSOR_COUNT
, 0, 3, 0);
2113 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, working_target
, running_total
, 0, 4, 0);
2116 compressor_need_sample_reset
= TRUE
;
2117 need_eval_reset
= TRUE
;
2119 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END
, sample_period_decompression_count
, (compressor_thrashing_threshold_per_10msecs
* elapsed_msecs_in_sample
) / 10, 0, 6, 0);
2122 if (compressor_need_sample_reset
== TRUE
) {
2123 bzero(age_of_decompressions_during_sample_period
, sizeof(age_of_decompressions_during_sample_period
));
2124 overage_decompressions_during_sample_period
= 0;
2126 start_of_sample_period_sec
= cur_ts_sec
;
2127 start_of_sample_period_nsec
= cur_ts_nsec
;
2128 sample_period_decompression_count
= 0;
2129 sample_period_compression_count
= 0;
2130 last_eval_decompression_count
= 0;
2131 last_eval_compression_count
= 0;
2132 compressor_need_sample_reset
= FALSE
;
2134 if (need_eval_reset
== TRUE
) {
2135 start_of_eval_period_sec
= cur_ts_sec
;
2136 start_of_eval_period_nsec
= cur_ts_nsec
;
int compaction_swapper_init_now = 0;
int compaction_swapper_running = 0;
int compaction_swapper_awakened = 0;
int compaction_swapper_abort = 0;


#if CONFIG_JETSAM
boolean_t memorystatus_kill_on_VM_compressor_thrashing(boolean_t);
boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
boolean_t memorystatus_kill_on_FC_thrashing(boolean_t);
int compressor_thrashing_induced_jetsam = 0;
int filecache_thrashing_induced_jetsam = 0;
static boolean_t vm_compressor_thrashing_detected = FALSE;
#endif /* CONFIG_JETSAM */
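
/*
 * Returns TRUE when the compactor should push segments to swap: ripe
 * overage segments, macro-level memory pressure, or a non-zero
 * swapout_target_age can all trigger it. Under CONFIG_JETSAM, thrashing or
 * a compressor space shortage kicks off an async jetsam instead, and the
 * swap is suppressed so we don't compact segments the kill is about to free.
 */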
static boolean_t
compressor_needs_to_swap(void)
{
	boolean_t should_swap = FALSE;

	if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
		c_segment_t c_seg;
		clock_sec_t now;
		clock_sec_t age;
		clock_nsec_t nsec;

		clock_get_system_nanotime(&now, &nsec);
		age = 0;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			age = now - c_seg->c_creation_ts;
		}
		lck_mtx_unlock_always(c_list_lock);

		if (age >= vm_ripe_target_age) {
			should_swap = TRUE;
			goto check_if_low_space;
		}
	}
	if (VM_CONFIG_SWAP_IS_ACTIVE) {
		if (COMPRESSOR_NEEDS_TO_SWAP()) {
			should_swap = TRUE;
			goto check_if_low_space;
		}
		if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
			should_swap = TRUE;
			goto check_if_low_space;
		}
		if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
			should_swap = TRUE;
			goto check_if_low_space;
		}
	}
	compute_swapout_target_age();

	if (swapout_target_age) {
		c_segment_t c_seg;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			if (c_seg->c_creation_ts > swapout_target_age) {
				swapout_target_age = 0;
			}
		}
		lck_mtx_unlock_always(c_list_lock);
	}
#if CONFIG_PHANTOM_CACHE
	if (vm_phantom_cache_check_pressure()) {
		should_swap = TRUE;
	}
#endif
	if (swapout_target_age) {
		should_swap = TRUE;
	}

check_if_low_space:

#if CONFIG_JETSAM
	if (should_swap || vm_compressor_low_on_space() == TRUE) {
		if (vm_compressor_thrashing_detected == FALSE) {
			vm_compressor_thrashing_detected = TRUE;

			if (swapout_target_age) {
				/* The compressor is thrashing. */
				memorystatus_kill_on_VM_compressor_thrashing(TRUE /* async */);
				compressor_thrashing_induced_jetsam++;
			} else if (vm_compressor_low_on_space() == TRUE) {
				/* The compressor is running low on space. */
				memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
				compressor_thrashing_induced_jetsam++;
			} else {
				memorystatus_kill_on_FC_thrashing(TRUE /* async */);
				filecache_thrashing_induced_jetsam++;
			}
		}
		/*
		 * let the jetsam take precedence over
		 * any major compactions we might have
		 * been able to do... otherwise we run
		 * the risk of doing major compactions
		 * on segments we're about to free up
		 * due to the jetsam activity.
		 */
		should_swap = FALSE;
	}

#else /* CONFIG_JETSAM */
	if (should_swap && vm_swap_low_on_space()) {
		vm_compressor_take_paging_space_action();
	}
#endif /* CONFIG_JETSAM */

	if (should_swap == FALSE) {
		/*
		 * vm_compressor_needs_to_major_compact returns true only if we're
		 * about to run out of available compressor segments... in this
		 * case, we absolutely need to run a major compaction even if
		 * we've just kicked off a jetsam or we don't otherwise need to
		 * swap... terminating objects releases
		 * pages back to the uncompressed cache, but does not guarantee
		 * that we will free up even a single compression segment
		 */
		should_swap = vm_compressor_needs_to_major_compact();
	}

	/*
	 * returning TRUE when swap_supported == FALSE
	 * will cause the major compaction engine to
	 * run, but will not trigger any swapping...
	 * segments that have been major compacted
	 * will be moved to the majorcompact queue
	 */
	return should_swap;
}
#if CONFIG_JETSAM
/*
 * This function is called from the jetsam thread after killing something to
 * mitigate thrashing.
 *
 * We need to restart our thrashing detection heuristics since memory pressure
 * has potentially changed significantly, and we don't want to detect on old
 * data from before the jetsam.
 */
void
vm_thrashing_jetsam_done(void)
{
	vm_compressor_thrashing_detected = FALSE;

	/* Were we compressor-thrashing or filecache-thrashing? */
	if (swapout_target_age) {
		swapout_target_age = 0;
		compressor_need_sample_reset = TRUE;
	}
#if CONFIG_PHANTOM_CACHE
	else {
		vm_phantom_cache_restart_sample();
	}
#endif
}
#endif /* CONFIG_JETSAM */
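
/*
 * vm_run_compactor() runs delayed minor compactions inline on behalf of the
 * caller (or waits on the compactor thread when the system is restricted to
 * a single processor); the counters below record which path each call took.
 */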
uint32_t vm_wake_compactor_swapper_calls = 0;
uint32_t vm_run_compactor_already_running = 0;
uint32_t vm_run_compactor_empty_minor_q = 0;
uint32_t vm_run_compactor_did_compact = 0;
uint32_t vm_run_compactor_waited = 0;

void
vm_run_compactor(void)
{
	if (c_segment_count == 0) {
		return;
	}

	lck_mtx_lock_spin_always(c_list_lock);

	if (c_minor_count == 0) {
		vm_run_compactor_empty_minor_q++;

		lck_mtx_unlock_always(c_list_lock);
		return;
	}
	if (compaction_swapper_running) {
		if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
			vm_run_compactor_already_running++;

			lck_mtx_unlock_always(c_list_lock);
			return;
		}
		vm_run_compactor_waited++;

		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		return;
	}
	vm_run_compactor_did_compact++;

	fastwake_warmup = FALSE;
	compaction_swapper_running = 1;

	vm_compressor_do_delayed_compactions(FALSE);

	compaction_swapper_running = 0;

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);
}
void
vm_wake_compactor_swapper(void)
{
	if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
		return;
	}

	if (c_minor_count || vm_compressor_needs_to_major_compact()) {
		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
			vm_wake_compactor_swapper_calls++;

			compaction_swapper_awakened = 1;
			thread_wakeup((event_t)&c_compressor_swap_trigger);
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}
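
/*
 * Quiesce the compactor thread, re-age any swap-ripe segments sitting on
 * the major-compact queue, and run a full compact-and-swap pass with
 * vm_swapout_ripe_segments (swap-behind) enabled for its duration.
 */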
void
vm_consider_swapping()
{
	c_segment_t c_seg, c_seg_next;
	clock_sec_t now;
	clock_nsec_t nsec;

	assert(VM_CONFIG_SWAP_IS_PRESENT);

	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	vm_swapout_ripe_segments = TRUE;

	if (!queue_empty(&c_major_list_head)) {
		clock_get_system_nanotime(&now, &nsec);

		c_seg = (c_segment_t)queue_first(&c_major_list_head);

		while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
			if (c_overage_swapped_count >= c_overage_swapped_limit) {
				break;
			}
			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);

			if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
				lck_mtx_lock_spin_always(&c_seg->c_lock);

				c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);

				lck_mtx_unlock_always(&c_seg->c_lock);
			}
			c_seg = c_seg_next;
		}
	}
	vm_compressor_compact_and_swap(FALSE);

	compaction_swapper_running = 0;

	vm_swapout_ripe_segments = FALSE;

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);
}
void
vm_consider_waking_compactor_swapper(void)
{
	boolean_t need_wakeup = FALSE;

	if (c_segment_count == 0) {
		return;
	}

	if (compaction_swapper_running || compaction_swapper_awakened) {
		return;
	}

	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
		compaction_swapper_init_now = 1;
		need_wakeup = TRUE;
	}

	if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {
		need_wakeup = TRUE;
	} else if (compressor_needs_to_swap()) {
		need_wakeup = TRUE;
	} else if (c_minor_count) {
		uint64_t total_bytes;

		total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;

		if ((total_bytes - compressor_bytes_used) > total_bytes / 10) {
			need_wakeup = TRUE;
		}
	}
	if (need_wakeup == TRUE) {
		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
			memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);

			compaction_swapper_awakened = 1;
			thread_wakeup((event_t)&c_compressor_swap_trigger);
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}
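
/*
 * Drain segments queued for minor compaction; called (and returns) with
 * c_list_lock held. After each DELAYED_COMPACTIONS_PER_PASS segments it
 * checks whether a swapout pass is needed, so a long minor queue can't
 * starve the swapper.
 */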
#define C_SWAPOUT_LIMIT                 4
#define DELAYED_COMPACTIONS_PER_PASS    30

void
vm_compressor_do_delayed_compactions(boolean_t flush_all)
{
	c_segment_t c_seg;
	int number_compacted = 0;
	boolean_t needs_to_swap = FALSE;

	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);

#if !CONFIG_EMBEDDED
	LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
#endif /* !CONFIG_EMBEDDED */

	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
		c_seg = (c_segment_t)queue_first(&c_minor_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {
			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);

		if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
			if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT) {
				needs_to_swap = TRUE;
			}
			number_compacted = 0;
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}

	VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
}
#define C_SEGMENT_SWAPPEDIN_AGE_LIMIT   10

static void
vm_compressor_age_swapped_in_segments(boolean_t flush_all)
{
	c_segment_t c_seg;
	clock_sec_t now;
	clock_nsec_t nsec;

	clock_get_system_nanotime(&now, &nsec);

	while (!queue_empty(&c_swappedin_list_head)) {
		c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);

		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
			break;
		}

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);

		lck_mtx_unlock_always(&c_seg->c_lock);
	}
}
extern int vm_num_swap_files;
extern int vm_num_pinned_swap_files;
extern int vm_swappin_enabled;

extern unsigned int vm_swapfile_total_segs_used;
extern unsigned int vm_swapfile_total_segs_alloced;
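
/*
 * Hibernate-time flush: force a compact-and-swap of everything up to a
 * generation-id barrier (current id + 1000), bounded by
 * HIBERNATE_FLUSHING_SECS_TO_COMPLETE, then wait for the swapout queue to
 * drain before reporting any vm_swap_put failures.
 */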
void
vm_compressor_flush(void)
{
	uint64_t vm_swap_put_failures_at_start;
	wait_result_t wait_result = 0;
	AbsoluteTime startTime, endTime;
	clock_sec_t now_sec;
	clock_nsec_t now_nsec;
	uint64_t nsec;

	HIBLOG("vm_compressor_flush - starting\n");

	clock_get_uptime(&startTime);

	lck_mtx_lock_spin_always(c_list_lock);

	fastwake_warmup = FALSE;
	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	hibernate_flushing = TRUE;
	hibernate_no_swapspace = FALSE;
	c_generation_id_flush_barrier = c_generation_id + 1000;

	clock_get_system_nanotime(&now_sec, &now_nsec);
	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;

	vm_swap_put_failures_at_start = vm_swap_put_failures;

	vm_compressor_compact_and_swap(TRUE);

	while (!queue_empty(&c_swapout_list_head)) {
		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);

		lck_mtx_unlock_always(c_list_lock);

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);

		if (wait_result == THREAD_TIMED_OUT) {
			break;
		}
	}
	hibernate_flushing = FALSE;
	compaction_swapper_running = 0;

	if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
		    vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
	}

	lck_mtx_unlock_always(c_list_lock);

	thread_wakeup((event_t)&compaction_swapper_running);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
	    nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
}
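
/*
 * Body of the compactor/swapper trigger thread: the first wakeup performs
 * the deferred tunable and swapfile initialization, each subsequent wakeup
 * runs one compact-and-swap pass, and the thread re-arms itself by blocking
 * with this function as its continuation.
 */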
int compaction_swap_trigger_thread_awakened = 0;

static void
vm_compressor_swap_trigger_thread(void)
{
	current_thread()->options |= TH_OPT_VMPRIV;

	/*
	 * compaction_swapper_init_now is set when the first call to
	 * vm_consider_waking_compactor_swapper is made from
	 * vm_pageout_scan... since this function is called upon
	 * thread creation, we want to make sure to delay adjusting
	 * the tuneables until we are awakened via vm_pageout_scan
	 * so that we are at a point where the vm_swapfile_open will
	 * be operating on the correct directory (in case the default
	 * of using the VM volume is overridden by the dynamic_pager)
	 */
	if (compaction_swapper_init_now) {
		vm_compaction_swapper_do_init();

		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
			thread_vm_bind_group_add();
		}
#if CONFIG_THREAD_GROUPS
		thread_group_vm_add();
#endif
		thread_set_thread_name(current_thread(), "VM_cswap_trigger");
		compaction_swapper_init_now = 0;
	}
	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swap_trigger_thread_awakened++;
	compaction_swapper_awakened = 0;

	if (compaction_swapper_running == 0) {
		compaction_swapper_running = 1;

		vm_compressor_compact_and_swap(FALSE);

		compaction_swapper_running = 0;
	}
	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);

	if (compaction_swapper_running == 0) {
		thread_wakeup((event_t)&compaction_swapper_running);
	}

	lck_mtx_unlock_always(c_list_lock);

	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
}
void
vm_compressor_record_warmup_start(void)
{
	c_segment_t c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == 0) {
		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else {
			first_c_segment_to_warm_generation_id = 0;
		}

		fastwake_recording_in_progress = TRUE;
	}
	lck_mtx_unlock_always(c_list_lock);
}


void
vm_compressor_record_warmup_end(void)
{
	c_segment_t c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (fastwake_recording_in_progress == TRUE) {
		if (!queue_empty(&c_age_list_head)) {
			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else {
			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
		}

		fastwake_recording_in_progress = FALSE;

		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
	}
	lck_mtx_unlock_always(c_list_lock);
}
#define DELAY_TRIM_ON_WAKE_SECS         25

void
vm_compressor_delay_trim(void)
{
	clock_sec_t sec;
	clock_nsec_t nsec;

	clock_get_system_nanotime(&sec, &nsec);
	dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
}
void
vm_compressor_do_warmup(void)
{
	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;

		lck_mtx_unlock_always(c_list_lock);
		return;
	}

	if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
		fastwake_warmup = TRUE;

		compaction_swapper_awakened = 1;
		thread_wakeup((event_t)&c_compressor_swap_trigger);
	}
	lck_mtx_unlock_always(c_list_lock);
}
void
do_fastwake_warmup_all(void)
{
	lck_mtx_lock_spin_always(c_list_lock);

	if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
		lck_mtx_unlock_always(c_list_lock);
		return;
	}

	fastwake_warmup = TRUE;

	do_fastwake_warmup(&c_swappedout_list_head, TRUE);

	do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);

	fastwake_warmup = FALSE;

	lck_mtx_unlock_always(c_list_lock);
}
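
/*
 * Swap segments back in to pre-warm memory after wake. When
 * consider_all_cseg is FALSE, only segments inside the generation-id window
 * recorded around hibernation are warmed, and the loop bails out if free
 * memory falls below a quarter of AVAILABLE_MEMORY. I/O runs at the
 * throttled TIER2 priority for the duration.
 */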
void
do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
{
	c_segment_t c_seg = NULL;
	AbsoluteTime startTime, endTime;
	uint64_t nsec;


	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);

	clock_get_uptime(&startTime);

	lck_mtx_unlock_always(c_list_lock);

	proc_set_thread_policy(current_thread(),
	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);

	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
		c_seg = (c_segment_t) queue_first(c_queue);

		if (consider_all_cseg == FALSE) {
			if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
			    c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
				break;
			}

			if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
				break;
			}
		}

		lck_mtx_lock_spin_always(&c_seg->c_lock);
		lck_mtx_unlock_always(c_list_lock);

		if (c_seg->c_busy) {
			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			c_seg_wait_on_busy(c_seg);
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		} else {
			if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
				lck_mtx_unlock_always(&c_seg->c_lock);
			}
			c_segment_warmup_count++;

			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			vm_pageout_io_throttle();
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
	lck_mtx_unlock_always(c_list_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	proc_set_thread_policy(current_thread(),
	    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);

	lck_mtx_lock_spin_always(c_list_lock);

	if (consider_all_cseg == FALSE) {
		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
	}
}
int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
extern bool vm_swapout_thread_running;
extern boolean_t compressor_store_stop_compaction;
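
/*
 * Main compactor loop: complete any post-hibernate warmup, run delayed
 * minor compactions, then repeatedly take the oldest segment on the age
 * queue and major-compact its younger neighbors into it; the survivor is
 * moved to the swapout queue (when swap is active) or the major-compact
 * queue. The loop yields periodically when a segment it holds busy is
 * wanted by another thread.
 */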
void
vm_compressor_compact_and_swap(boolean_t flush_all)
{
	c_segment_t c_seg, c_seg_next;
	boolean_t keep_compacting, switch_state;
	clock_sec_t now;
	clock_nsec_t nsec;
	mach_timespec_t start_ts, end_ts;
	unsigned int number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
	uint64_t bytes_to_free, bytes_freed, delta_usec;

	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);

	if (fastwake_warmup == TRUE) {
		uint64_t starting_warmup_count;

		starting_warmup_count = c_segment_warmup_count;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
		    first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
		do_fastwake_warmup(&c_swappedout_list_head, FALSE);
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);

		fastwake_warmup = FALSE;
	}

	/*
	 * it's possible for the c_age_list_head to be empty if we
	 * hit our limits for growing the compressor pool and we subsequently
	 * hibernated... on the next hibernation we could see the queue as
	 * empty and not proceed even though we have a bunch of segments on
	 * the swapped in queue that need to be dealt with.
	 */
	vm_compressor_do_delayed_compactions(flush_all);

	vm_compressor_age_swapped_in_segments(flush_all);

	/*
	 * we only need to grab the timestamp once per
	 * invocation of this function since the
	 * timescale we're interested in is measured
	 * in days
	 */
	clock_get_system_nanotime(&now, &nsec);

	start_ts.tv_sec = (int) now;
	start_ts.tv_nsec = nsec;
	delta_usec = 0;
	number_considered = 0;
	wanted_cseg_found = 0;
	number_yields = 0;
	bytes_to_free = 0;
	bytes_freed = 0;
	yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);

	while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
		if (hibernate_flushing == TRUE) {
			clock_sec_t sec;

			if (hibernate_should_abort()) {
				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
				break;
			}
			if (hibernate_no_swapspace == TRUE) {
				HIBLOG("vm_compressor_flush - out of swap space\n");
				break;
			}
			if (vm_swap_files_pinned() == FALSE) {
				HIBLOG("vm_compressor_flush - unpinned swap files\n");
				break;
			}
			if (hibernate_in_progress_with_pinned_swap == TRUE &&
			    (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
				HIBLOG("vm_compressor_flush - out of pinned swap space\n");
				break;
			}
			clock_get_system_nanotime(&sec, &nsec);

			if (sec > hibernate_flushing_deadline) {
				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
				break;
			}
		}
		if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);

			if (!vm_swapout_thread_running) {
				thread_wakeup((event_t)&c_swapout_list_head);
			}

			lck_mtx_unlock_always(c_list_lock);

			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock_spin_always(c_list_lock);
		}
		/*
		 * Minor compactions
		 */
		vm_compressor_do_delayed_compactions(flush_all);

		vm_compressor_age_swapped_in_segments(flush_all);

		if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
			/*
			 * we timed out on the above thread_block
			 * let's loop around and try again
			 * the timeout allows us to continue
			 * to do minor compactions to make
			 * more memory available
			 */
			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);

			continue;
		}

		/*
		 * Swap out segments?
		 */
		if (flush_all == FALSE) {
			boolean_t needs_to_swap;

			lck_mtx_unlock_always(c_list_lock);

			needs_to_swap = compressor_needs_to_swap();

			lck_mtx_lock_spin_always(c_list_lock);

			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);

			if (needs_to_swap == FALSE) {
				break;
			}
		}
		if (queue_empty(&c_age_list_head)) {
			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
			break;
		}
		c_seg = (c_segment_t) queue_first(&c_age_list_head);

		assert(c_seg->c_state == C_ON_AGE_Q);

		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
			break;
		}

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {
			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);

			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
			/*
			 * found an empty c_segment and freed it
			 * so go grab the next guy in the queue
			 */
			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
			c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
			continue;
		}
		/*
		 * Major compaction
		 */
		keep_compacting = TRUE;
		switch_state = TRUE;

		while (keep_compacting == TRUE) {
			assert(c_seg->c_busy);

			/* look for another segment to consolidate */

			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);

			if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next)) {
				break;
			}
			assert(c_seg_next->c_state == C_ON_AGE_Q);

			number_considered++;

			if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
				break;
			}

			lck_mtx_lock_spin_always(&c_seg_next->c_lock);

			if (c_seg_next->c_busy) {
				/*
				 * We are going to block for our neighbor.
				 * If our c_seg is wanted, we should unbusy
				 * it because we don't know how long we might
				 * have to block here.
				 */
				if (c_seg->c_wanted) {
					lck_mtx_unlock_always(&c_seg_next->c_lock);
					switch_state = FALSE;
					c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
					wanted_cseg_found++;
					break;
				}

				lck_mtx_unlock_always(c_list_lock);

				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);

				c_seg_wait_on_busy(c_seg_next);
				lck_mtx_lock_spin_always(c_list_lock);

				continue;
			}
			/* grab that segment */
			C_SEG_BUSY(c_seg_next);

			bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
			if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
				/*
				 * found an empty c_segment and freed it
				 * so we can't continue to use c_seg_next
				 */
				bytes_freed += bytes_to_free;
				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
				continue;
			}

			/* unlock the list ... */
			lck_mtx_unlock_always(c_list_lock);

			/* do the major compaction */

			keep_compacting = c_seg_major_compact(c_seg, c_seg_next);

			VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);

			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			lck_mtx_lock_spin_always(&c_seg_next->c_lock);
			/*
			 * run a minor compaction on the donor segment
			 * since we pulled at least some of its
			 * data into our target... if we've emptied
			 * it, now is a good time to free it which
			 * c_seg_minor_compaction_and_unlock also takes care of
			 *
			 * by passing TRUE, we ask for c_busy to be cleared
			 * and c_wanted to be taken care of
			 */
			bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
			if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
				bytes_freed += bytes_to_free;
				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
			} else {
				bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
				bytes_freed += bytes_to_free;
			}

			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			/* relock the list */
			lck_mtx_lock_spin_always(c_list_lock);

			if (c_seg->c_wanted) {
				/*
				 * Our c_seg is in demand. Let's
				 * unbusy it and wakeup the waiters
				 * instead of continuing the compaction
				 * because we could be in this loop
				 * for a while.
				 */
				switch_state = FALSE;
				wanted_cseg_found++;
				c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
				break;
			}
		} /* major compaction */

		VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, wanted_cseg_found, 0);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_busy);
		assert(!c_seg->c_on_minorcompact_q);

		if (switch_state) {
			if (VM_CONFIG_SWAP_IS_ACTIVE) {
				/*
				 * This mode of putting a generic c_seg on the swapout list is
				 * only supported when we have general swapping enabled
				 */
				c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
			} else {
				if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
					assert(VM_CONFIG_SWAP_IS_PRESENT);
					/*
					 * we are running compressor sweeps with swap-behind
					 * make sure the c_seg has aged enough before swapping it
					 * out...
					 */
					if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
						c_seg->c_overage_swap = TRUE;
						c_overage_swapped_count++;
						c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
					}
				}
			}
			if (c_seg->c_state == C_ON_AGE_Q) {
				/*
				 * this c_seg didn't get moved to the swapout queue
				 * so we need to move it out of the way...
				 * we just did a major compaction on it so put it
				 * on that queue
				 */
				c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
			} else {
				c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += C_SEG_BUFSIZE - c_seg->c_bytes_used;
				c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
			}
		}

		C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);

		if (c_swapout_count) {
			/*
			 * We don't pause/yield here because we will either
			 * yield below or at the top of the loop with the
			 * assert_wait_timeout.
			 */
			if (!vm_swapout_thread_running) {
				thread_wakeup((event_t)&c_swapout_list_head);
			}
		}

		if (number_considered >= yield_after_considered_per_pass) {
			if (wanted_cseg_found) {
				/*
				 * We stopped major compactions on a c_seg
				 * that is wanted. We don't know the priority
				 * of the waiter unfortunately but we are at
				 * a very high priority and so, just in case
				 * the waiter is a critical system daemon or
				 * UI thread, let's give up the CPU in case
				 * the system is running a few CPU intensive
				 * tasks.
				 */
				lck_mtx_unlock_always(c_list_lock);

				mutex_pause(2); /* 100us yield */

				number_yields++;

				VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);

				lck_mtx_lock_spin_always(c_list_lock);
			}

			number_considered = 0;
			wanted_cseg_found = 0;
		}
	}
	clock_get_system_nanotime(&now, &nsec);
	end_ts.tv_sec = (int) now;
	end_ts.tv_nsec = nsec;

	SUB_MACH_TIMESPEC(&end_ts, &start_ts);

	delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);

	delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */

	c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);

	if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
		c_seg_major_compact_stats_now = 0;
	} else {
		c_seg_major_compact_stats_now++;
	}

	assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);

	VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
}
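
/*
 * Return the segment currently being filled for this context, allocating
 * (and, if needed, growing the c_segments array and populating buffer
 * pages) when there is none. Returns NULL if the segment or compressed-page
 * limits have been reached; otherwise returns with the c_seg lock held and
 * PAGE_REPLACEMENT_DISALLOWED(TRUE).
 */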
static c_segment_t
c_seg_allocate(c_segment_t *current_chead)
{
	c_segment_t c_seg;
	int min_needed;
	int size_to_populate;

#if !CONFIG_EMBEDDED
	if (vm_compressor_low_on_space()) {
		vm_compressor_take_paging_space_action();
	}
#endif /* !CONFIG_EMBEDDED */

	if ((c_seg = *current_chead) == NULL) {
		uint32_t c_segno;

		lck_mtx_lock_spin_always(c_list_lock);

		while (c_segments_busy == TRUE) {
			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);

			lck_mtx_unlock_always(c_list_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock_spin_always(c_list_lock);
		}
		if (c_free_segno_head == (uint32_t)-1) {
			uint32_t c_segments_available_new;
			uint32_t compressed_pages;

#if CONFIG_FREEZE
			if (freezer_incore_cseg_acct) {
				compressed_pages = c_segment_pages_compressed_incore;
			} else {
				compressed_pages = c_segment_pages_compressed;
			}
#else
			compressed_pages = c_segment_pages_compressed;
#endif /* CONFIG_FREEZE */

			if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
				lck_mtx_unlock_always(c_list_lock);

				return NULL;
			}
			c_segments_busy = TRUE;
			lck_mtx_unlock_always(c_list_lock);

			kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page,
			    PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
			c_segments_next_page += PAGE_SIZE;

			c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;

			if (c_segments_available_new > c_segments_limit) {
				c_segments_available_new = c_segments_limit;
			}

			for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
				c_segments[c_segno - 1].c_segno = c_segno;
			}

			lck_mtx_lock_spin_always(c_list_lock);

			c_segments[c_segno - 1].c_segno = c_free_segno_head;
			c_free_segno_head = c_segments_available;
			c_segments_available = c_segments_available_new;

			c_segments_busy = FALSE;
			thread_wakeup((event_t) (&c_segments_busy));
		}
		c_segno = c_free_segno_head;
		assert(c_segno >= 0 && c_segno < c_segments_limit);

		c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;

		/*
		 * do the rest of the bookkeeping now while we're still behind
		 * the list lock and grab our generation id now into a local
		 * so that we can install it once we have the c_seg allocated
		 */
		c_segment_count++;
		if (c_segment_count > c_segment_count_max) {
			c_segment_count_max = c_segment_count;
		}

		lck_mtx_unlock_always(c_list_lock);

		c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);

		c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);

		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);

		c_seg->c_state = C_IS_EMPTY;
		c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
		c_seg->c_mysegno = c_segno;

		lck_mtx_lock_spin_always(c_list_lock);

		c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
		c_segments[c_segno].c_seg = c_seg;
		assert(c_segments[c_segno].c_segno > c_segments_available);
		lck_mtx_unlock_always(c_list_lock);

		*current_chead = c_seg;

#if DEVELOPMENT || DEBUG
		C_SEG_MAKE_WRITEABLE(c_seg);
#endif
	}
	c_seg_alloc_nextslot(c_seg);

	size_to_populate = C_SEG_ALLOCSIZE - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);

	if (size_to_populate) {
		min_needed = PAGE_SIZE + (C_SEG_ALLOCSIZE - C_SEG_BUFSIZE);

		if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
			if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
				size_to_populate = C_SEG_MAX_POPULATE_SIZE;
			}

			OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);

			kernel_memory_populate(compressor_map,
			    (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
			    size_to_populate,
			    KMA_COMPRESSOR,
			    VM_KERN_MEMORY_COMPRESSOR);
		} else {
			size_to_populate = 0;
		}
	}
	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(&c_seg->c_lock);

	if (size_to_populate) {
		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
	}

	return c_seg;
}
#if DEVELOPMENT || DEBUG
#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_to_memory;
#endif /* CONFIG_FREEZE */
#endif /* DEVELOPMENT || DEBUG */

void
c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
{
	uint32_t unused_bytes;
	uint32_t offset_to_depopulate;
	int new_state = C_ON_AGE_Q;
	clock_sec_t sec;
	clock_nsec_t nsec;
	boolean_t head_insert = FALSE;

	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));

	if (unused_bytes) {
		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));

		/*
		 * release the extra physical page(s) at the end of the segment
		 */
		lck_mtx_unlock_always(&c_seg->c_lock);

		kernel_memory_depopulate(
			compressor_map,
			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
			unused_bytes,
			KMA_COMPRESSOR,
			VM_KERN_MEMORY_COMPRESSOR);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg->c_populated_offset = offset_to_depopulate;
	}
	assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= C_SEG_BUFSIZE);

#if DEVELOPMENT || DEBUG
	{
		boolean_t c_seg_was_busy = FALSE;

		if (!c_seg->c_busy) {
			C_SEG_BUSY(c_seg);
		} else {
			c_seg_was_busy = TRUE;
		}

		lck_mtx_unlock_always(&c_seg->c_lock);

		C_SEG_WRITE_PROTECT(c_seg);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg_was_busy == FALSE) {
			C_SEG_WAKEUP_DONE(c_seg);
		}
	}
#endif

#if CONFIG_FREEZE
	if (current_chead == (c_segment_t *) &(freezer_context_global.freezer_ctx_chead) &&
	    VM_CONFIG_SWAP_IS_PRESENT &&
	    VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
#if DEVELOPMENT || DEBUG
	    && !memorystatus_freeze_to_memory
#endif /* DEVELOPMENT || DEBUG */
	    ) {
		new_state = C_ON_SWAPOUT_Q;
	}
#endif /* CONFIG_FREEZE */

	if (vm_darkwake_mode == TRUE) {
		new_state = C_ON_SWAPOUT_Q;
		head_insert = TRUE;
	}

	clock_get_system_nanotime(&sec, &nsec);
	c_seg->c_creation_ts = (uint32_t)sec;

	lck_mtx_lock_spin_always(c_list_lock);

	c_seg->c_generation_id = c_generation_id++;
	c_seg_switch_state(c_seg, new_state, head_insert);

#if CONFIG_FREEZE
	if (c_seg->c_state == C_ON_SWAPOUT_Q) {
		/*
		 * darkwake and freezer can't co-exist together
		 * We'll need to fix this accounting as a start.
		 */
		assert(vm_darkwake_mode == FALSE);
		c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
		freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
	}
#endif /* CONFIG_FREEZE */

	if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
#if CONFIG_FREEZE
		assert(c_seg->c_task_owner == NULL);
#endif /* CONFIG_FREEZE */
		c_seg_need_delayed_compaction(c_seg, TRUE);
	}

	lck_mtx_unlock_always(c_list_lock);

	if (c_seg->c_state == C_ON_SWAPOUT_Q) {
		/*
		 * Darkwake and Freeze configs always
		 * wake up the swapout thread because
		 * the compactor thread that normally handles
		 * it may not be running as much in these
		 * cases.
		 */
		thread_wakeup((event_t)&c_swapout_list_head);
	}

	*current_chead = NULL;
}
/*
 * returns with c_seg locked
 */
void
c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
{
	clock_sec_t sec;
	clock_nsec_t nsec;

	clock_get_system_nanotime(&sec, &nsec);

	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	assert(c_seg->c_busy_swapping);
	assert(c_seg->c_busy);

	c_seg->c_busy_swapping = 0;

	if (c_seg->c_overage_swap == TRUE) {
		c_overage_swapped_count--;
		c_seg->c_overage_swap = FALSE;
	}
	if (has_data == TRUE) {
		if (age_on_swapin_q == TRUE) {
			c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
		} else {
			c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
		}

		if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
			c_seg_need_delayed_compaction(c_seg, TRUE);
		}
	} else {
		c_seg->c_store.c_buffer = (int32_t*) NULL;
		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);

		c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
	}
	c_seg->c_swappedin_ts = (uint32_t)sec;

	lck_mtx_unlock_always(c_list_lock);
}
/*
 * c_seg has to be locked and is returned locked if the c_seg isn't freed
 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
 * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
 */

int
c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
{
	vm_offset_t addr = 0;
	uint32_t io_size = 0;
	uint64_t f_offset;

	assert(C_SEG_IS_ONDISK(c_seg));

#if !CHECKSUM_THE_SWAP
	c_seg_trim_tail(c_seg);
#endif
	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
	f_offset = c_seg->c_store.c_swap_handle;

	C_SEG_BUSY(c_seg);
	c_seg->c_busy_swapping = 1;

	/*
	 * This thread is likely going to block for I/O.
	 * Make sure it is ready to run when the I/O completes because
	 * it needs to clear the busy bit on the c_seg so that other
	 * waiting threads can make progress too. To do that, boost
	 * the rwlock_count so that the priority is boosted.
	 */
	set_thread_rwlock_boost();
	lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
	c_seg->c_store.c_buffer = (int32_t*) addr;

	kernel_memory_populate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);

	if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);

		c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
	} else {
#if ENCRYPTED_SWAP
		vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */

#if CHECKSUM_THE_SWAP
		if (c_seg->cseg_swap_size != io_size) {
			panic("swapin size doesn't match swapout size");
		}

		if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
			panic("c_seg_swapin - Swap hash mismatch\n");
		}
#endif /* CHECKSUM_THE_SWAP */

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);

#if CONFIG_FREEZE
		/*
		 * c_seg_swapin_requeue() returns with the c_seg lock held.
		 */
		if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
			assert(c_seg->c_busy);

			lck_mtx_unlock_always(&c_seg->c_lock);
			lck_mtx_lock_spin_always(c_list_lock);
			lck_mtx_lock_spin_always(&c_seg->c_lock);
		}

		if (c_seg->c_task_owner) {
			c_seg_update_task_owner(c_seg, NULL);
		}

		lck_mtx_unlock_always(c_list_lock);

		OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
#endif /* CONFIG_FREEZE */

		OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);

		if (force_minor_compaction == TRUE) {
			if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
				/*
				 * c_seg was completely empty so it was freed,
				 * so be careful not to reference it again
				 *
				 * Drop the rwlock_count so that the thread priority
				 * is returned back to where it is supposed to be.
				 */
				clear_thread_rwlock_boost();
				return 1;
			}

			lck_mtx_lock_spin_always(&c_seg->c_lock);
		}
	}
	C_SEG_WAKEUP_DONE(c_seg);

	/*
	 * Drop the rwlock_count so that the thread priority
	 * is returned back to where it is supposed to be.
	 */
	clear_thread_rwlock_boost();

	return 0;
}
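
/*
 * Single-value (SV) page support: a page filled entirely with one 32-bit
 * pattern is not stored in a segment; the pattern is interned in
 * c_segment_sv_hash_table and the slot references it via C_SV_CSEG_ID.
 * Entries are reference-counted and updated with lock-free 64-bit
 * compare-and-swap.
 */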
static void
c_segment_sv_hash_drop_ref(int hash_indx)
{
	struct c_sv_hash_entry o_sv_he, n_sv_he;

	while (1) {
		o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;

		n_sv_he.he_ref = o_sv_he.he_ref - 1;
		n_sv_he.he_data = o_sv_he.he_data;

		if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
			if (n_sv_he.he_ref == 0) {
				OSAddAtomic(-1, &c_segment_svp_in_hash);
			}
			break;
		}
	}
}
static int
c_segment_sv_hash_insert(uint32_t data)
{
	int hash_sindx;
	int misses;
	struct c_sv_hash_entry o_sv_he, n_sv_he;
	boolean_t got_ref = FALSE;

	if (data == 0) {
		OSAddAtomic(1, &c_segment_svp_zero_compressions);
	} else {
		OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
	}

	hash_sindx = data & C_SV_HASH_MASK;

	for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
		o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;

		while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
			n_sv_he.he_ref = o_sv_he.he_ref + 1;
			n_sv_he.he_data = data;

			if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
				if (n_sv_he.he_ref == 1) {
					OSAddAtomic(1, &c_segment_svp_in_hash);
				}
				got_ref = TRUE;
				break;
			}
			o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
		}
		if (got_ref == TRUE) {
			break;
		}
		hash_sindx++;

		if (hash_sindx == C_SV_HASH_SIZE) {
			hash_sindx = 0;
		}
	}
	if (got_ref == FALSE) {
		return -1;
	}

	return hash_sindx;
}
#if RECORD_THE_COMPRESSED_DATA

static void
c_compressed_record_data(char *src, int c_size)
{
	if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
		panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
	}

	*(int *)((void *)c_compressed_record_cptr) = c_size;

	c_compressed_record_cptr += 4;

	memcpy(c_compressed_record_cptr, src, c_size);
	c_compressed_record_cptr += c_size;
}
#endif
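
/*
 * Compress a single page into the current segment for this context.
 * Returns 0 on success, 1 if a segment couldn't be allocated (the caller
 * must retry the page). Incompressible pages are stored uncompressed;
 * pages holding a single 32-bit value are diverted to the SV hash above
 * when possible.
 */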
static int
c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
{
	int c_size;
	int c_rounded_size = 0;
	int max_csize;
	c_slot_t cs;
	c_segment_t c_seg;

	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
retry:
	if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
		return 1;
	}
	/*
	 * returns with c_seg lock held
	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
	 * c_nextslot has been allocated and
	 * c_store.c_buffer populated
	 */
	assert(c_seg->c_state == C_IS_FILLING);

	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);

	C_SLOT_ASSERT_PACKABLE(slot_ptr);
	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);

	cs->c_offset = c_seg->c_nextoffset;

	max_csize = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);

	if (max_csize > PAGE_SIZE) {
		max_csize = PAGE_SIZE;
	}

#if CHECKSUM_THE_DATA
	cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
#endif
	boolean_t incomp_copy = FALSE;
	int max_csize_adj = (max_csize - 4);

	if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
#if defined(__arm__) || defined(__arm64__)
		uint16_t ccodec = CINVALID;
		uint32_t inline_popcount;
		if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
			c_size = metacompressor((const uint8_t *) src,
			    (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
			    max_csize_adj, &ccodec,
			    scratch_buf, &incomp_copy, &inline_popcount);
#if __ARM_WKDM_POPCNT__
			cs->c_inline_popcount = inline_popcount;
#else
			assert(inline_popcount == C_SLOT_NO_POPCOUNT);
#endif

#if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
			if (c_size > max_csize_adj) {
				c_size = -1;
			}
#endif
		} else {
			c_size = -1;
		}
		assert(ccodec == CCWK || ccodec == CCLZ4);
		cs->c_codec = ccodec;
#endif
	} else {
#if defined(__arm__) || defined(__arm64__)
		cs->c_codec = CCWK;
#endif
#if defined(__arm64__)
		__unreachable_ok_push
		if (PAGE_SIZE == 4096) {
			c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
		} else {
			c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
			    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
		}
		__unreachable_ok_pop
#else
		c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
		    (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
#endif
	}
	assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
	    "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);

	if (c_size == -1) {
		if (max_csize < PAGE_SIZE) {
			c_current_seg_filled(c_seg, current_chead);
			assert(*current_chead == NULL);

			lck_mtx_unlock_always(&c_seg->c_lock);
			/* TODO: it may be worth requiring codecs to distinguish
			 * between incompressible inputs and failures due to
			 * budget exhaustion.
			 */
			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			goto retry;
		}
		c_size = PAGE_SIZE;

		if (incomp_copy == FALSE) {
			memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
		}

		OSAddAtomic(1, &c_segment_noncompressible_pages);
	} else if (c_size == 0) {
		int hash_index;

		/*
		 * special case - this is a page completely full of a single 32 bit value
		 */
		hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);

		if (hash_index != -1) {
			slot_ptr->s_cindx = hash_index;
			slot_ptr->s_cseg = C_SV_CSEG_ID;

			OSAddAtomic(1, &c_segment_svp_hash_succeeded);
#if RECORD_THE_COMPRESSED_DATA
			c_compressed_record_data(src, 4);
#endif
			goto sv_compression;
		}
		c_size = 4;

		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);

		OSAddAtomic(1, &c_segment_svp_hash_failed);
	}

#if RECORD_THE_COMPRESSED_DATA
	c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

	PACK_C_SIZE(cs, c_size);
	c_seg->c_bytes_used += c_rounded_size;
	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
	c_seg->c_slots_used++;

	slot_ptr->s_cindx = c_seg->c_nextslot++;
	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
	slot_ptr->s_cseg = c_seg->c_mysegno + 1;

sv_compression:
	if (c_seg->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
		c_current_seg_filled(c_seg, current_chead);
		assert(*current_chead == NULL);
	}
	lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

#if RECORD_THE_COMPRESSED_DATA
	if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= C_SEG_ALLOCSIZE) {
		c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
		c_compressed_record_cptr = c_compressed_record_sbuf;
	}
#endif
	if (c_size) {
		OSAddAtomic64(c_size, &c_segment_compressed_bytes);
		OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
	}
	OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);

	OSAddAtomic(1, &c_segment_pages_compressed);
#if CONFIG_FREEZE
	OSAddAtomic(1, &c_segment_pages_compressed_incore);
#endif /* CONFIG_FREEZE */
	OSAddAtomic(1, &sample_period_compression_count);

	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);

	return 0;
}
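
/*
 * Fill a page with the single 32-bit pattern recovered from an SV slot,
 * using the fastest fill primitive available on this architecture.
 */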
static inline void
sv_decompress(int32_t *ddst, int32_t pattern)
{
//	assert(__builtin_constant_p(PAGE_SIZE) != 0);
#if defined(__x86_64__)
	memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
#elif defined(__arm64__)
	assert((PAGE_SIZE % 128) == 0);
	if (pattern == 0) {
		fill32_dczva((addr64_t)ddst, PAGE_SIZE);
	} else {
		fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
	}
#else
	size_t i;

	/* Unroll the pattern fill loop 4x to encourage the
	 * compiler to emit NEON stores, cf.
	 * <rdar://problem/25839866> Loop autovectorization
	 * anomalies.
	 * We use separate loops for each PAGE_SIZE
	 * to allow the autovectorizer to engage, as PAGE_SIZE
	 * may not be a constant.
	 */
	__unreachable_ok_push
	if (PAGE_SIZE == 4096) {
		for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
		}
	} else {
		assert(PAGE_SIZE == 16384);
		for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
			*ddst++ = pattern;
		}
	}
	__unreachable_ok_pop
#endif
}
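
/*
 * Decompress the compressed page referenced by slot_ptr. C_KDP callers run
 * lock-free in debugger context; all other callers may block on busy
 * segments, on the swap-in of an on-disk segment, or behind the
 * hibernate-time decompression barrier.
 */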
static int
c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
{
	c_slot_t        cs;
	c_segment_t     c_seg;
	uint32_t        c_segno;
	uint16_t        c_indx;
	int             c_rounded_size;
	uint32_t        c_size;
	int             retval = 0;
	boolean_t       need_unlock = TRUE;
	boolean_t       consider_defragmenting = FALSE;
	boolean_t       kdp_mode = FALSE;

	if (__improbable(flags & C_KDP)) {
		if (not_in_kdp) {
			panic("C_KDP passed to decompress page from outside of debugger context");
		}

		assert((flags & C_KEEP) == C_KEEP);
		assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);

		if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
			return -2;
		}

		kdp_mode = TRUE;
		*zeroslot = 0;
	}

	if (__probable(!kdp_mode)) {
		PAGE_REPLACEMENT_DISALLOWED(TRUE);
	} else {
		if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
			return -2;
		}
	}

retry:
	/*
	 * if hibernation is enabled, it indicates (via a call
	 * to 'vm_decompressor_lock') that no further
	 * decompressions are allowed once it reaches
	 * the point of flushing all of the currently dirty
	 * anonymous memory through the compressor and out
	 * to disk... in this state we allow freeing of compressed
	 * pages and must honor the C_DONT_BLOCK case
	 */
	if (__improbable(dst && decompressions_blocked == TRUE)) {
		if (flags & C_DONT_BLOCK) {
			if (__probable(!kdp_mode)) {
				PAGE_REPLACEMENT_DISALLOWED(FALSE);
			}

			*zeroslot = 0;
			return -2;
		}
		/*
		 * it's safe to atomically assert and block behind the
		 * lock held in shared mode because "decompressions_blocked" is
		 * only set and cleared and the thread_wakeup done when the lock
		 * is held exclusively
		 */
		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		thread_block(THREAD_CONTINUE_NULL);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		goto retry;
	}
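	/*
	 * For reference, the wakeup side of the wait above lives in
	 * vm_decompressor_unlock(): with the master lock held exclusively it
	 * clears decompressions_blocked and then does, roughly,
	 *
	 *	thread_wakeup((event_t)&decompressions_blocked);
	 *
	 * which is why asserting the wait under the shared lock cannot miss
	 * the wakeup.
	 */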
	/* s_cseg is actually "segno+1" */
	c_segno = slot_ptr->s_cseg - 1;

	if (__improbable(c_segno >= c_segments_available)) {
		panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
		    c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
	}

	if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
		panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
		    c_segno, slot_ptr, *(int *)((void *)slot_ptr));
	}

	c_seg = c_segments[c_segno].c_seg;

	if (__probable(!kdp_mode)) {
		lck_mtx_lock_spin_always(&c_seg->c_lock);
	} else {
		if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
			return -2;
		}
	}

	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);

	if (dst == NULL && c_seg->c_busy_swapping) {
		assert(c_seg->c_busy);

		goto bypass_busy_check;
	}
	if (flags & C_DONT_BLOCK) {
		if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
			*zeroslot = 0;

			retval = -2;
			goto done;
		}
	}
	if (c_seg->c_busy) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		c_seg_wait_on_busy(c_seg);

		goto retry;
	}
bypass_busy_check:

	c_indx = slot_ptr->s_cindx;

	if (__improbable(c_indx >= c_seg->c_nextslot)) {
		panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
		    c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
	}

	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	c_size = UNPACK_C_SIZE(cs);

	if (__improbable(c_size == 0)) {
		panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
		    c_seg, slot_ptr, *(int *)((void *)slot_ptr));
	}

	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
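	/*
	 * Worked example of the rounding above: if C_SEG_OFFSET_ALIGNMENT_MASK
	 * is 0x3, a 37-byte compressed payload rounds to (37 + 3) & ~3 == 40
	 * bytes, keeping every slot offset aligned to (mask + 1) bytes.
	 */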
	if (dst) {
		uint32_t        age_of_cseg;
		clock_sec_t     cur_ts_sec;
		clock_nsec_t    cur_ts_nsec;

		if (C_SEG_IS_ONDISK(c_seg)) {
#if CONFIG_FREEZE
			if (freezer_incore_cseg_acct) {
				if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
					PAGE_REPLACEMENT_DISALLOWED(FALSE);
					lck_mtx_unlock_always(&c_seg->c_lock);

					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);

					goto retry;
				}

				uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
				if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
					PAGE_REPLACEMENT_DISALLOWED(FALSE);
					lck_mtx_unlock_always(&c_seg->c_lock);

					memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);

					goto retry;
				}
			}
#endif /* CONFIG_FREEZE */
			assert(kdp_mode == FALSE);

			retval = c_seg_swapin(c_seg, FALSE, TRUE);
			assert(retval == 0);

			retval = 1;
		}
		if (c_seg->c_state == C_ON_BAD_Q) {
			assert(c_seg->c_store.c_buffer == NULL);
			*zeroslot = 0;

			retval = -1;
			goto done;
		}

#if POPCOUNT_THE_COMPRESSED_DATA
		unsigned csvpop;
		uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
		if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
			panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
		}
#endif /* POPCOUNT_THE_COMPRESSED_DATA */

#if CHECKSUM_THE_COMPRESSED_DATA
		unsigned csvhash;

		if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
			panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
		}
#endif /* CHECKSUM_THE_COMPRESSED_DATA */

		if (c_rounded_size == PAGE_SIZE) {
			/*
			 * page wasn't compressible... just copy it out
			 */
			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
		} else if (c_size == 4) {
			int32_t         data;
			int32_t         *dptr;

			/*
			 * page was populated with a single value
			 * that didn't fit into our fast hash
			 * so we packed it in as a single non-compressed value
			 * that we need to populate the page with
			 */
			dptr = (int32_t *)(uintptr_t)dst;
			data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
			sv_decompress(dptr, data);
		} else {
			uint32_t        my_cpu_no;
			char            *scratch_buf;

			if (__probable(!kdp_mode)) {
				/*
				 * we're behind the c_seg lock held in spin mode
				 * which means pre-emption is disabled... therefore
				 * the following sequence is atomic and safe
				 */
				my_cpu_no = cpu_number();

				assert(my_cpu_no < compressor_cpus);

				scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
			} else {
				scratch_buf = kdp_compressor_scratch_buf;
			}

			if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
#if defined(__arm__) || defined(__arm64__)
				uint16_t c_codec = cs->c_codec;
				uint32_t inline_popcount;
				if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
				    (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
					retval = -1;
				} else {
#if __ARM_WKDM_POPCNT__
					if (inline_popcount != cs->c_inline_popcount) {
						/*
						 * The codec choice in compression and
						 * decompression must agree, so there
						 * should never be a disagreement in
						 * whether an inline population count
						 * was performed.
						 */
						assert(inline_popcount != C_SLOT_NO_POPCOUNT);
						assert(cs->c_inline_popcount != C_SLOT_NO_POPCOUNT);
						printf("decompression failure from physical region %llx+%05x: popcount mismatch (%d != %d)\n",
						    (unsigned long long)kvtophys((uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset]), c_size,
						    inline_popcount,
						    cs->c_inline_popcount);
						retval = -1;
					}
#else
					assert(inline_popcount == C_SLOT_NO_POPCOUNT);
#endif /* __ARM_WKDM_POPCNT__ */
				}
#endif /* defined(__arm__) || defined(__arm64__) */
			} else {
#if defined(__arm64__)
				__unreachable_ok_push
				if (PAGE_SIZE == 4096) {
					WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
				} else {
					WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
				}
				__unreachable_ok_pop
#else
				WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
				    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
#endif /* defined(__arm64__) */
			}
		}
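		/*
		 * Note: the 4k/16k split above mirrors sv_decompress(); since
		 * PAGE_SIZE may not be a compile-time constant on arm64, both
		 * WKdm variants are emitted and selected at runtime, with
		 * __unreachable_ok_push/pop quieting the dead-branch warnings.
		 */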
#if CHECKSUM_THE_DATA
		if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
#if defined(__arm__) || defined(__arm64__)
			int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
			panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
#else
			panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
#endif
		}
#endif /* CHECKSUM_THE_DATA */

		if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
				OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
			} else {
				OSAddAtomic(1, &overage_decompressions_during_sample_period);
			}

			OSAddAtomic(1, &sample_period_decompression_count);
		}
	}
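	/*
	 * The histogram updated above buckets decompressions by the age of the
	 * owning c_seg within the current sample period; the swapout logic
	 * consumes these counters when picking a target segment age (see
	 * compute_swapout_target_age()).
	 */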
#if CONFIG_FREEZE
	else {
		/*
		 * We are freeing an uncompressed page from this c_seg and so balance the ledgers.
		 */
		if (C_SEG_IS_ONDISK(c_seg)) {
			/*
			 * The compression sweep feature will push out anonymous pages to disk
			 * without going through the freezer path and so those c_segs, while
			 * swapped out, won't have an owner.
			 */
			if (c_seg->c_task_owner) {
				task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
			}

			/*
			 * We are freeing a page in swap without swapping it in. We bump the in-core
			 * count here to simulate a swapin of a page so that we can accurately
			 * decrement it below.
			 */
			OSAddAtomic(1, &c_segment_pages_compressed_incore);
		}
	}
#endif /* CONFIG_FREEZE */

	if (flags & C_KEEP) {
		*zeroslot = 0;
		goto done;
	}
	assert(kdp_mode == FALSE);

	c_seg->c_bytes_unused += c_rounded_size;
	c_seg->c_bytes_used -= c_rounded_size;

	assert(c_seg->c_slots_used);
	c_seg->c_slots_used--;

	PACK_C_SIZE(cs, 0);

	if (c_indx < c_seg->c_firstemptyslot) {
		c_seg->c_firstemptyslot = c_indx;
	}

	OSAddAtomic(-1, &c_segment_pages_compressed);
#if CONFIG_FREEZE
	OSAddAtomic(-1, &c_segment_pages_compressed_incore);
	assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
#endif /* CONFIG_FREEZE */
	if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
		/*
		 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
		 * free of a compressed page (i.e. dst == NULL)
		 */
		OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
	}
	if (c_seg->c_busy_swapping) {
		/*
		 * bypass case for c_busy_swapping...
		 * let the swapin/swapout paths deal with putting
		 * the c_seg on the minor compaction queue if needed
		 */
		assert(c_seg->c_busy);
		goto done;
	}
	assert(!c_seg->c_busy);

	if (c_seg->c_state != C_IS_FILLING) {
		if (c_seg->c_bytes_used == 0) {
			if (!(C_SEG_IS_ONDISK(c_seg))) {
				int     pages_populated;

				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);

				if (pages_populated) {
					assert(c_seg->c_state != C_ON_BAD_Q);
					assert(c_seg->c_store.c_buffer != NULL);

					C_SEG_BUSY(c_seg);
					lck_mtx_unlock_always(&c_seg->c_lock);

					kernel_memory_depopulate(compressor_map,
					    (vm_offset_t) c_seg->c_store.c_buffer,
					    pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);

					lck_mtx_lock_spin_always(&c_seg->c_lock);
					C_SEG_WAKEUP_DONE(c_seg);
				}
				if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q) {
					c_seg_need_delayed_compaction(c_seg, FALSE);
				}
			} else {
				if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
					c_seg_move_to_sparse_list(c_seg);
					consider_defragmenting = TRUE;
				}
			}
		} else if (c_seg->c_on_minorcompact_q) {
			assert(c_seg->c_state != C_ON_BAD_Q);
			assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));

			if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
				c_seg_try_minor_compaction_and_unlock(c_seg);
				need_unlock = FALSE;
			}
		} else if (!(C_SEG_IS_ONDISK(c_seg))) {
			if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
			    C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
				c_seg_need_delayed_compaction(c_seg, FALSE);
			}
		} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
			c_seg_move_to_sparse_list(c_seg);
			consider_defragmenting = TRUE;
		}
	}
done:
	if (__improbable(kdp_mode)) {
		return retval;
	}

	if (need_unlock == TRUE) {
		lck_mtx_unlock_always(&c_seg->c_lock);
	}

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	if (consider_defragmenting == TRUE) {
		vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
	}
#if CONFIG_EMBEDDED
	if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) {
		vm_wake_compactor_swapper();
	}
#endif /* CONFIG_EMBEDDED */

	return retval;
}
int
vm_compressor_get(ppnum_t pn, int *slot, int flags)
{
	c_slot_mapping_t  slot_ptr;
	char    *dst;
	int     zeroslot = 1;
	int     retval;

	dst = pmap_map_compressor_page(pn);
	slot_ptr = (c_slot_mapping_t)slot;

	assert(dst != NULL);

	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
		int32_t         data;
		int32_t         *dptr;

		/*
		 * page was populated with a single value
		 * that found a home in our hash table...
		 * grab that value from the hash and use it
		 * to populate the page
		 */
		dptr = (int32_t *)(uintptr_t)dst;
		data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
		sv_decompress(dptr, data);
		if (!(flags & C_KEEP)) {
			c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);

			OSAddAtomic(-1, &c_segment_pages_compressed);
#if CONFIG_FREEZE
			OSAddAtomic(-1, &c_segment_pages_compressed_incore);
			assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
#endif /* CONFIG_FREEZE */
			*slot = 0;
		}
		if (data) {
			OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
		} else {
			OSAddAtomic(1, &c_segment_svp_zero_decompressions);
		}

		pmap_unmap_compressor_page(pn, dst);
		return 0;
	}

	retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);

	/*
	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
	 */
	if (zeroslot) {
		*slot = 0;
	}

	pmap_unmap_compressor_page(pn, dst);

	/*
	 * returns 0 if we successfully decompressed a page from a segment already in memory
	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
	 * returns -1 if we encountered an error swapping in the segment - decompression failed
	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
	 */
	return retval;
}
#if DEVELOPMENT || DEBUG

void
vm_compressor_inject_error(int *slot)
{
	c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;

	/* No error detection for single-value compression. */
	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
		printf("%s(): cannot inject errors in SV-compressed pages\n", __func__);
		return;
	}

	/* s_cseg is actually "segno+1" */
	const uint32_t c_segno = slot_ptr->s_cseg - 1;

	assert(c_segno < c_segments_available);
	assert(c_segments[c_segno].c_segno >= c_segments_available);

	const c_segment_t c_seg = c_segments[c_segno].c_seg;

	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(&c_seg->c_lock);
	assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);

	const uint16_t c_indx = slot_ptr->s_cindx;
	assert(c_indx < c_seg->c_nextslot);

	/*
	 * To safely make this segment temporarily writable, we need to mark
	 * the segment busy, which allows us to release the segment lock.
	 */
	while (c_seg->c_busy) {
		c_seg_wait_on_busy(c_seg);
		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}
	C_SEG_BUSY(c_seg);

	bool already_writable = (c_seg->c_state == C_IS_FILLING);
	if (!already_writable) {
		/*
		 * Protection update must be performed preemptibly, so temporarily drop
		 * the lock. Having set c_busy will prevent most other concurrent
		 * operations.
		 */
		lck_mtx_unlock_always(&c_seg->c_lock);
		C_SEG_MAKE_WRITEABLE(c_seg);
		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}

	/*
	 * Once we've released the lock following our c_state == C_IS_FILLING check,
	 * c_current_seg_filled() can (re-)write-protect the segment. However, it
	 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
	 * can detect this by re-checking after we've reobtained the lock.
	 */
	if (already_writable && c_seg->c_state != C_IS_FILLING) {
		lck_mtx_unlock_always(&c_seg->c_lock);
		C_SEG_MAKE_WRITEABLE(c_seg);
		lck_mtx_lock_spin_always(&c_seg->c_lock);
		already_writable = false;
		/* Segment can't be freed while c_busy is set. */
		assert(c_seg->c_state != C_IS_FILLING);
	}

	c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
	/* assume that the compressed data holds at least one int32_t */
	assert(UNPACK_C_SIZE(cs) > sizeof(*data));

	/*
	 * This bit is known to be in the payload of a MISS packet resulting from
	 * the test pattern used by decompression_failure.c.
	 * Flipping it should result in many corrupted bits in the test page.
	 */
	data[0] ^= 0x00000100;

	if (!already_writable) {
		lck_mtx_unlock_always(&c_seg->c_lock);
		C_SEG_WRITE_PROTECT(c_seg);
		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}

	C_SEG_WAKEUP_DONE(c_seg);
	lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}

#endif /* DEVELOPMENT || DEBUG */
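/*
 * Illustrative sketch (not compiled) of how a DEVELOPMENT || DEBUG test might
 * drive the injection above: corrupt a compressed slot, then decompress and
 * look for a failure return.  What actually trips (an error return, a
 * checksum panic, or silent garbage) depends on the codec and on
 * CHECKSUM_THE_DATA, so treat this as an outline only.
 */
#if 0
static void
example_inject_and_verify(ppnum_t pn, int *slot)
{
	int retval;

	vm_compressor_inject_error(slot);

	/* with a codec that detects corruption, expect a non-zero return */
	retval = vm_compressor_get(pn, slot, C_KEEP | C_DONT_BLOCK);
	assert(retval != 0);
}
#endif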
int
vm_compressor_free(int *slot, int flags)
{
	c_slot_mapping_t slot_ptr;
	int     zeroslot = 1;
	int     retval;

	assert(flags == 0 || flags == C_DONT_BLOCK);

	slot_ptr = (c_slot_mapping_t)slot;

	if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
		c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
		OSAddAtomic(-1, &c_segment_pages_compressed);
#if CONFIG_FREEZE
		OSAddAtomic(-1, &c_segment_pages_compressed_incore);
		assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
#endif /* CONFIG_FREEZE */

		*slot = 0;
		return 0;
	}
	retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
	/*
	 * returns 0 if we successfully freed the specified compressed page
	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
	 */

	if (retval == 0) {
		*slot = 0;
	} else {
		assert(retval == -2);
	}

	return retval;
}
int
vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
{
	char    *src;
	int     retval;

	src = pmap_map_compressor_page(pn);
	assert(src != NULL);

	retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);
	pmap_unmap_compressor_page(pn, src);

	return retval;
}
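/*
 * Illustrative sketch (not compiled): a minimal put/get round trip in the
 * style of the pageout and fault paths.  The int-sized slot word is owned by
 * the caller and, per the comments in vm_compressor_get() above, is zeroed
 * when the get consumes the compressed copy.
 */
#if 0
static void
example_round_trip(ppnum_t pn, void **chead, char *scratch)
{
	int slot = 0;
	int retval;

	if (vm_compressor_put(pn, &slot, chead, scratch) == 0) {
		/* later, on fault: decompress and release the slot */
		retval = vm_compressor_get(pn, &slot, 0);
		assert(retval == 0 || retval == 1); /* 1 == had to swap in */
		assert(slot == 0);                  /* consumed, no C_KEEP */
	}
}
#endif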
void
vm_compressor_transfer(
	int     *dst_slot_p,
	int     *src_slot_p)
{
	c_slot_mapping_t        dst_slot, src_slot;
	c_segment_t             c_seg;
	uint16_t                c_indx;
	c_slot_t                cs;

	src_slot = (c_slot_mapping_t) src_slot_p;

	if (src_slot->s_cseg == C_SV_CSEG_ID) {
		*dst_slot_p = *src_slot_p;
		*src_slot_p = 0;
		return;
	}
	dst_slot = (c_slot_mapping_t) dst_slot_p;
Retry:
	PAGE_REPLACEMENT_DISALLOWED(TRUE);
	/* get segment for src_slot */
	c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
	/* lock segment */
	lck_mtx_lock_spin_always(&c_seg->c_lock);
	/* wait if it's busy */
	if (c_seg->c_busy && !c_seg->c_busy_swapping) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
		c_seg_wait_on_busy(c_seg);
		goto Retry;
	}
	/* find the c_slot */
	c_indx = src_slot->s_cindx;
	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	/* point the c_slot back to dst_slot instead of src_slot */
	C_SLOT_ASSERT_PACKABLE(dst_slot);
	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);

	*dst_slot_p = *src_slot_p;
	*src_slot_p = 0;

	lck_mtx_unlock_always(&c_seg->c_lock);
	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}
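/*
 * The repoint above is the essential step of the transfer: each c_slot keeps
 * a packed back pointer to the int-sized slot word that references it, so
 * that minor compaction can update the owner when a slot moves within a
 * segment.  Once the word itself relocates from *src_slot_p to *dst_slot_p,
 * the back pointer must chase it or a later compaction would scribble on the
 * stale location.
 */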
#if CONFIG_FREEZE

int     freezer_finished_filling = 0;

void
vm_compressor_finished_filling(
	void    **current_chead)
{
	c_segment_t     c_seg;

	if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
		return;
	}

	assert(c_seg->c_state == C_IS_FILLING);

	lck_mtx_lock_spin_always(&c_seg->c_lock);

	c_current_seg_filled(c_seg, (c_segment_t *)current_chead);

	lck_mtx_unlock_always(&c_seg->c_lock);

	freezer_finished_filling++;
}


/*
 * This routine is used to transfer the compressed chunks from
 * the c_seg/cindx pointed to by slot_p into a new c_seg headed
 * by the current_chead and a new cindx within that c_seg.
 *
 * Currently, this routine is only used by the "freezer backed by
 * compressor with swap" mode to create a series of c_segs that
 * only contain compressed data belonging to one task. So, we
 * move a task's previously compressed data into a set of new
 * c_segs which will also hold the task's yet to be compressed data.
 */
kern_return_t
vm_compressor_relocate(
	void            **current_chead,
	int             *slot_p)
{
	c_slot_mapping_t        slot_ptr;
	c_slot_mapping_t        src_slot;
	uint32_t                c_rounded_size;
	uint32_t                c_size;
	uint16_t                dst_slot;
	c_slot_t                c_src;
	c_slot_t                c_dst;
	uint16_t                c_indx;
	c_segment_t             c_seg_dst = NULL;
	c_segment_t             c_seg_src = NULL;
	kern_return_t           kr = KERN_SUCCESS;

	src_slot = (c_slot_mapping_t) slot_p;

	if (src_slot->s_cseg == C_SV_CSEG_ID) {
		/*
		 * no need to relocate... this is a page full of a single
		 * value which is hashed to a single entry not contained
		 * in a c_segment_t
		 */
		return kr;
	}

Relookup_dst:
	c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
	/*
	 * returns with c_seg lock held
	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
	 * c_nextslot has been allocated and
	 * c_store.c_buffer populated
	 */
	if (c_seg_dst == NULL) {
		/*
		 * Out of compression segments?
		 */
		kr = KERN_RESOURCE_SHORTAGE;
		goto out;
	}

	assert(c_seg_dst->c_busy == 0);

	C_SEG_BUSY(c_seg_dst);

	dst_slot = c_seg_dst->c_nextslot;

	lck_mtx_unlock_always(&c_seg_dst->c_lock);

Relookup_src:
	c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;

	assert(c_seg_dst != c_seg_src);

	lck_mtx_lock_spin_always(&c_seg_src->c_lock);

	if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
	    c_seg_src->c_state == C_IS_FILLING) {
		/*
		 * Skip this page if:
		 * a) the src c_seg is already on-disk (or on its way there).
		 *    A "thaw" can mark a process as eligible for
		 *    another freeze cycle without bringing any of
		 *    its swapped out c_segs back from disk (because
		 *    that is done on-demand).
		 *    Or, this page may be mapped elsewhere in the task's map,
		 *    and we may have marked it for swap already.
		 *
		 * b) the src c_seg is being filled by the compressor
		 *    thread. We don't want the added latency of waiting for
		 *    this c_seg in the freeze path and so we skip it.
		 */

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		lck_mtx_unlock_always(&c_seg_src->c_lock);

		c_seg_src = NULL;

		goto out;
	}

	if (c_seg_src->c_busy) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
		c_seg_wait_on_busy(c_seg_src);

		c_seg_src = NULL;

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		goto Relookup_src;
	}

	C_SEG_BUSY(c_seg_src);

	lck_mtx_unlock_always(&c_seg_src->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	/* find the c_slot */
	c_indx = src_slot->s_cindx;

	c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);

	c_size = UNPACK_C_SIZE(c_src);

	assert(c_size);

	if (c_size > (uint32_t)(C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
		/*
		 * This segment is full. We need a new one.
		 */
		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg_src->c_lock);
		C_SEG_WAKEUP_DONE(c_seg_src);
		lck_mtx_unlock_always(&c_seg_src->c_lock);

		c_seg_src = NULL;

		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);

		assert(c_seg_dst->c_busy);
		assert(c_seg_dst->c_state == C_IS_FILLING);
		assert(!c_seg_dst->c_on_minorcompact_q);

		c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
		assert(*current_chead == NULL);

		C_SEG_WAKEUP_DONE(c_seg_dst);

		lck_mtx_unlock_always(&c_seg_dst->c_lock);

		c_seg_dst = NULL;

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		goto Relookup_dst;
	}

	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

	memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
	/*
	 * Is platform alignment actually necessary since wkdm aligns its output?
	 */
	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

	cslot_copy(c_dst, c_src);
	c_dst->c_offset = c_seg_dst->c_nextoffset;

	if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
		c_seg_dst->c_firstemptyslot++;
	}

	c_seg_dst->c_slots_used++;
	c_seg_dst->c_nextslot++;
	c_seg_dst->c_bytes_used += c_rounded_size;
	c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

	PACK_C_SIZE(c_src, 0);

	c_seg_src->c_bytes_used -= c_rounded_size;
	c_seg_src->c_bytes_unused += c_rounded_size;

	assert(c_seg_src->c_slots_used);
	c_seg_src->c_slots_used--;

	if (c_indx < c_seg_src->c_firstemptyslot) {
		c_seg_src->c_firstemptyslot = c_indx;
	}

	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);

	PAGE_REPLACEMENT_ALLOWED(TRUE);

	slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
	slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
	slot_ptr->s_cindx = dst_slot;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

out:
	if (c_seg_src) {
		lck_mtx_lock_spin_always(&c_seg_src->c_lock);

		C_SEG_WAKEUP_DONE(c_seg_src);

		if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
			if (!c_seg_src->c_on_minorcompact_q) {
				c_seg_need_delayed_compaction(c_seg_src, FALSE);
			}
		}

		lck_mtx_unlock_always(&c_seg_src->c_lock);
	}
	if (c_seg_dst) {
		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg_dst->c_lock);

		if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
			/*
			 * Nearing or exceeded maximum slot and offset capacity.
			 */
			assert(c_seg_dst->c_busy);
			assert(c_seg_dst->c_state == C_IS_FILLING);
			assert(!c_seg_dst->c_on_minorcompact_q);

			c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
			assert(*current_chead == NULL);
		}

		C_SEG_WAKEUP_DONE(c_seg_dst);

		lck_mtx_unlock_always(&c_seg_dst->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(FALSE);
	}

	return kr;
}
#endif /* CONFIG_FREEZE */
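/*
 * Illustrative sketch (not compiled) of the freezer flow that drives
 * vm_compressor_relocate() above: each compressed slot belonging to the task
 * being frozen is relocated into the freezer's own chead, and
 * vm_compressor_finished_filling() then seals the final partially-filled
 * segment.  The real loop lives in the freezer pageout path; names here are
 * hypothetical.
 */
#if 0
static kern_return_t
example_freeze_task_slots(void **chead, int **slots, unsigned int nslots)
{
	kern_return_t kr = KERN_SUCCESS;
	unsigned int i;

	for (i = 0; i < nslots && kr == KERN_SUCCESS; i++) {
		kr = vm_compressor_relocate(chead, slots[i]);
	}
	vm_compressor_finished_filling(chead);

	return kr;
}
#endif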