/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif /* CONFIG_PHANTOM_CACHE */

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <mach/mach_host.h>		/* for host_info() */
#include <kern/ledger.h>

#include <default_pager/default_pager_alerts.h>
#include <default_pager/default_pager_object_server.h>

#include <IOKit/IOHibernatePrivate.h>
/*
 * vm_compressor_mode has a hierarchy of control to set its value.
 * boot-args are checked first, then device-tree, and finally
 * the default value that is defined below. See vm_fault_init() for
 * the boot-arg & device-tree code.
 */
extern ipc_port_t min_pages_trigger_port;
extern lck_mtx_t paging_segments_lock;
#define PSL_LOCK()	lck_mtx_lock(&paging_segments_lock)
#define PSL_UNLOCK()	lck_mtx_unlock(&paging_segments_lock)

int		vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
int		vm_compression_limit = 0;

extern boolean_t vm_swap_up;
extern void	vm_pageout_io_throttle(void);
#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
#endif

struct c_slot {
	uint64_t	c_offset:C_SEG_OFFSET_BITS,
			c_size:12,
			c_packed_ptr:36;
#if CHECKSUM_THE_DATA
	unsigned int	c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int	c_hash_compressed_data;
#endif
};

#define UNPACK_C_SIZE(cs)	((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define PACK_C_SIZE(cs, size)	(cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
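/*
 * c_size is a 12-bit field, so a full PAGE_SIZE byte count (4096 with 4K
 * pages) would not fit.  Since a stored size of 0 already means "empty
 * slot" (see the PACK_C_SIZE(cs, 0) calls in the compaction paths below),
 * PAGE_SIZE is recorded as PAGE_SIZE - 1 and mapped back on extraction:
 *
 *	PACK_C_SIZE(cs, PAGE_SIZE);	stores PAGE_SIZE - 1 in cs->c_size
 *	UNPACK_C_SIZE(cs);		yields PAGE_SIZE again
 */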
struct c_slot_mapping {
	uint32_t	s_cseg:22, 	/* segment number + 1 */
			s_cindx:10;	/* index in the segment */
};

typedef struct c_slot_mapping *c_slot_mapping_t;

union c_segu {
	c_segment_t	c_seg;
	uint32_t	c_segno;
};

#define C_SLOT_PACK_PTR(ptr)		(((uintptr_t)ptr - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS) >> 2)
#define C_SLOT_UNPACK_PTR(cslot)	((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)
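/*
 * Slot back-pointers are 4-byte aligned kernel addresses above
 * VM_MIN_KERNEL_AND_KEXT_ADDRESS, so the low 2 bits carry no information
 * and the base can be subtracted off: what remains fits in the 36-bit
 * c_packed_ptr field.  Unpacking shifts back and re-adds the base;
 * vm_compressor_init() sanity-checks this round trip against both ends
 * of the zone map before the compressor is brought up.
 */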
uint32_t	c_segment_count = 0;

uint64_t	c_generation_id = 0;
uint64_t	c_generation_id_flush_barrier;

#define		HIBERNATE_FLUSHING_SECS_TO_COMPLETE	120

boolean_t	hibernate_no_swapspace = FALSE;
clock_sec_t	hibernate_flushing_deadline = 0;

#if TRACK_BAD_C_SEGMENTS
queue_head_t	c_bad_list_head;
uint32_t	c_bad_count = 0;
#endif

queue_head_t	c_age_list_head;
queue_head_t	c_swapout_list_head;
queue_head_t	c_swappedin_list_head;
queue_head_t	c_swappedout_list_head;
queue_head_t	c_swappedout_sparse_list_head;

uint32_t	c_age_count = 0;
uint32_t	c_swapout_count = 0;
uint32_t	c_swappedin_count = 0;
uint32_t	c_swappedout_count = 0;
uint32_t	c_swappedout_sparse_count = 0;

queue_head_t	c_minor_list_head;
uint32_t	c_minor_count = 0;
union c_segu	*c_segments;
caddr_t		c_segments_next_page;
boolean_t	c_segments_busy;
uint32_t	c_segments_available;
uint32_t	c_segments_limit;
uint32_t	c_segments_nearing_limit;
uint32_t	c_segment_pages_compressed;
uint32_t	c_segment_pages_compressed_limit;
uint32_t	c_segment_pages_compressed_nearing_limit;
uint32_t	c_free_segno_head = (uint32_t)-1;
uint32_t	vm_compressor_minorcompact_threshold_divisor = 10;
uint32_t	vm_compressor_majorcompact_threshold_divisor = 10;
uint32_t	vm_compressor_unthrottle_threshold_divisor = 10;
uint32_t	vm_compressor_catchup_threshold_divisor = 10;

#define		C_SEGMENTS_PER_PAGE	(PAGE_SIZE / sizeof(union c_segu))
lck_grp_attr_t	vm_compressor_lck_grp_attr;
lck_attr_t	vm_compressor_lck_attr;
lck_grp_t	vm_compressor_lck_grp;

#if __i386__ || __x86_64__
lck_mtx_t	*c_list_lock;
#else /* __i386__ || __x86_64__ */
lck_spin_t	*c_list_lock;
#endif /* __i386__ || __x86_64__ */

lck_rw_t	c_master_lock;
boolean_t	decompressions_blocked = FALSE;

zone_t		compressor_segment_zone;
int		c_compressor_swap_trigger = 0;

uint32_t	compressor_cpus;
char		*compressor_scratch_bufs;
clock_sec_t	start_of_sample_period_sec = 0;
clock_nsec_t	start_of_sample_period_nsec = 0;
clock_sec_t	start_of_eval_period_sec = 0;
clock_nsec_t	start_of_eval_period_nsec = 0;
uint32_t	sample_period_decompression_count = 0;
uint32_t	sample_period_compression_count = 0;
uint32_t	last_eval_decompression_count = 0;
uint32_t	last_eval_compression_count = 0;

#define		DECOMPRESSION_SAMPLE_MAX_AGE	(60 * 30)

uint32_t	swapout_target_age = 0;
uint32_t	age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t	overage_decompressions_during_sample_period = 0;
void		do_fastwake_warmup(void);
boolean_t	fastwake_warmup = FALSE;
boolean_t	fastwake_recording_in_progress = FALSE;
clock_sec_t	dont_trim_until_ts = 0;

uint64_t	c_segment_warmup_count;
uint64_t	first_c_segment_to_warm_generation_id = 0;
uint64_t	last_c_segment_to_warm_generation_id = 0;
boolean_t	hibernate_flushing = FALSE;

int64_t		c_segment_input_bytes __attribute__((aligned(8))) = 0;
int64_t		c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
int64_t		compressor_bytes_used __attribute__((aligned(8))) = 0;
uint64_t	compressor_kvspace_used __attribute__((aligned(8))) = 0;
uint64_t	compressor_kvwaste_limit = 0;
static boolean_t compressor_needs_to_swap(void);
static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_age_swapped_in_segments(boolean_t);

boolean_t vm_compressor_low_on_space(void);

void compute_swapout_target_age(void);

boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);

int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
void c_seg_need_delayed_compaction(c_segment_t);

void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);

boolean_t c_seg_try_free(c_segment_t);
void	  c_seg_free(c_segment_t);
void	  c_seg_free_locked(c_segment_t);


uint64_t vm_available_memory(void);
uint64_t vm_compressor_pages_compressed(void);

extern unsigned int	dp_pages_free, dp_pages_reserve;
uint64_t
vm_available_memory(void)
{
	return (((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64);
}


uint64_t
vm_compressor_pages_compressed(void)
{
	return (c_segment_pages_compressed * PAGE_SIZE_64);
}
boolean_t
vm_compression_available(void)
{
	if ( !(COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE))
		return (FALSE);

	if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit)
		return (FALSE);

	return (TRUE);
}


boolean_t
vm_compressor_low_on_space(void)
{
	if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
	    (c_segment_count > c_segments_nearing_limit))
		return (TRUE);

	return (FALSE);
}
boolean_t
vm_wants_task_throttled(task_t task)
{
	if (task == kernel_task)
		return (FALSE);

	if (vm_compressor_mode == COMPRESSED_PAGER_IS_ACTIVE || vm_compressor_mode == DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) &&
		    (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4))
			return (TRUE);
	} else {
		if (((dp_pages_free + dp_pages_reserve < 2000) && VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) &&
		    get_task_resident_size(task) > (((AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE) / 5))
			return (TRUE);
	}
	return (FALSE);
}
void
vm_compressor_init_locks(void)
{
	lck_grp_attr_setdefault(&vm_compressor_lck_grp_attr);
	lck_grp_init(&vm_compressor_lck_grp, "vm_compressor", &vm_compressor_lck_grp_attr);
	lck_attr_setdefault(&vm_compressor_lck_attr);

	lck_rw_init(&c_master_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
}
void
vm_decompressor_lock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = TRUE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

void
vm_decompressor_unlock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = FALSE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

	thread_wakeup((event_t)&decompressions_blocked);
}
void
vm_compressor_init(void)
{
	thread_t	thread;
	struct c_slot	cs_dummy;
	c_slot_t	cs = &cs_dummy;

	/*
	 * ensure that any pointer that gets created from
	 * the vm_page zone can be packed properly
	 */
	cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_min_address);

	if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_min_address)
		panic("C_SLOT_UNPACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address);

	cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_max_address);

	if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_max_address)
		panic("C_SLOT_UNPACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address);


	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);

	PE_parse_boot_argn("vm_compression_limit", &vm_compression_limit, sizeof (vm_compression_limit));

	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
		vm_compressor_minorcompact_threshold_divisor = 11;
		vm_compressor_majorcompact_threshold_divisor = 13;
		vm_compressor_unthrottle_threshold_divisor = 20;
		vm_compressor_catchup_threshold_divisor = 35;
	} else {
		vm_compressor_minorcompact_threshold_divisor = 20;
		vm_compressor_majorcompact_threshold_divisor = 25;
		vm_compressor_unthrottle_threshold_divisor = 35;
		vm_compressor_catchup_threshold_divisor = 50;
	}
	/*
	 * vm_page_init_lck_grp is now responsible for calling vm_compressor_init_locks
	 * c_master_lock needs to be available early so that "vm_page_find_contiguous" can
	 * use PAGE_REPLACEMENT_ALLOWED to coordinate with the compressor.
	 */

#if __i386__ || __x86_64__
	c_list_lock = lck_mtx_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr);
#else /* __i386__ || __x86_64__ */
	c_list_lock = lck_spin_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr);
#endif /* __i386__ || __x86_64__ */

#if TRACK_BAD_C_SEGMENTS
	queue_init(&c_bad_list_head);
#endif
	queue_init(&c_age_list_head);
	queue_init(&c_minor_list_head);
	queue_init(&c_swapout_list_head);
	queue_init(&c_swappedin_list_head);
	queue_init(&c_swappedout_list_head);
	queue_init(&c_swappedout_sparse_list_head);

	compressor_segment_zone = zinit(sizeof (struct c_segment),
					128000 * sizeof (struct c_segment),
					8192, "compressor_segment");
	zone_change(compressor_segment_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_segment_zone, Z_NOENCRYPT, TRUE);


	c_free_segno_head = -1;
	c_segments_available = 0;

	if (vm_compression_limit == 0) {
		c_segment_pages_compressed_limit = (uint32_t)((max_mem / PAGE_SIZE)) * vm_scale;

#define	OLD_SWAP_LIMIT	(1024 * 1024 * 16)
#define MAX_SWAP_LIMIT	(1024 * 1024 * 128)

		if (c_segment_pages_compressed_limit > (OLD_SWAP_LIMIT))
			c_segment_pages_compressed_limit = OLD_SWAP_LIMIT;

		if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE_64))
			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE_64);
	} else {
		if (vm_compression_limit < MAX_SWAP_LIMIT)
			c_segment_pages_compressed_limit = vm_compression_limit;
		else
			c_segment_pages_compressed_limit = MAX_SWAP_LIMIT;
	}
	if ((c_segments_limit = c_segment_pages_compressed_limit / (C_SEG_BUFSIZE / PAGE_SIZE)) > C_SEG_MAX_LIMIT)
		c_segments_limit = C_SEG_MAX_LIMIT;

	c_segment_pages_compressed_nearing_limit = (c_segment_pages_compressed_limit * 98) / 100;
	c_segments_nearing_limit = (c_segments_limit * 98) / 100;

	compressor_kvwaste_limit = (vm_map_max(kernel_map) - vm_map_min(kernel_map)) / 16;

	c_segments_busy = FALSE;

	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY) != KERN_SUCCESS)
		panic("vm_compressor_init: kernel_memory_allocate failed\n");

	c_segments_next_page = (caddr_t)c_segments;

	{
		host_basic_info_data_t hinfo;
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

#define BSD_HOST 1
		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		compressor_cpus = hinfo.max_cpus;

		compressor_scratch_bufs = kalloc(compressor_cpus * WKdm_SCRATCH_BUF_SIZE);
	}

	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
					 BASEPRI_PREEMPT - 1, &thread) != KERN_SUCCESS) {
		panic("vm_compressor_swap_trigger_thread: create failed");
	}
	thread->options |= TH_OPT_VMPRIV;

	thread_deallocate(thread);

	assert(default_pager_init_flag == 0);

	if (vm_pageout_internal_start() != KERN_SUCCESS) {
		panic("vm_compressor_init: Failed to start the internal pageout thread.\n");
	}

	if ((vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP) ||
	    (vm_compressor_mode == VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP)) {
		vm_compressor_swap_init();
	}

#if CONFIG_FREEZE
	memorystatus_freeze_enabled = TRUE;
#endif /* CONFIG_FREEZE */

	default_pager_init_flag = 1;

	vm_page_reactivate_all_throttled();
}
#if VALIDATE_C_SEGMENTS

static void
c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
{
	int		c_indx;
	int32_t		bytes_used;
	int32_t		bytes_unused;
	uint32_t	c_rounded_size;
	uint32_t	c_size;
	c_slot_t	cs;

	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
		c_indx = c_seg->c_firstemptyslot;
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		if (cs == NULL)
			panic("c_seg_validate: no slot backing c_firstemptyslot");

		if (cs->c_size)
			panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs->c_size);
	}
	bytes_used = 0;
	bytes_unused = 0;

	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {

		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		c_size = UNPACK_C_SIZE(cs);

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		bytes_used += c_rounded_size;

#if CHECKSUM_THE_COMPRESSED_DATA
		if (c_size && cs->c_hash_compressed_data != hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))
			panic("compressed data doesn't match original");
#endif
	}

	if (bytes_used != c_seg->c_bytes_used)
		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d\n", bytes_used, c_seg->c_bytes_used);

	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset))
		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
		      (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);

	if (must_be_compact) {
		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset))
			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
			      (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
	}
}

#endif /* VALIDATE_C_SEGMENTS */
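/*
 * Lock ordering for the routines that follow: c_list_lock is taken
 * before a segment's c_lock.  Callers arrive holding only the segment
 * lock, so these routines try-acquire c_list_lock; if that fails, they
 * mark the segment busy, drop its lock, and re-take both locks in the
 * correct order, waking any waiters via C_SEG_WAKEUP_DONE when finished.
 */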
void
c_seg_need_delayed_compaction(c_segment_t c_seg)
{
	boolean_t	clear_busy = FALSE;

	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	if (!c_seg->c_on_minorcompact_q && !c_seg->c_ondisk && !c_seg->c_on_swapout_q) {
		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
		c_seg->c_on_minorcompact_q = 1;
		c_minor_count++;
	}
	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE)
		C_SEG_WAKEUP_DONE(c_seg);
}
unsigned int c_seg_moved_to_sparse_list = 0;

void
c_seg_move_to_sparse_list(c_segment_t c_seg)
{
	boolean_t	clear_busy = FALSE;

	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	assert(c_seg->c_ondisk);
	assert(c_seg->c_on_swappedout_q);
	assert(!c_seg->c_on_swappedout_sparse_q);

	queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
	c_seg->c_on_swappedout_q = 0;
	c_swappedout_count--;

	c_seg_insert_into_q(&c_swappedout_sparse_list_head, c_seg);
	c_seg->c_on_swappedout_sparse_q = 1;
	c_swappedout_sparse_count++;

	c_seg_moved_to_sparse_list++;

	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE)
		C_SEG_WAKEUP_DONE(c_seg);
}
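/*
 * insert c_seg into qhead, keeping the queue ordered by ascending
 * c_generation_id (oldest segments first).
 */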
void
c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
{
	c_segment_t c_seg_next;

	if (queue_empty(qhead)) {
		queue_enter(qhead, c_seg, c_segment_t, c_age_list);
	} else {
		c_seg_next = (c_segment_t)queue_first(qhead);

		while (TRUE) {

			if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
				queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
				break;
			}
			c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);

			if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
				queue_enter(qhead, c_seg, c_segment_t, c_age_list);
				break;
			}
		}
	}
}
int	try_minor_compaction_failed = 0;
int	try_minor_compaction_succeeded = 0;

void
c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
{

	assert(c_seg->c_on_minorcompact_q);
	/*
	 * c_seg is currently on the delayed minor compaction
	 * queue and we have c_seg locked... if we can get the
	 * c_list_lock w/o blocking (if we blocked we could deadlock
	 * because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the delayed list and free it directly
	 */
	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_minor_compaction_failed++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	} else {
		try_minor_compaction_succeeded++;

		C_SEG_BUSY(c_seg);
		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
	}
}
int
c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
{
	int	c_seg_freed;

	assert(c_seg->c_busy);

	if (!c_seg->c_on_minorcompact_q) {
		if (clear_busy == TRUE)
			C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);

		return (0);
	}
	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
	c_seg->c_on_minorcompact_q = 0;
	c_minor_count--;

	lck_mtx_unlock_always(c_list_lock);

	if (disallow_page_replacement == TRUE) {
		lck_mtx_unlock_always(&c_seg->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}
	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);

	if (disallow_page_replacement == TRUE)
		PAGE_REPLACEMENT_DISALLOWED(FALSE);

	if (need_list_lock == TRUE)
		lck_mtx_lock_spin_always(c_list_lock);

	return (c_seg_freed);
}
void
c_seg_wait_on_busy(c_segment_t c_seg)
{
	c_seg->c_wanted = 1;
	assert_wait((event_t) (c_seg), THREAD_UNINT);

	lck_mtx_unlock_always(&c_seg->c_lock);
	thread_block(THREAD_CONTINUE_NULL);
}
int	try_free_succeeded = 0;
int	try_free_failed = 0;

boolean_t
c_seg_try_free(c_segment_t c_seg)
{
	/*
	 * c_seg is currently on the delayed minor compaction
	 * or the swapped out sparse queue and we have c_seg locked...
	 * if we can get the c_list_lock w/o blocking (if we blocked we
	 * could deadlock because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the appropriate queue and free it
	 */
	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_free_failed++;
		return (FALSE);
	}
	if (c_seg->c_on_minorcompact_q) {
		queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
		c_seg->c_on_minorcompact_q = 0;
		c_minor_count--;
	} else {
		assert(c_seg->c_on_swappedout_sparse_q);

		/*
		 * c_seg_free_locked will remove it from the swappedout sparse list
		 */
	}
	if (!c_seg->c_busy_swapping)
		C_SEG_BUSY(c_seg);

	c_seg_free_locked(c_seg);

	try_free_succeeded++;

	return (TRUE);
}
void
c_seg_free(c_segment_t c_seg)
{
	assert(c_seg->c_busy);

	lck_mtx_unlock_always(&c_seg->c_lock);
	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	c_seg_free_locked(c_seg);
}
void
c_seg_free_locked(c_segment_t c_seg)
{
	int		segno;
	int		pages_populated = 0;
	int		i;
	int32_t		*c_buffer = NULL;
	uint64_t	c_swap_handle = 0;

	assert(!c_seg->c_on_minorcompact_q);

	if (c_seg->c_on_age_q) {
		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_age_q = 0;
		c_age_count--;
	} else if (c_seg->c_on_swappedin_q) {
		queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedin_q = 0;
		c_swappedin_count--;
	} else if (c_seg->c_on_swapout_q) {
		queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swapout_q = 0;
		c_swapout_count--;
		thread_wakeup((event_t)&compaction_swapper_running);
	} else if (c_seg->c_on_swappedout_q) {
		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedout_q = 0;
		c_swappedout_count--;
	} else if (c_seg->c_on_swappedout_sparse_q) {
		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedout_sparse_q = 0;
		c_swappedout_sparse_count--;
	}
#if TRACK_BAD_C_SEGMENTS
	else if (c_seg->c_on_bad_q) {
		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_bad_q = 0;
		c_bad_count--;
	}
#endif
	segno = c_seg->c_mysegno;
	c_segments[segno].c_segno = c_free_segno_head;
	c_free_segno_head = segno;
	c_segment_count--;

	lck_mtx_unlock_always(c_list_lock);

	if (c_seg->c_wanted) {
		thread_wakeup((event_t) (c_seg));
		c_seg->c_wanted = 0;
	}
	if (c_seg->c_busy_swapping) {
		c_seg->c_must_free = 1;

		lck_mtx_unlock_always(&c_seg->c_lock);
		return;
	}
	if (c_seg->c_ondisk == 0) {
		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;

		c_buffer = c_seg->c_store.c_buffer;
		c_seg->c_store.c_buffer = NULL;
	} else {
		/*
		 * Free swap space on disk.
		 */
		c_swap_handle = c_seg->c_store.c_swap_handle;
		c_seg->c_store.c_swap_handle = (uint64_t)-1;
	}
	lck_mtx_unlock_always(&c_seg->c_lock);

	if (c_buffer) {
		if (pages_populated)
			kernel_memory_depopulate(kernel_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);

		kmem_free(kernel_map, (vm_offset_t) c_buffer, C_SEG_ALLOCSIZE);
		OSAddAtomic64(-C_SEG_ALLOCSIZE, &compressor_kvspace_used);

	} else if (c_swap_handle)
		vm_swap_free(c_swap_handle);

#if __i386__ || __x86_64__
	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
#else /* __i386__ || __x86_64__ */
	lck_spin_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
#endif /* __i386__ || __x86_64__ */

	for (i = 0; i < C_SEG_SLOT_ARRAYS; i++) {
		if (c_seg->c_slots[i] == 0)
			break;

		kfree((char *)c_seg->c_slots[i], sizeof(struct c_slot) * C_SEG_SLOT_ARRAY_SIZE);
	}
	zfree(compressor_segment_zone, c_seg);
}
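/*
 * c_seg_trim_tail: walk backward from c_nextslot discarding empty tail
 * slots, then pull c_nextoffset and c_populated_offset back to the last
 * live slot; c_seg_trim_page_count accumulates the number of pages this
 * makes reclaimable.
 */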
int c_seg_trim_page_count = 0;

void
c_seg_trim_tail(c_segment_t c_seg)
{
	c_slot_t	cs;
	uint32_t	c_size;
	uint32_t	c_offset;
	uint32_t	c_rounded_size;
	uint16_t	current_nextslot;
	uint32_t	current_populated_offset;

	if (c_seg->c_bytes_used == 0)
		return;
	current_nextslot = c_seg->c_nextslot;
	current_populated_offset = c_seg->c_populated_offset;

	while (c_seg->c_nextslot) {

		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));

		c_size = UNPACK_C_SIZE(cs);

		if (c_size) {
			if (current_nextslot != c_seg->c_nextslot) {
				c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);

				c_seg->c_nextoffset = c_offset;
				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);

				if (c_seg->c_firstemptyslot > c_seg->c_nextslot)
					c_seg->c_firstemptyslot = c_seg->c_nextslot;

				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
							   round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE);
			}
			break;
		}
		c_seg->c_nextslot--;
	}
	assert(c_seg->c_nextslot);
}
int
c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
{
	c_slot_t	c_dst;
	c_slot_t	c_src;
	c_slot_mapping_t slot_ptr;
	uint32_t	c_offset = 0;
	uint32_t	old_populated_offset;
	uint32_t	c_rounded_size;
	uint32_t	c_size;
	int		c_indx = 0;
	int		i;
	int		c_seg_freed = 0;
	boolean_t	need_unlock = TRUE;

	assert(c_seg->c_busy);

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, FALSE);
#endif
	if (c_seg->c_bytes_used == 0) {
		c_seg_free(c_seg);
		return (1);
	}
	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE)
		goto done;

#if VALIDATE_C_SEGMENTS
	c_seg->c_was_minor_compacted++;
#endif
	c_indx = c_seg->c_firstemptyslot;
	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	old_populated_offset = c_seg->c_populated_offset;
	c_offset = c_dst->c_offset;

	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {

		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0)
			continue;

		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_size);

#if CHECKSUM_THE_DATA
		c_dst->c_hash_data = c_src->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
		c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
#endif
		c_dst->c_size = c_src->c_size;
		c_dst->c_packed_ptr = c_src->c_packed_ptr;
		c_dst->c_offset = c_offset;

		slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst);
		slot_ptr->s_cindx = c_indx;

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
		PACK_C_SIZE(c_src, 0);
		c_indx++;

		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	}
	c_seg->c_firstemptyslot = c_indx;
	c_seg->c_nextslot = c_indx;
	c_seg->c_nextoffset = c_offset;
	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
	c_seg->c_bytes_unused = 0;

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, TRUE);
#endif

	if (old_populated_offset > c_seg->c_populated_offset) {
		uint32_t	gc_size;
		int32_t		*gc_ptr;

		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];

		lck_mtx_unlock_always(&c_seg->c_lock);

		kernel_memory_depopulate(kernel_map, (vm_offset_t)gc_ptr, gc_size, KMA_COMPRESSOR);

		if (clear_busy == TRUE)
			lck_mtx_lock_spin_always(&c_seg->c_lock);
		else
			need_unlock = FALSE;
	}
done:
	if (need_unlock == TRUE) {
		if (clear_busy == TRUE)
			C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
	}
	return (c_seg_freed);
}
struct {
	uint64_t asked_permission;
	uint64_t compactions;
	uint64_t moved_slots;
	uint64_t moved_bytes;
	uint64_t wasted_space_in_swapouts;
	uint64_t count_of_swapouts;
} c_seg_major_compact_stats;

#define C_MAJOR_COMPACTION_AGE_APPROPRIATE	30
#define C_MAJOR_COMPACTION_OLD_ENOUGH		300
#define C_MAJOR_COMPACTION_SIZE_APPROPRIATE	((C_SEG_BUFSIZE * 80) / 100)
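/*
 * A source/destination pair is only worth major-compacting when at least
 * one of the two segments is under C_MAJOR_COMPACTION_SIZE_APPROPRIATE
 * (80% of C_SEG_BUFSIZE) -- if both are already nearly full there is
 * little to be gained by merging them.
 */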
boolean_t
c_seg_major_compact_ok(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{

	c_seg_major_compact_stats.asked_permission++;

	if (c_seg_src->c_filling) {
		/*
		 * we're at or near the head... don't compact
		 */
		return (FALSE);
	}
	if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
	    c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE)
		return (FALSE);

	if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX) {
		/*
		 * destination segment is full... can't compact
		 */
		return (FALSE);
	}
	return (TRUE);
}
boolean_t
c_seg_major_compact(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{
	c_slot_mapping_t slot_ptr;
	uint32_t	c_rounded_size;
	uint32_t	c_size;
	uint16_t	dst_slot;
	int		i;
	int		slotarray;
	c_slot_t	c_dst;
	c_slot_t	c_src;
	boolean_t	keep_compacting = TRUE;

	/*
	 * segments are not locked but they are both marked c_busy
	 * which keeps c_decompress from working on them...
	 * we can safely allocate new pages, move compressed data
	 * from c_seg_src to c_seg_dst and update both c_segments'
	 * state w/o holding the master lock
	 */

#if VALIDATE_C_SEGMENTS
	c_seg_dst->c_was_major_compacted++;
	c_seg_src->c_was_major_donor++;
#endif
	c_seg_major_compact_stats.compactions++;

	dst_slot = c_seg_dst->c_nextslot;

	for (i = 0; i < c_seg_src->c_nextslot; i++) {

		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0) {
			/* BATCH: move what we have so far; */
			continue;
		}

		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {

			if ((C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) == C_SEG_BUFSIZE)) {
				keep_compacting = FALSE;
				break;
			}
			kernel_memory_populate(kernel_map,
					       (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
					       PAGE_SIZE,
					       KMA_COMPRESSOR);

			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(PAGE_SIZE);
			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= C_SEG_BUFSIZE);
		}

		slotarray = C_SEG_SLOTARRAY_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

		if (c_seg_dst->c_slots[slotarray] == 0) {
			KERNEL_DEBUG(0xe0400008 | DBG_FUNC_START, 0, 0, 0, 0, 0);
			c_seg_dst->c_slots[slotarray] = (struct c_slot *)
				kalloc(sizeof(struct c_slot) *
				       C_SEG_SLOT_ARRAY_SIZE);
			KERNEL_DEBUG(0xe0400008 | DBG_FUNC_END, 0, 0, 0, 0, 0);
		}
		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		c_seg_major_compact_stats.moved_slots++;
		c_seg_major_compact_stats.moved_bytes += c_size;

#if CHECKSUM_THE_DATA
		c_dst->c_hash_data = c_src->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
		c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
#endif
		c_dst->c_size = c_src->c_size;
		c_dst->c_packed_ptr = c_src->c_packed_ptr;
		c_dst->c_offset = c_seg_dst->c_nextoffset;

		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot)
			c_seg_dst->c_firstemptyslot++;
		c_seg_dst->c_nextslot++;
		c_seg_dst->c_bytes_used += c_rounded_size;
		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

		PACK_C_SIZE(c_src, 0);

		c_seg_src->c_bytes_used -= c_rounded_size;
		c_seg_src->c_bytes_unused += c_rounded_size;
		c_seg_src->c_firstemptyslot = 0;

		if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX) {
			/* dest segment is now full */
			keep_compacting = FALSE;
			break;
		}
	}
	if (dst_slot < c_seg_dst->c_nextslot) {

		PAGE_REPLACEMENT_ALLOWED(TRUE);
		/*
		 * we've now locked out c_decompress from
		 * converting the slot passed into it into
		 * a c_segment_t which allows us to use
		 * the backptr to change which c_segment and
		 * index the slot points to
		 */
		while (dst_slot < c_seg_dst->c_nextslot) {

			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);

			slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst);
			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
			slot_ptr->s_cindx = dst_slot++;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return (keep_compacting);
}
uint64_t
vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
{
	uint64_t end_msecs;
	uint64_t start_msecs;

	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
	start_msecs = (start_sec * 1000) + start_nsec / 1000000;

	return (end_msecs - start_msecs);
}
uint32_t compressor_eval_period_in_msecs = 250;
uint32_t compressor_sample_min_in_msecs = 500;
uint32_t compressor_sample_max_in_msecs = 10000;
uint32_t compressor_thrashing_threshold_per_10msecs = 50;
uint32_t compressor_thrashing_min_per_10msecs = 20;

/* When true, reset sample data next chance we get. */
static boolean_t	compressor_need_sample_reset = FALSE;

extern uint32_t vm_page_filecache_min;
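/*
 * Thrashing detection: decompressions are tallied by the age (in seconds)
 * of the segment they hit, in age_of_decompressions_during_sample_period[].
 * Every eval period (>= 250 msecs) within a sample window (500 msecs to
 * 10 secs) we check whether enough compressor traffic occurred to judge.
 * If the decompression rate exceeds the thrashing threshold, we walk the
 * age histogram to find the age that covers ~95% of the decompressions
 * (an approximation of the working set).  If that working set would fit
 * in the compressor pool, swapout_target_age is set to the timestamp
 * beyond which only the oldest ~1% of decompressions occurred -- segments
 * created before that age are considered safe to push out to swap.
 */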
void
compute_swapout_target_age(void)
{
	clock_sec_t	cur_ts_sec;
	clock_nsec_t	cur_ts_nsec;
	uint32_t	min_operations_needed_in_this_sample;
	uint64_t	elapsed_msecs_in_eval;
	uint64_t	elapsed_msecs_in_sample;
	boolean_t	need_eval_reset = FALSE;

	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);

	if (compressor_need_sample_reset ||
	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);

	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs)
		goto done;
	need_eval_reset = TRUE;

	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);

	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;

	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {

		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
			     sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);

		swapout_target_age = 0;

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	last_eval_compression_count = sample_period_compression_count;
	last_eval_decompression_count = sample_period_decompression_count;

	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {

		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
		goto done;
	}
	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {

		uint64_t	running_total;
		uint64_t	working_target;
		uint64_t	aging_target;
		uint32_t	oldest_age_of_csegs_sampled = 0;
		uint64_t	working_set_approximation = 0;

		swapout_target_age = 0;

		working_target = (sample_period_decompression_count / 100) * 95;	/* 95 percent */
		aging_target = (sample_period_decompression_count / 100) * 1;		/* 1 percent */

		running_total = 0;

		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {

			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

			if (running_total >= working_target)
				break;
		}
		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {

			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;

			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {

				running_total = overage_decompressions_during_sample_period;

				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

					if (running_total >= aging_target)
						break;
				}
				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;

				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
			} else
				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
		} else
			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
	} else
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
done:
	if (compressor_need_sample_reset == TRUE) {
		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
		overage_decompressions_during_sample_period = 0;

		start_of_sample_period_sec = cur_ts_sec;
		start_of_sample_period_nsec = cur_ts_nsec;
		sample_period_decompression_count = 0;
		sample_period_compression_count = 0;
		last_eval_decompression_count = 0;
		last_eval_compression_count = 0;
		compressor_need_sample_reset = FALSE;
	}
	if (need_eval_reset == TRUE) {
		start_of_eval_period_sec = cur_ts_sec;
		start_of_eval_period_nsec = cur_ts_nsec;
	}
}
int		compaction_swapper_inited = 0;
int		compaction_swapper_init_now = 0;
int		compaction_swapper_running = 0;
int		compaction_swapper_abort = 0;


#if CONFIG_JETSAM
boolean_t	memorystatus_kill_on_VM_thrashing(boolean_t);
boolean_t	memorystatus_kill_on_FC_thrashing(boolean_t);
int		compressor_thrashing_induced_jetsam = 0;
int		filecache_thrashing_induced_jetsam = 0;
static boolean_t	vm_compressor_thrashing_detected = FALSE;
#endif /* CONFIG_JETSAM */
static boolean_t
compressor_needs_to_swap(void)
{
	boolean_t	should_swap = FALSE;

	if (vm_swap_up == TRUE) {
		if (COMPRESSOR_NEEDS_TO_SWAP()) {
			return (TRUE);
		}
		if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
			return (TRUE);
		}
		if (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT))
			return (TRUE);
	}
	compute_swapout_target_age();

	if (swapout_target_age) {
		c_segment_t	c_seg;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			if (c_seg->c_creation_ts > swapout_target_age)
				swapout_target_age = 0;
		}
		lck_mtx_unlock_always(c_list_lock);
	}
#if CONFIG_PHANTOM_CACHE
	if (vm_phantom_cache_check_pressure())
		should_swap = TRUE;
#endif
	if (swapout_target_age)
		should_swap = TRUE;

	if (vm_swap_up == FALSE) {

#if CONFIG_JETSAM
		if (vm_compressor_thrashing_detected == FALSE) {
			vm_compressor_thrashing_detected = TRUE;

			if (swapout_target_age) {
				memorystatus_kill_on_VM_thrashing(TRUE /* async */);
				compressor_thrashing_induced_jetsam++;
			} else {
				memorystatus_kill_on_FC_thrashing(TRUE /* async */);
				filecache_thrashing_induced_jetsam++;
			}
		}
		/*
		 * let the jetsam take precedence over
		 * any major compactions we might have
		 * been able to do... otherwise we run
		 * the risk of doing major compactions
		 * on segments we're about to free up
		 * due to the jetsam activity.
		 */
		should_swap = FALSE;
#endif /* CONFIG_JETSAM */

		if (should_swap == FALSE)
			should_swap = COMPRESSOR_NEEDS_TO_MAJOR_COMPACT();
	}
	/*
	 * returning TRUE when swap_supported == FALSE
	 * will cause the major compaction engine to
	 * run, but will not trigger any swapping...
	 * segments that have been major compacted
	 * will be moved to the swapped_out_q
	 * but will not have the c_ondisk flag set
	 */
	return (should_swap);
}
#if CONFIG_JETSAM
/*
 * This function is called from the jetsam thread after killing something to
 * mitigate thrashing.
 *
 * We need to restart our thrashing detection heuristics since memory pressure
 * has potentially changed significantly, and we don't want to detect on old
 * data from before the jetsam.
 */
void
vm_thrashing_jetsam_done(void)
{
	vm_compressor_thrashing_detected = FALSE;

	/* Were we compressor-thrashing or filecache-thrashing? */
	if (swapout_target_age) {
		swapout_target_age = 0;
		compressor_need_sample_reset = TRUE;
	}
#if CONFIG_PHANTOM_CACHE
	else {
		vm_phantom_cache_restart_sample();
	}
#endif
}
#endif /* CONFIG_JETSAM */
uint32_t vm_wake_compactor_swapper_calls = 0;

void
vm_wake_compactor_swapper(void)
{
	boolean_t need_major_compaction = FALSE;

	if (compaction_swapper_running)
		return;

	if (c_minor_count == 0 && need_major_compaction == FALSE)
		return;

	lck_mtx_lock_spin_always(c_list_lock);

	fastwake_warmup = FALSE;

	if (compaction_swapper_running == 0) {
		vm_wake_compactor_swapper_calls++;

		thread_wakeup((event_t)&c_compressor_swap_trigger);

		compaction_swapper_running = 1;
	}
	lck_mtx_unlock_always(c_list_lock);
}
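/*
 * Called from vm_pageout_scan: decide whether the compactor/swapper
 * thread needs a kick.  Wake it when delayed minor compactions are
 * pending and the minor-compaction threshold has been crossed, when
 * compressor_needs_to_swap() says so, or when more than 10% of the
 * bytes resident in the compressor are sitting unused in its segments.
 */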
void
vm_consider_waking_compactor_swapper(void)
{
	boolean_t	need_wakeup = FALSE;

	if (compaction_swapper_running)
		return;

	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
		compaction_swapper_init_now = 1;
		need_wakeup = TRUE;
	}
	if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {

		need_wakeup = TRUE;

	} else if (compressor_needs_to_swap()) {

		need_wakeup = TRUE;

	} else if (c_minor_count) {
		uint64_t	total_bytes;

		total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;

		if ((total_bytes - compressor_bytes_used) > total_bytes / 10)
			need_wakeup = TRUE;
	}
	if (need_wakeup == TRUE) {

		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0) {
			memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);

			thread_wakeup((event_t)&c_compressor_swap_trigger);

			compaction_swapper_running = 1;
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}
#define	C_SWAPOUT_LIMIT			4
#define	DELAYED_COMPACTIONS_PER_PASS	30

void
vm_compressor_do_delayed_compactions(boolean_t flush_all)
{
	c_segment_t	c_seg;
	int		number_compacted = 0;
	boolean_t	needs_to_swap = FALSE;


	lck_mtx_assert(c_list_lock, LCK_MTX_ASSERT_OWNED);

	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {

		c_seg = (c_segment_t)queue_first(&c_minor_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {

			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);

		if (vm_swap_up == TRUE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {

			if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT)
				needs_to_swap = TRUE;

			number_compacted = 0;
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
}
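/*
 * Segments that have just been swapped back in sit on the swappedin
 * queue for C_SEGMENT_SWAPPEDIN_AGE_LIMIT seconds before being merged
 * back into the age queue (immediately when flush_all is set).
 */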
#define C_SEGMENT_SWAPPEDIN_AGE_LIMIT	10

static void
vm_compressor_age_swapped_in_segments(boolean_t flush_all)
{
	c_segment_t	c_seg;
	clock_sec_t	now;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&now, &nsec);

	while (!queue_empty(&c_swappedin_list_head)) {

		c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);

		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT)
			break;

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedin_q = 0;
		c_swappedin_count--;

		c_seg_insert_into_q(&c_age_list_head, c_seg);
		c_seg->c_on_age_q = 1;
		c_age_count++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	}
}
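/*
 * vm_compressor_flush is invoked on the hibernation path: it shuts down
 * the compactor/swapper thread, then forces a full compact-and-swap of
 * every segment up to a generation barrier, waiting for the swapout
 * queue to drain and giving up after HIBERNATE_FLUSHING_SECS_TO_COMPLETE
 * (120) seconds.
 */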
void
vm_compressor_flush(void)
{
	uint64_t	vm_swap_put_failures_at_start;
	wait_result_t	wait_result = 0;
	AbsoluteTime	startTime, endTime;
	clock_sec_t	now_sec;
	clock_nsec_t	now_nsec;
	uint64_t	nsec;


	HIBLOG("vm_compressor_flush - starting\n");

	clock_get_uptime(&startTime);

	lck_mtx_lock_spin_always(c_list_lock);

	fastwake_warmup = FALSE;
	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	hibernate_flushing = TRUE;
	hibernate_no_swapspace = FALSE;
	c_generation_id_flush_barrier = c_generation_id + 1000;

	clock_get_system_nanotime(&now_sec, &now_nsec);
	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;

	vm_swap_put_failures_at_start = vm_swap_put_failures;

	vm_compressor_compact_and_swap(TRUE);

	while (!queue_empty(&c_swapout_list_head)) {

		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);

		lck_mtx_unlock_always(c_list_lock);

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);

		if (wait_result == THREAD_TIMED_OUT)
			break;
	}
	hibernate_flushing = FALSE;
	compaction_swapper_running = 0;

	if (vm_swap_put_failures > vm_swap_put_failures_at_start)
		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
		       vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);

	lck_mtx_unlock_always(c_list_lock);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_flush completed - took %qd msecs\n", nsec / 1000000ULL);
}
extern	void	vm_swap_file_set_tuneables(void);
int		compaction_swap_trigger_thread_awakened = 0;


static void
vm_compressor_swap_trigger_thread(void)
{
	/*
	 * compaction_swapper_init_now is set when the first call to
	 * vm_consider_waking_compactor_swapper is made from
	 * vm_pageout_scan... since this function is called upon
	 * thread creation, we want to make sure to delay adjusting
	 * the tuneables until we are awakened via vm_pageout_scan
	 * so that we are at a point where the vm_swapfile_open will
	 * be operating on the correct directory (in case the default
	 * of /var/vm/ is overridden by the dynamic_pager
	 */
	if (compaction_swapper_init_now && !compaction_swapper_inited) {
		if (vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP)
			vm_swap_file_set_tuneables();

		compaction_swapper_inited = 1;
	}
	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swap_trigger_thread_awakened++;

	vm_compressor_compact_and_swap(FALSE);

	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);

	compaction_swapper_running = 0;
	thread_wakeup((event_t)&compaction_swapper_running);

	lck_mtx_unlock_always(c_list_lock);

	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);

	/* NOTREACHED */
}
void
vm_compressor_record_warmup_start(void)
{
	c_segment_t	c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == 0) {
		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else
			first_c_segment_to_warm_generation_id = 0;

		fastwake_recording_in_progress = TRUE;
	}
	lck_mtx_unlock_always(c_list_lock);
}
void
vm_compressor_record_warmup_end(void)
{
	c_segment_t	c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (fastwake_recording_in_progress == TRUE) {

		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else
			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;

		fastwake_recording_in_progress = FALSE;

		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
	}
	lck_mtx_unlock_always(c_list_lock);
}
#define DELAY_TRIM_ON_WAKE_SECS		4

void
vm_compressor_delay_trim(void)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&sec, &nsec);
	dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
}
void
vm_compressor_do_warmup(void)
{
	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;

		lck_mtx_unlock_always(c_list_lock);
		return;
	}

	if (compaction_swapper_running == 0) {

		fastwake_warmup = TRUE;
		compaction_swapper_running = 1;
		thread_wakeup((event_t)&c_compressor_swap_trigger);
	}
	lck_mtx_unlock_always(c_list_lock);
}
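/*
 * Fast-wake warmup: swap the segments recorded during hibernation (by
 * generation id, between first_ and last_c_segment_to_warm_generation_id)
 * back in ahead of demand, with this thread's IO policy dropped to
 * THROTTLE_LEVEL_COMPRESSOR_TIER2 so the prefetch doesn't starve other IO.
 */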
void
do_fastwake_warmup(void)
{
	uint64_t	my_thread_id;
	c_segment_t	c_seg = NULL;
	AbsoluteTime	startTime, endTime;
	uint64_t	nsec;


	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);

	clock_get_uptime(&startTime);

	lck_mtx_unlock_always(c_list_lock);

	my_thread_id = current_thread()->thread_id;
	proc_set_task_policy_thread(kernel_task, my_thread_id,
				    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);

	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(&c_swappedout_list_head) && fastwake_warmup == TRUE) {

		c_seg = (c_segment_t) queue_first(&c_swappedout_list_head);

		if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
		    c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
			break;

		lck_mtx_lock_spin_always(&c_seg->c_lock);
		lck_mtx_unlock_always(c_list_lock);

		if (c_seg->c_busy) {
			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			c_seg_wait_on_busy(c_seg);
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		} else {
			c_seg_swapin(c_seg, TRUE);

			lck_mtx_unlock_always(&c_seg->c_lock);
			c_segment_warmup_count++;

			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			vm_pageout_io_throttle();
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
	lck_mtx_unlock_always(c_list_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	proc_set_task_policy_thread(kernel_task, my_thread_id,
				    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);

	lck_mtx_lock_spin_always(c_list_lock);

	first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
}
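/*
 * Main loop for the compactor/swapper thread: drain any pending delayed
 * minor compactions, re-age swapped-in segments, then walk the age queue
 * from the oldest segment, major-compacting each candidate with its
 * queue neighbors until it fills (or no donor qualifies) and handing the
 * result to the swapout thread -- throttled to C_SWAPOUT_LIMIT swapouts
 * in flight.  With flush_all set (hibernation) it ignores the usual
 * "needs to swap" check and pushes everything up to the flush barrier.
 */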
void
vm_compressor_compact_and_swap(boolean_t flush_all)
{
	c_segment_t	c_seg, c_seg_next;
	boolean_t	keep_compacting;


	if (fastwake_warmup == TRUE) {
		uint64_t	starting_warmup_count;

		starting_warmup_count = c_segment_warmup_count;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
				      first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
		do_fastwake_warmup();
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);

		fastwake_warmup = FALSE;
	}

	/*
	 * it's possible for the c_age_list_head to be empty if we
	 * hit our limits for growing the compressor pool and we subsequently
	 * hibernated... on the next hibernation we could see the queue as
	 * empty and not proceed even though we have a bunch of segments on
	 * the swapped in queue that need to be dealt with.
	 */
	vm_compressor_do_delayed_compactions(flush_all);

	vm_compressor_age_swapped_in_segments(flush_all);


	while (!queue_empty(&c_age_list_head) && compaction_swapper_abort == 0) {

		if (hibernate_flushing == TRUE) {
			clock_sec_t	sec;
			clock_nsec_t	nsec;

			if (hibernate_should_abort()) {
				HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
				break;
			}
			if (hibernate_no_swapspace == TRUE) {
				HIBLOG("vm_compressor_flush - out of swap space\n");
				break;
			}
			clock_get_system_nanotime(&sec, &nsec);

			if (sec > hibernate_flushing_deadline) {
				HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
				break;
			}
		}
		if (c_swapout_count >= C_SWAPOUT_LIMIT) {

			assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000*NSEC_PER_USEC);

			lck_mtx_unlock_always(c_list_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock_spin_always(c_list_lock);
		}
		/*
		 * Minor compactions
		 */
		vm_compressor_do_delayed_compactions(flush_all);

		vm_compressor_age_swapped_in_segments(flush_all);

		if (c_swapout_count >= C_SWAPOUT_LIMIT) {
			/*
			 * we timed out on the above thread_block
			 * let's loop around and try again
			 * the timeout allows us to continue
			 * to do minor compactions to make
			 * more memory available
			 */
			continue;
		}

		/*
		 * Swap out segments?
		 */
		if (flush_all == FALSE) {
			boolean_t	needs_to_swap;

			lck_mtx_unlock_always(c_list_lock);

			needs_to_swap = compressor_needs_to_swap();

			lck_mtx_lock_spin_always(c_list_lock);

			if (needs_to_swap == FALSE)
				break;
		}
		if (queue_empty(&c_age_list_head))
			break;
		c_seg = (c_segment_t) queue_first(&c_age_list_head);

		if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier)
			break;

		if (c_seg->c_filling) {
			/*
			 * we're at or near the head... no more work to do
			 */
			break;
		}
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {

			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
			/*
			 * found an empty c_segment and freed it
			 * so go grab the next guy in the queue
			 */
			continue;
		}
		/*
		 * Major compaction
		 */
		keep_compacting = TRUE;

		while (keep_compacting == TRUE) {

			assert(c_seg->c_busy);

			/* look for another segment to consolidate */

			c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);

			if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next))
				break;

			if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE)
				break;

			lck_mtx_lock_spin_always(&c_seg_next->c_lock);

			if (c_seg_next->c_busy) {

				lck_mtx_unlock_always(c_list_lock);
				c_seg_wait_on_busy(c_seg_next);
				lck_mtx_lock_spin_always(c_list_lock);

				continue;
			}
			/* grab that segment */
			C_SEG_BUSY(c_seg_next);

			if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
				/*
				 * found an empty c_segment and freed it
				 * so we can't continue to use c_seg_next
				 */
				continue;
			}

			/* unlock the list ... */
			lck_mtx_unlock_always(c_list_lock);

			/* do the major compaction */

			keep_compacting = c_seg_major_compact(c_seg, c_seg_next);

			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			lck_mtx_lock_spin_always(&c_seg_next->c_lock);
			/*
			 * run a minor compaction on the donor segment
			 * since we pulled at least some of its
			 * data into our target... if we've emptied
			 * it, now is a good time to free it which
			 * c_seg_minor_compaction_and_unlock also takes care of
			 *
			 * by passing TRUE, we ask for c_busy to be cleared
			 * and c_wanted to be taken care of
			 */
			c_seg_minor_compaction_and_unlock(c_seg_next, TRUE);

			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			/* relock the list */
			lck_mtx_lock_spin_always(c_list_lock);

		} /* major compaction */

		c_seg_major_compact_stats.wasted_space_in_swapouts += C_SEG_BUFSIZE - c_seg->c_bytes_used;
		c_seg_major_compact_stats.count_of_swapouts++;

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_busy);
		assert(c_seg->c_on_age_q);
		assert(!c_seg->c_on_minorcompact_q);

		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_age_q = 0;
		c_age_count--;

		if (vm_swap_up == TRUE) {
			queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
			c_seg->c_on_swapout_q = 1;
			c_swapout_count++;
		} else {
			queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
			c_seg->c_on_swappedout_q = 1;
			c_swappedout_count++;
		}
		C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);

		if (c_swapout_count) {
			lck_mtx_unlock_always(c_list_lock);

			thread_wakeup((event_t)&c_swapout_list_head);

			lck_mtx_lock_spin_always(c_list_lock);
		}
	}
}

static uint32_t	no_paging_space_action_in_progress = 0;
extern void	memorystatus_send_low_swap_note(void);
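
/*
 * c_seg_allocate:
 * return the segment this thread is currently filling (via *current_chead),
 * allocating and initializing a fresh c_segment if there isn't one.
 * Returns NULL if the compressor pool or the compressed-page count is at
 * its limit, or if backing KVA can't be allocated. On success the segment
 * is returned locked, with PAGE_REPLACEMENT_DISALLOWED(TRUE) in effect.
 */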
c_segment_t
c_seg_allocate(c_segment_t *current_chead)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;
	c_segment_t	c_seg;
	int		slotarray;
	int		c_segno;

	if ( (c_seg = *current_chead) == NULL ) {

		if (vm_compressor_low_on_space() || vm_swap_low_on_space()) {

			if (no_paging_space_action_in_progress == 0) {

				if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {

					if (no_paging_space_action()) {
						memorystatus_send_low_swap_note();
					}
					no_paging_space_action_in_progress = 0;
				}
			}
		}
		KERNEL_DEBUG(0xe0400004 | DBG_FUNC_START, 0, 0, 0, 0, 0);

		lck_mtx_lock_spin_always(c_list_lock);

		while (c_segments_busy == TRUE) {
			assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);

			lck_mtx_unlock_always(c_list_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock_spin_always(c_list_lock);
		}
		if (c_free_segno_head == (uint32_t)-1) {

			if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit) {
				lck_mtx_unlock_always(c_list_lock);

				KERNEL_DEBUG(0xe0400004 | DBG_FUNC_END, 0, 0, 0, 1, 0);
				return (NULL);
			}
			c_segments_busy = TRUE;
			lck_mtx_unlock_always(c_list_lock);

			kernel_memory_populate(kernel_map, (vm_offset_t)c_segments_next_page, PAGE_SIZE, KMA_KOBJECT);
			c_segments_next_page += PAGE_SIZE;

			for (c_segno = c_segments_available + 1; c_segno < (c_segments_available + C_SEGMENTS_PER_PAGE); c_segno++)
				c_segments[c_segno - 1].c_segno = c_segno;

			lck_mtx_lock_spin_always(c_list_lock);

			c_segments[c_segno - 1].c_segno = c_free_segno_head;
			c_free_segno_head = c_segments_available;
			c_segments_available += C_SEGMENTS_PER_PAGE;

			c_segments_busy = FALSE;
			thread_wakeup((event_t) (&c_segments_busy));
		}
		c_segno = c_free_segno_head;
		c_free_segno_head = c_segments[c_segno].c_segno;

		lck_mtx_unlock_always(c_list_lock);

		c_seg = (c_segment_t)zalloc(compressor_segment_zone);
		bzero((char *)c_seg, sizeof(struct c_segment));

		if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&c_seg->c_store.c_buffer), C_SEG_ALLOCSIZE, 0, KMA_COMPRESSOR | KMA_VAONLY) != KERN_SUCCESS) {
			zfree(compressor_segment_zone, c_seg);

			lck_mtx_lock_spin_always(c_list_lock);

			c_segments[c_segno].c_segno = c_free_segno_head;
			c_free_segno_head = c_segno;

			lck_mtx_unlock_always(c_list_lock);

			KERNEL_DEBUG(0xe0400004 | DBG_FUNC_END, 0, 0, 0, 2, 0);

			return (NULL);
		}
		OSAddAtomic64(C_SEG_ALLOCSIZE, &compressor_kvspace_used);

#if __i386__ || __x86_64__
		lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
#else /* __i386__ || __x86_64__ */
		lck_spin_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
#endif /* __i386__ || __x86_64__ */

		kernel_memory_populate(kernel_map, (vm_offset_t)(c_seg->c_store.c_buffer), 3 * PAGE_SIZE, KMA_COMPRESSOR);

		c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(3 * PAGE_SIZE);
		c_seg->c_firstemptyslot = C_SLOT_MAX;
		c_seg->c_mysegno = c_segno;
		c_seg->c_filling = 1;

		lck_mtx_lock_spin_always(c_list_lock);

		c_segment_count++;
		c_segments[c_segno].c_seg = c_seg;

		c_seg->c_generation_id = c_generation_id++;

		queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_age_q = 1;

		lck_mtx_unlock_always(c_list_lock);

		clock_get_system_nanotime(&sec, &nsec);
		c_seg->c_creation_ts = (uint32_t)sec;

		*current_chead = c_seg;

		KERNEL_DEBUG(0xe0400004 | DBG_FUNC_END, c_seg, 0, 0, 3, 0);
	}
	slotarray = C_SEG_SLOTARRAY_FROM_INDEX(c_seg, c_seg->c_nextslot);

	if (c_seg->c_slots[slotarray] == 0) {
		KERNEL_DEBUG(0xe0400008 | DBG_FUNC_START, 0, 0, 0, 0, 0);

		c_seg->c_slots[slotarray] = (struct c_slot *)kalloc(sizeof(struct c_slot) * C_SEG_SLOT_ARRAY_SIZE);

		KERNEL_DEBUG(0xe0400008 | DBG_FUNC_END, 0, 0, 0, 0, 0);
	}

	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(&c_seg->c_lock);

	return (c_seg);
}
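
/*
 * c_current_seg_filled:
 * called with the current segment locked once it can't accept another
 * compressed page... return any whole pages left unused at the tail of
 * the buffer, mark the segment as no longer filling, and detach it from
 * the caller's current_chead so a fresh segment gets allocated next time.
 */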
void
c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
{
	uint32_t	unused_bytes;
	uint32_t	offset_to_depopulate;

	unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));

	if (unused_bytes) {

		offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));

		/*
		 * release the extra physical page(s) at the end of the segment
		 */
		lck_mtx_unlock_always(&c_seg->c_lock);

		kernel_memory_depopulate(
			kernel_map,
			(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
			unused_bytes,
			KMA_COMPRESSOR);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg->c_populated_offset = offset_to_depopulate;
	}
	c_seg->c_filling = 0;

	if (C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
		c_seg_need_delayed_compaction(c_seg);

	lck_mtx_unlock_always(&c_seg->c_lock);

	*current_chead = NULL;
}
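
/*
 * c_seg_swapin_requeue:
 * pull the segment off whichever swapped-out queue it's on and, now that
 * its data is (or failed to be) resident again, put it on the swapped-in
 * queue... a segment left with no buffer goes to the bad-segment queue
 * when that tracking is configured.
 */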
/*
 * returns with c_seg locked
 */
void
c_seg_swapin_requeue(c_segment_t c_seg)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&sec, &nsec);

	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	if (c_seg->c_on_swappedout_q) {
		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedout_q = 0;
		c_swappedout_count--;
	} else {
		assert(c_seg->c_on_swappedout_sparse_q);

		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedout_sparse_q = 0;
		c_swappedout_sparse_count--;
	}
	if (c_seg->c_store.c_buffer) {
		queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedin_q = 1;
		c_swappedin_count++;
	}
#if TRACK_BAD_C_SEGMENTS
	else {
		queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_bad_q = 1;
		c_bad_count++;
	}
#endif
	c_seg->c_swappedin_ts = (uint32_t)sec;
	c_seg->c_ondisk = 0;
	c_seg->c_was_swapped_in = 1;

	lck_mtx_unlock_always(c_list_lock);
}
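
/*
 * c_seg_swapin:
 * bring a swapped-out segment's compressed data back into memory:
 * allocate and populate fresh buffer pages, read the data back with
 * vm_swap_get() (decrypting and/or checksumming when those options are
 * configured) and requeue the segment... on a swap-in error the buffer
 * is released and the segment is left with no data.
 */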
/*
 * c_seg has to be locked and is returned locked.
 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
 */

void
c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction)
{
	vm_offset_t	addr = 0;
	uint32_t	io_size = 0;
	uint64_t	f_offset;

#if !CHECKSUM_THE_SWAP
	if (c_seg->c_ondisk)
		c_seg_trim_tail(c_seg);
#endif
	io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
	f_offset = c_seg->c_store.c_swap_handle;

	C_SEG_BUSY(c_seg);
	lck_mtx_unlock_always(&c_seg->c_lock);

	if (c_seg->c_ondisk) {

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		if (kernel_memory_allocate(kernel_map, &addr, C_SEG_ALLOCSIZE, 0, KMA_COMPRESSOR | KMA_VAONLY) != KERN_SUCCESS)
			panic("c_seg_swapin: kernel_memory_allocate failed\n");

		kernel_memory_populate(kernel_map, addr, io_size, KMA_COMPRESSOR);

		if (vm_swap_get(addr, f_offset, io_size) != KERN_SUCCESS) {
			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			kernel_memory_depopulate(kernel_map, addr, io_size, KMA_COMPRESSOR);
			kmem_free(kernel_map, addr, C_SEG_ALLOCSIZE);

			c_seg->c_store.c_buffer = (int32_t*) NULL;
			c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
		} else {
			c_seg->c_store.c_buffer = (int32_t*) addr;
#if ENCRYPTED_SWAP
			vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */

#if CHECKSUM_THE_SWAP
			if (c_seg->cseg_swap_size != io_size)
				panic("swapin size doesn't match swapout size");

			if (c_seg->cseg_hash != hash_string((char*) c_seg->c_store.c_buffer, (int)io_size)) {
				panic("c_seg_swapin - Swap hash mismatch\n");
			}
#endif /* CHECKSUM_THE_SWAP */

			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			if (force_minor_compaction == TRUE) {
				lck_mtx_lock_spin_always(&c_seg->c_lock);

				c_seg_minor_compaction_and_unlock(c_seg, FALSE);
			}
			OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
			OSAddAtomic64(C_SEG_ALLOCSIZE, &compressor_kvspace_used);
		}
	}
	c_seg_swapin_requeue(c_seg);

	C_SEG_WAKEUP_DONE(c_seg);
}
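
/*
 * c_compress_page:
 * WKdm-compress one page from 'src' into the segment this thread is
 * filling and record where it landed in *slot_ptr... pages that don't
 * compress are stored raw. Returns 0 on success, non-zero if no segment
 * could be allocated.
 */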
static int
c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
{
	int		c_size;
	int		c_rounded_size;
	int		max_csize;
	c_slot_t	cs;
	c_segment_t	c_seg;

	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
retry:
	if ((c_seg = c_seg_allocate(current_chead)) == NULL)
		return (1);
	/*
	 * returns with c_seg lock held
	 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)
	 */
	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);

	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
	assert(slot_ptr == (c_slot_mapping_t)C_SLOT_UNPACK_PTR(cs));

	cs->c_offset = c_seg->c_nextoffset;

	max_csize = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);

	if (max_csize > PAGE_SIZE)
		max_csize = PAGE_SIZE;

	if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset -
				  c_seg->c_nextoffset)
	    < (unsigned) max_csize + PAGE_SIZE &&
	    (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset)
	     < C_SEG_ALLOCSIZE)) {
		lck_mtx_unlock_always(&c_seg->c_lock);

		kernel_memory_populate(kernel_map,
				       (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
				       PAGE_SIZE,
				       KMA_COMPRESSOR);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(PAGE_SIZE);
	}

#if CHECKSUM_THE_DATA
	cs->c_hash_data = hash_string(src, PAGE_SIZE);
#endif

	c_size = WKdm_compress_new((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
				   (WK_word *)(uintptr_t)scratch_buf, max_csize - 4);
	assert(c_size <= (max_csize - 4) && c_size >= -1);

	if (c_size == -1) {

		if (max_csize < PAGE_SIZE) {
			c_current_seg_filled(c_seg, current_chead);

			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			goto retry;
		}
		c_size = PAGE_SIZE;

		memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
	}
#if CHECKSUM_THE_COMPRESSED_DATA
	cs->c_hash_compressed_data = hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

	PACK_C_SIZE(cs, c_size);
	c_seg->c_bytes_used += c_rounded_size;
	c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

	slot_ptr->s_cindx = c_seg->c_nextslot++;
	/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
	slot_ptr->s_cseg = c_seg->c_mysegno + 1;

	if (c_seg->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg->c_nextslot >= C_SLOT_MAX)
		c_current_seg_filled(c_seg, current_chead);
	else
		lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
	OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
	OSAddAtomic64(c_size, &c_segment_compressed_bytes);

	OSAddAtomic(1, &c_segment_pages_compressed);
	OSAddAtomic(1, &sample_period_compression_count);

	KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);

	return (0);
}
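
/*
 * c_decompress_page:
 * locate the compressed copy described by *slot_ptr, decompress it into
 * 'dst' (swapping the segment in first if needed) and, unless C_KEEP is
 * passed, release the slot and the space it consumed. A NULL 'dst'
 * turns this into a pure free of the compressed page.
 */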
static int
c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
{
	c_slot_t	cs;
	c_segment_t	c_seg;
	int		c_indx;
	int		c_rounded_size;
	uint32_t	c_size;
	int		retval = 0;
	boolean_t	c_seg_has_data = TRUE;
	boolean_t	c_seg_swappedin = FALSE;
	boolean_t	need_unlock = TRUE;
	boolean_t	consider_defragmenting = FALSE;

ReTry:
	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	/*
	 * if hibernation is enabled, it indicates (via a call
	 * to 'vm_decompressor_lock') that no further
	 * decompressions are allowed once it reaches
	 * the point of flushing all of the currently dirty
	 * anonymous memory through the compressor and out
	 * to disk... in this state we allow freeing of compressed
	 * pages and must honor the C_DONT_BLOCK case
	 */
	if (dst && decompressions_blocked == TRUE) {
		if (flags & C_DONT_BLOCK) {

			PAGE_REPLACEMENT_DISALLOWED(FALSE);

			*zeroslot = 0;
			return (-2);
		}
		/*
		 * it's safe to atomically assert and block behind the
		 * lock held in shared mode because "decompressions_blocked" is
		 * only set and cleared and the thread_wakeup done when the lock
		 * is held exclusively
		 */
		assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		thread_block(THREAD_CONTINUE_NULL);

		goto ReTry;
	}

	/* s_cseg is actually "segno+1" */
	c_seg = c_segments[slot_ptr->s_cseg - 1].c_seg;

	lck_mtx_lock_spin_always(&c_seg->c_lock);

	if (flags & C_DONT_BLOCK) {
		if (c_seg->c_busy || (c_seg->c_ondisk && dst)) {
			retval = -2;
			*zeroslot = 0;

			goto done;
		}
	}
	if (c_seg->c_busy) {

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		c_seg_wait_on_busy(c_seg);

		goto ReTry;
	}
	c_indx = slot_ptr->s_cindx;

	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	c_size = UNPACK_C_SIZE(cs);

	c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
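
	/*
	 * dst == NULL means the caller is just freeing this compressed
	 * page; we only need the bookkeeping below, not the data itself.
	 */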
	if (dst) {
		uint32_t	age_of_cseg;
		clock_sec_t	cur_ts_sec;
		clock_nsec_t	cur_ts_nsec;

		if (c_seg->c_on_swappedout_q || c_seg->c_on_swappedout_sparse_q) {
			if (c_seg->c_ondisk)
				c_seg_swappedin = TRUE;
			c_seg_swapin(c_seg, FALSE);
		}
		if (c_seg->c_store.c_buffer == NULL) {
			c_seg_has_data = FALSE;
			goto c_seg_invalid_data;
		}
#if CHECKSUM_THE_COMPRESSED_DATA
		if (cs->c_hash_compressed_data != hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))
			panic("compressed data doesn't match original");
#endif
		if (c_rounded_size == PAGE_SIZE) {
			/*
			 * page wasn't compressible... just copy it out
			 */
			memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
		} else {
			uint32_t	my_cpu_no;
			char		*scratch_buf;

			/*
			 * we're behind the c_seg lock held in spin mode
			 * which means pre-emption is disabled... therefore
			 * the following sequence is atomic and safe
			 */
			my_cpu_no = cpu_number();

			assert(my_cpu_no < compressor_cpus);

			scratch_buf = &compressor_scratch_bufs[my_cpu_no * WKdm_SCRATCH_BUF_SIZE];
			WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
					    (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
		}
#if CHECKSUM_THE_DATA
		if (cs->c_hash_data != hash_string(dst, PAGE_SIZE))
			panic("decompressed data doesn't match original");
#endif
		if (!c_seg->c_was_swapped_in) {

			clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

			age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;

			if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE)
				OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
			else
				OSAddAtomic(1, &overage_decompressions_during_sample_period);

			OSAddAtomic(1, &sample_period_decompression_count);
		}
	} else {
		if (c_seg->c_store.c_buffer == NULL)
			c_seg_has_data = FALSE;
	}
c_seg_invalid_data:

	if (c_seg_has_data == TRUE) {
		if (c_seg_swappedin == TRUE)
			retval = 1;
		else
			retval = 0;
	} else
		retval = -1;

	if (flags & C_KEEP) {
		*zeroslot = 0;
		goto done;
	}
	c_seg->c_bytes_unused += c_rounded_size;
	c_seg->c_bytes_used -= c_rounded_size;
	PACK_C_SIZE(cs, 0);

	if (c_indx < c_seg->c_firstemptyslot)
		c_seg->c_firstemptyslot = c_indx;

	OSAddAtomic(-1, &c_segment_pages_compressed);
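
	/*
	 * from here on we're just deciding what to do with the segment
	 * now that the slot has been released: tear down a segment that
	 * just went empty, or queue it for minor compaction / the sparse
	 * swapped-out list as its density warrants.
	 */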
	if (c_seg_has_data == TRUE && !c_seg->c_ondisk) {
		/*
		 * c_ondisk == TRUE can occur when we're doing a
		 * free of a compressed page (i.e. dst == NULL)
		 */
		OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
	}
	if (!c_seg->c_filling) {
		if (c_seg->c_bytes_used == 0) {
			if (!c_seg->c_ondisk) {
				int	pages_populated;

				pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
				c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);

				if (pages_populated) {
					assert(c_seg->c_store.c_buffer != NULL);

					C_SEG_BUSY(c_seg);
					lck_mtx_unlock_always(&c_seg->c_lock);

					kernel_memory_depopulate(kernel_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);

					lck_mtx_lock_spin_always(&c_seg->c_lock);
					C_SEG_WAKEUP_DONE(c_seg);
				}
				if (!c_seg->c_on_minorcompact_q && !c_seg->c_on_swapout_q)
					c_seg_need_delayed_compaction(c_seg);
			} else
				assert(c_seg->c_on_swappedout_sparse_q);

		} else if (c_seg->c_on_minorcompact_q) {

			if (C_SEG_INCORE_IS_SPARSE(c_seg)) {
				c_seg_try_minor_compaction_and_unlock(c_seg);
				need_unlock = FALSE;
			}
		} else if (!c_seg->c_ondisk) {

			if (c_seg_has_data == TRUE && !c_seg->c_on_swapout_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
				c_seg_need_delayed_compaction(c_seg);
			}
		} else if (!c_seg->c_on_swappedout_sparse_q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {

			c_seg_move_to_sparse_list(c_seg);
			consider_defragmenting = TRUE;
		}
	}
done:
	if (need_unlock == TRUE)
		lck_mtx_unlock_always(&c_seg->c_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	if (consider_defragmenting == TRUE)
		vm_swap_consider_defragmenting();

	return (retval);
}
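
/*
 * vm_compressor_get:
 * decompress the page described by *slot directly into the physical
 * page 'pn' and, unless told otherwise via the flags, zero the slot
 * so it can't be used again.
 */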
int
vm_compressor_get(ppnum_t pn, int *slot, int flags)
{
	char	*dst;
	int	zeroslot = 1;
	int	retval;

#if __x86_64__
	dst = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif

	retval = c_decompress_page(dst, (c_slot_mapping_t)slot, flags, &zeroslot);

	/*
	 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
	 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'c_ondisk' set
	 */
	if (zeroslot) {
		*slot = 0;
	}
	/*
	 * returns 0 if we successfully decompressed a page from a segment already in memory
	 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
	 * returns -1 if we encountered an error swapping in the segment - decompression failed
	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'c_ondisk' set
	 */
	return (retval);
}
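
/*
 * vm_compressor_free:
 * release the compressed copy described by *slot without decompressing
 * it... the NULL 'dst' tells c_decompress_page to skip the data copy.
 */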
int
vm_compressor_free(int *slot, int flags)
{
	int	zeroslot = 1;
	int	retval;

	assert(flags == 0 || flags == C_DONT_BLOCK);

	retval = c_decompress_page(NULL, (c_slot_mapping_t)slot, flags, &zeroslot);
	/*
	 * returns 0 if we successfully freed the specified compressed page
	 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
	 */

	if (retval == 0)
		*slot = 0;

	return (retval);
}
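
/*
 * vm_compressor_put:
 * compress the physical page 'pn' into the caller's current segment,
 * recording its new home in *slot.
 */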
int
vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
{
	char	*src;
	int	retval;

#if __x86_64__
	src = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif

	retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);

	return (retval);
}
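
/*
 * vm_compressor_transfer:
 * move ownership of a compressed page from src_slot_p to dst_slot_p,
 * repointing the segment's back-reference so a later decompress or
 * free through the new slot finds the right mapping.
 */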
void
vm_compressor_transfer(
	int	*dst_slot_p,
	int	*src_slot_p)
{
	c_slot_mapping_t	dst_slot, src_slot;
	c_segment_t		c_seg;
	int			c_indx;
	c_slot_t		cs;

	dst_slot = (c_slot_mapping_t) dst_slot_p;
	src_slot = (c_slot_mapping_t) src_slot_p;

Retry:
	PAGE_REPLACEMENT_DISALLOWED(TRUE);
	/* get segment for src_slot */
	c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
	/* lock segment */
	lck_mtx_lock_spin_always(&c_seg->c_lock);
	/* wait if it's busy */
	if (c_seg->c_busy) {
		PAGE_REPLACEMENT_DISALLOWED(FALSE);
		c_seg_wait_on_busy(c_seg);
		goto Retry;
	}
	/* find the c_slot */
	c_indx = src_slot->s_cindx;
	cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	/* point the c_slot back to dst_slot instead of src_slot */
	cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
	/* transfer */
	*dst_slot_p = *src_slot_p;
	*src_slot_p = 0;

	lck_mtx_unlock_always(&c_seg->c_lock);
	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}