/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <mach/mach_host.h>		/* for host_info() */
#include <kern/ledger.h>

#include <default_pager/default_pager_alerts.h>
#include <default_pager/default_pager_object_server.h>

#include <IOKit/IOHibernatePrivate.h>
/*
 * vm_compressor_mode has a hierarchy of control to set its value.
 * boot-args are checked first, then device-tree, and finally
 * the default value that is defined below. See vm_fault_init() for
 * the boot-arg & device-tree code.
 */
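/*
 * For illustration only (a sketch, not the verbatim vm_fault_init()
 * code): the boot-arg override there follows the usual
 * PE_parse_boot_argn pattern, e.g.
 *
 *	int new_mode;
 *	if (PE_parse_boot_argn("vm_compressor", &new_mode, sizeof(new_mode)))
 *		vm_compressor_mode = new_mode;
 */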
extern ipc_port_t min_pages_trigger_port;
extern lck_mtx_t paging_segments_lock;
#define PSL_LOCK()	lck_mtx_lock(&paging_segments_lock)
#define PSL_UNLOCK()	lck_mtx_unlock(&paging_segments_lock)
int		vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;

int		vm_compressor_is_active = 0;
int		vm_compression_limit = 0;

extern boolean_t vm_swap_up;
extern void	vm_pageout_io_throttle(void);
#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
#endif


struct c_slot {
	uint64_t	c_offset:C_SEG_OFFSET_BITS,
			c_size:12,
			c_packed_ptr:36;
#if CHECKSUM_THE_DATA
	unsigned int	c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int	c_hash_compressed_data;
#endif
};
#define	UNPACK_C_SIZE(cs)	((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define	PACK_C_SIZE(cs, size)	(cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
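/*
 * Illustration (added for clarity, not from the original source):
 * assuming the 12-bit c_size field above, the field can hold
 * 0..PAGE_SIZE-1, yet a slot may describe a full PAGE_SIZE worth of
 * data.  Encoding PAGE_SIZE as PAGE_SIZE-1 keeps the whole legal range
 * representable:
 *
 *	PACK_C_SIZE(cs, PAGE_SIZE);	// stores PAGE_SIZE - 1
 *	UNPACK_C_SIZE(cs);		// yields PAGE_SIZE again
 */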
struct c_slot_mapping {
	uint32_t	s_cseg:22,	/* segment number + 1 */
			s_cindx:10;	/* index in the segment */
};

typedef struct c_slot_mapping *c_slot_mapping_t;
#define C_SLOT_PACK_PTR(ptr)		(((uintptr_t)ptr - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS) >> 2)
#define C_SLOT_UNPACK_PTR(cslot)	((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)
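/*
 * Sketch of the round trip (added for clarity): slot pointers handed to
 * the compressor are at least 4-byte aligned and sit above
 * VM_MIN_KERNEL_AND_KEXT_ADDRESS, so rebasing and dropping the two
 * always-zero low bits lets a full pointer travel through the narrow
 * c_packed_ptr bitfield:
 *
 *	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
 *	assert(C_SLOT_UNPACK_PTR(cs) == (uintptr_t)slot_ptr);
 *
 * vm_compressor_init() performs exactly this sanity check against the
 * zone map boundaries before the compressor is brought up.
 */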
uint32_t	c_segment_count = 0;

uint64_t	c_generation_id = 0;
uint64_t	c_generation_id_flush_barrier;


#define		HIBERNATE_FLUSHING_SECS_TO_COMPLETE	120

boolean_t	hibernate_no_swapspace = FALSE;
clock_sec_t	hibernate_flushing_deadline = 0;
#if TRACK_BAD_C_SEGMENTS
queue_head_t	c_bad_list_head;
uint32_t	c_bad_count = 0;
#endif

queue_head_t	c_age_list_head;
queue_head_t	c_swapout_list_head;
queue_head_t	c_swappedin_list_head;
queue_head_t	c_swappedout_list_head;
queue_head_t	c_swappedout_sparse_list_head;

uint32_t	c_age_count = 0;
uint32_t	c_swapout_count = 0;
uint32_t	c_swappedin_count = 0;
uint32_t	c_swappedout_count = 0;
uint32_t	c_swappedout_sparse_count = 0;

queue_head_t	c_minor_list_head;
uint32_t	c_minor_count = 0;
union c_segu	*c_segments;
caddr_t		c_segments_next_page;
boolean_t	c_segments_busy;
uint32_t	c_segments_available;
uint32_t	c_segments_limit;
uint32_t	c_segments_nearing_limit;
uint32_t	c_segment_pages_compressed;
uint32_t	c_segment_pages_compressed_limit;
uint32_t	c_segment_pages_compressed_nearing_limit;
uint32_t	c_free_segno_head = (uint32_t)-1;

uint32_t	vm_compressor_minorcompact_threshold_divisor = 10;
uint32_t	vm_compressor_majorcompact_threshold_divisor = 10;
uint32_t	vm_compressor_unthrottle_threshold_divisor = 10;
uint32_t	vm_compressor_catchup_threshold_divisor = 10;
#define		C_SEGMENTS_PER_PAGE	(PAGE_SIZE / sizeof(union c_segu))


lck_grp_attr_t	vm_compressor_lck_grp_attr;
lck_attr_t	vm_compressor_lck_attr;
lck_grp_t	vm_compressor_lck_grp;


#if __i386__ || __x86_64__
lck_mtx_t	*c_list_lock;
#else /* __i386__ || __x86_64__ */
lck_spin_t	*c_list_lock;
#endif /* __i386__ || __x86_64__ */

lck_rw_t	c_master_lock;
boolean_t	decompressions_blocked = FALSE;
zone_t		compressor_segment_zone;
int		c_compressor_swap_trigger = 0;

uint32_t	compressor_cpus;
char		*compressor_scratch_bufs;


clock_sec_t	start_of_sample_period_sec = 0;
clock_nsec_t	start_of_sample_period_nsec = 0;
clock_sec_t	start_of_eval_period_sec = 0;
clock_nsec_t	start_of_eval_period_nsec = 0;
uint32_t	sample_period_decompression_count = 0;
uint32_t	sample_period_compression_count = 0;
uint32_t	last_eval_decompression_count = 0;
uint32_t	last_eval_compression_count = 0;

#define		DECOMPRESSION_SAMPLE_MAX_AGE	(60 * 30)

uint32_t	swapout_target_age = 0;
uint32_t	age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t	overage_decompressions_during_sample_period = 0;
void		do_fastwake_warmup(void);
boolean_t	fastwake_warmup = FALSE;
boolean_t	fastwake_recording_in_progress = FALSE;
clock_sec_t	dont_trim_until_ts = 0;

uint64_t	c_segment_warmup_count;
uint64_t	first_c_segment_to_warm_generation_id = 0;
uint64_t	last_c_segment_to_warm_generation_id = 0;
boolean_t	hibernate_flushing = FALSE;

int64_t		c_segment_input_bytes __attribute__((aligned(8))) = 0;
int64_t		c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
int64_t		compressor_bytes_used __attribute__((aligned(8))) = 0;
uint64_t	compressor_kvspace_used __attribute__((aligned(8))) = 0;
uint64_t	compressor_kvwaste_limit = 0;
static boolean_t compressor_needs_to_swap(void);
static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_age_swapped_in_segments(boolean_t);

boolean_t vm_compressor_low_on_space(void);

void compute_swapout_target_age(void);

boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);

int  c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int  c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
void c_seg_need_delayed_compaction(c_segment_t);

void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);

boolean_t c_seg_try_free(c_segment_t);
void	  c_seg_free(c_segment_t);
void	  c_seg_free_locked(c_segment_t);


uint64_t vm_available_memory(void);
uint64_t vm_compressor_pages_compressed(void);

extern unsigned int dp_pages_free, dp_pages_reserve;
uint64_t
vm_available_memory(void)
{
	return (((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64);
}


uint64_t
vm_compressor_pages_compressed(void)
{
	return (c_segment_pages_compressed * PAGE_SIZE_64);
}
boolean_t
vm_compression_available(void)
{
	if ( !(COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE))
		return (FALSE);

	if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit)
		return (FALSE);

	return (TRUE);
}
boolean_t
vm_compressor_low_on_space(void)
{
	if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
	    (c_segment_count > c_segments_nearing_limit))
		return (TRUE);

	return (FALSE);
}
boolean_t
vm_wants_task_throttled(task_t task)
{
	if (task == kernel_task)
		return (FALSE);

	if (vm_compressor_mode == COMPRESSED_PAGER_IS_ACTIVE || vm_compressor_mode == DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) &&
		    (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4))
			return (TRUE);
	} else {
		if (((dp_pages_free + dp_pages_reserve < 2000) && VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) &&
		    get_task_resident_size(task) > (((AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE) / 5))
			return (TRUE);
	}
	return (FALSE);
}
void
vm_compressor_init_locks(void)
{
	lck_grp_attr_setdefault(&vm_compressor_lck_grp_attr);
	lck_grp_init(&vm_compressor_lck_grp, "vm_compressor", &vm_compressor_lck_grp_attr);
	lck_attr_setdefault(&vm_compressor_lck_attr);

	lck_rw_init(&c_master_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
}
void
vm_decompressor_lock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = TRUE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);
}

void
vm_decompressor_unlock(void)
{
	PAGE_REPLACEMENT_ALLOWED(TRUE);

	decompressions_blocked = FALSE;

	PAGE_REPLACEMENT_ALLOWED(FALSE);

	thread_wakeup((event_t)&decompressions_blocked);
}
void
vm_compressor_init(void)
{
	thread_t	thread;
	struct c_slot	cs_dummy;
	c_slot_t	cs = &cs_dummy;

	/*
	 * ensure that any pointer that gets created from
	 * the vm_page zone can be packed properly
	 */
	cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_min_address);

	if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_min_address)
		panic("C_SLOT_UNPACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address);

	cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_max_address);

	if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_max_address)
		panic("C_SLOT_UNPACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address);


	assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);

	PE_parse_boot_argn("vm_compression_limit", &vm_compression_limit, sizeof (vm_compression_limit));

	if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
		vm_compressor_minorcompact_threshold_divisor = 11;
		vm_compressor_majorcompact_threshold_divisor = 13;
		vm_compressor_unthrottle_threshold_divisor = 20;
		vm_compressor_catchup_threshold_divisor = 35;
	} else {
		vm_compressor_minorcompact_threshold_divisor = 20;
		vm_compressor_majorcompact_threshold_divisor = 25;
		vm_compressor_unthrottle_threshold_divisor = 35;
		vm_compressor_catchup_threshold_divisor = 50;
	}
	/*
	 * vm_page_init_lck_grp is now responsible for calling vm_compressor_init_locks
	 * c_master_lock needs to be available early so that "vm_page_find_contiguous" can
	 * use PAGE_REPLACEMENT_ALLOWED to coordinate with the compressor.
	 */

#if __i386__ || __x86_64__
	c_list_lock = lck_mtx_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr);
#else /* __i386__ || __x86_64__ */
	c_list_lock = lck_spin_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr);
#endif /* __i386__ || __x86_64__ */

#if TRACK_BAD_C_SEGMENTS
	queue_init(&c_bad_list_head);
#endif
	queue_init(&c_age_list_head);
	queue_init(&c_minor_list_head);
	queue_init(&c_swapout_list_head);
	queue_init(&c_swappedin_list_head);
	queue_init(&c_swappedout_list_head);
	queue_init(&c_swappedout_sparse_list_head);

	compressor_segment_zone = zinit(sizeof (struct c_segment),
					128000 * sizeof (struct c_segment),
					8192, "compressor_segment");
	zone_change(compressor_segment_zone, Z_CALLERACCT, FALSE);
	zone_change(compressor_segment_zone, Z_NOENCRYPT, TRUE);


	c_free_segno_head = -1;
	c_segments_available = 0;

	if (vm_compression_limit == 0) {
		c_segment_pages_compressed_limit = (uint32_t)((max_mem / PAGE_SIZE)) * vm_scale;

#define	OLD_SWAP_LIMIT	(1024 * 1024 * 16)
#define MAX_SWAP_LIMIT	(1024 * 1024 * 128)

		if (c_segment_pages_compressed_limit > (OLD_SWAP_LIMIT))
			c_segment_pages_compressed_limit = OLD_SWAP_LIMIT;

		if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE_64))
			c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE_64);
	} else {
		if (vm_compression_limit < MAX_SWAP_LIMIT)
			c_segment_pages_compressed_limit = vm_compression_limit;
		else
			c_segment_pages_compressed_limit = MAX_SWAP_LIMIT;
	}
	if ((c_segments_limit = c_segment_pages_compressed_limit / (C_SEG_BUFSIZE / PAGE_SIZE)) > C_SEG_MAX_LIMIT)
		c_segments_limit = C_SEG_MAX_LIMIT;

	c_segment_pages_compressed_nearing_limit = (c_segment_pages_compressed_limit * 98) / 100;
	c_segments_nearing_limit = (c_segments_limit * 98) / 100;

	compressor_kvwaste_limit = (vm_map_max(kernel_map) - vm_map_min(kernel_map)) / 16;

	c_segments_busy = FALSE;

	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY) != KERN_SUCCESS)
		panic("vm_compressor_init: kernel_memory_allocate failed\n");

	c_segments_next_page = (caddr_t)c_segments;

	{
		host_basic_info_data_t hinfo;
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

#define BSD_HOST 1
		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		compressor_cpus = hinfo.max_cpus;

		compressor_scratch_bufs = kalloc(compressor_cpus * WKdm_SCRATCH_BUF_SIZE);
	}
	if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
					 BASEPRI_PREEMPT - 1, &thread) != KERN_SUCCESS) {
		panic("vm_compressor_swap_trigger_thread: create failed");
	}
	thread->options |= TH_OPT_VMPRIV;

	thread_deallocate(thread);

	assert(default_pager_init_flag == 0);

	if (vm_pageout_internal_start() != KERN_SUCCESS) {
		panic("vm_compressor_init: Failed to start the internal pageout thread.\n");
	}

	if ((vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP) ||
	    (vm_compressor_mode == VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP)) {
		vm_compressor_swap_init();
	}

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED)
		vm_compressor_is_active = 1;

#if CONFIG_FREEZE
	memorystatus_freeze_enabled = TRUE;
#endif /* CONFIG_FREEZE */

	default_pager_init_flag = 1;

	vm_page_reactivate_all_throttled();
}
#if VALIDATE_C_SEGMENTS

static void
c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
{
	int		c_indx;
	int32_t		bytes_used;
	int32_t		bytes_unused;
	uint32_t	c_rounded_size;
	uint16_t	c_size;
	c_slot_t	cs;

	if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
		c_indx = c_seg->c_firstemptyslot;
		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		if (cs == NULL)
			panic("c_seg_validate: no slot backing c_firstemptyslot");

		if (cs->c_size)
			panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs->c_size);
	}
	bytes_used = 0;
	bytes_unused = 0;

	for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {

		cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

		c_size = UNPACK_C_SIZE(cs);

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		bytes_used += c_rounded_size;

#if CHECKSUM_THE_COMPRESSED_DATA
		if (c_size && cs->c_hash_compressed_data != hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))
			panic("compressed data doesn't match original");
#endif
	}

	if (bytes_used != c_seg->c_bytes_used)
		panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d\n", bytes_used, c_seg->c_bytes_used);

	if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset))
		panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
		      (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);

	if (must_be_compact) {
		if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset))
			panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
			      (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
	}
}

#endif
void
c_seg_need_delayed_compaction(c_segment_t c_seg)
{
	boolean_t	clear_busy = FALSE;

	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	if (!c_seg->c_on_minorcompact_q && !c_seg->c_ondisk && !c_seg->c_on_swapout_q) {
		queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
		c_seg->c_on_minorcompact_q = 1;
		c_minor_count++;
	}
	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE)
		C_SEG_WAKEUP_DONE(c_seg);
}
unsigned int c_seg_moved_to_sparse_list = 0;

void
c_seg_move_to_sparse_list(c_segment_t c_seg)
{
	boolean_t	clear_busy = FALSE;

	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		C_SEG_BUSY(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		clear_busy = TRUE;
	}
	assert(c_seg->c_ondisk);
	assert(c_seg->c_on_swappedout_q);
	assert(!c_seg->c_on_swappedout_sparse_q);

	queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
	c_seg->c_on_swappedout_q = 0;
	c_swappedout_count--;

	c_seg_insert_into_q(&c_swappedout_sparse_list_head, c_seg);
	c_seg->c_on_swappedout_sparse_q = 1;
	c_swappedout_sparse_count++;

	c_seg_moved_to_sparse_list++;

	lck_mtx_unlock_always(c_list_lock);

	if (clear_busy == TRUE)
		C_SEG_WAKEUP_DONE(c_seg);
}
void
c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
{
	c_segment_t c_seg_next;

	if (queue_empty(qhead)) {
		queue_enter(qhead, c_seg, c_segment_t, c_age_list);
	} else {
		c_seg_next = (c_segment_t)queue_first(qhead);

		while (TRUE) {

			if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
				queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
				break;
			}
			c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);

			if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
				queue_enter(qhead, c_seg, c_segment_t, c_age_list);
				break;
			}
		}
	}
}
int try_minor_compaction_failed = 0;
int try_minor_compaction_succeeded = 0;

void
c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
{

	assert(c_seg->c_on_minorcompact_q);
	/*
	 * c_seg is currently on the delayed minor compaction
	 * queue and we have c_seg locked... if we can get the
	 * c_list_lock w/o blocking (if we blocked we could deadlock
	 * because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the delayed list and free it directly
	 */
	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_minor_compaction_failed++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	} else {
		try_minor_compaction_succeeded++;

		C_SEG_BUSY(c_seg);
		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
	}
}
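/*
 * Lock-ordering note (added for clarity): the established order is
 * c_list_lock first, then an individual segment's c_lock.  Paths that
 * arrive holding only the segment lock (like the function above and
 * c_seg_try_free() below) must either take c_list_lock with a try-lock
 * and bail on failure, or mark the segment busy, drop its lock, and
 * re-acquire both locks in the correct order the way
 * c_seg_need_delayed_compaction() does.
 */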
int
c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
{
	int	c_seg_freed;

	assert(c_seg->c_busy);

	if (!c_seg->c_on_minorcompact_q) {
		if (clear_busy == TRUE)
			C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);

		return (0);
	}
	queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
	c_seg->c_on_minorcompact_q = 0;
	c_minor_count--;

	lck_mtx_unlock_always(c_list_lock);

	if (disallow_page_replacement == TRUE) {
		lck_mtx_unlock_always(&c_seg->c_lock);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(&c_seg->c_lock);
	}
	c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);

	if (disallow_page_replacement == TRUE)
		PAGE_REPLACEMENT_DISALLOWED(FALSE);

	if (need_list_lock == TRUE)
		lck_mtx_lock_spin_always(c_list_lock);

	return (c_seg_freed);
}
void
c_seg_wait_on_busy(c_segment_t c_seg)
{
	c_seg->c_wanted = 1;
	assert_wait((event_t) (c_seg), THREAD_UNINT);

	lck_mtx_unlock_always(&c_seg->c_lock);
	thread_block(THREAD_CONTINUE_NULL);
}
int	try_free_succeeded = 0;
int	try_free_failed = 0;

boolean_t
c_seg_try_free(c_segment_t c_seg)
{
	/*
	 * c_seg is currently on the delayed minor compaction
	 * or the swapped out sparse queue and we have c_seg locked...
	 * if we can get the c_list_lock w/o blocking (if we blocked we
	 * could deadlock because the lock order is c_list_lock then c_seg's lock)
	 * we'll pull it from the appropriate queue and free it
	 */
	if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
		/*
		 * c_list_lock is held, we need to bail
		 */
		try_free_failed++;
		return (FALSE);
	}
	if (c_seg->c_on_minorcompact_q) {
		queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
		c_seg->c_on_minorcompact_q = 0;
		c_minor_count--;
	} else {
		assert(c_seg->c_on_swappedout_sparse_q);

		/*
		 * c_seg_free_locked will remove it from the swappedout sparse list
		 */
	}
	if (!c_seg->c_busy_swapping)
		C_SEG_BUSY(c_seg);

	c_seg_free_locked(c_seg);

	try_free_succeeded++;

	return (TRUE);
}
void
c_seg_free(c_segment_t c_seg)
{
	assert(c_seg->c_busy);

	lck_mtx_unlock_always(&c_seg->c_lock);
	lck_mtx_lock_spin_always(c_list_lock);
	lck_mtx_lock_spin_always(&c_seg->c_lock);

	c_seg_free_locked(c_seg);
}
void
c_seg_free_locked(c_segment_t c_seg)
{
	int		segno, i;
	int		pages_populated = 0;
	int32_t		*c_buffer = NULL;
	uint64_t	c_swap_handle = 0;

	assert(!c_seg->c_on_minorcompact_q);

	if (c_seg->c_on_age_q) {
		queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_age_q = 0;
		c_age_count--;
	} else if (c_seg->c_on_swappedin_q) {
		queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedin_q = 0;
		c_swappedin_count--;
	} else if (c_seg->c_on_swapout_q) {
		queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swapout_q = 0;
		c_swapout_count--;
		thread_wakeup((event_t)&compaction_swapper_running);
	} else if (c_seg->c_on_swappedout_q) {
		queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedout_q = 0;
		c_swappedout_count--;
	} else if (c_seg->c_on_swappedout_sparse_q) {
		queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedout_sparse_q = 0;
		c_swappedout_sparse_count--;
	}
#if TRACK_BAD_C_SEGMENTS
	else if (c_seg->c_on_bad_q) {
		queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_bad_q = 0;
		c_bad_count--;
	}
#endif
	segno = c_seg->c_mysegno;
	c_segments[segno].c_segno = c_free_segno_head;
	c_free_segno_head = segno;
	c_segment_count--;

	lck_mtx_unlock_always(c_list_lock);

	if (c_seg->c_wanted) {
		thread_wakeup((event_t) (c_seg));
		c_seg->c_wanted = 0;
	}
	if (c_seg->c_busy_swapping) {
		c_seg->c_must_free = 1;

		lck_mtx_unlock_always(&c_seg->c_lock);
		return;
	}
	if (c_seg->c_ondisk == 0) {
		pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;

		c_buffer = c_seg->c_store.c_buffer;
		c_seg->c_store.c_buffer = NULL;
	} else {
		/*
		 * Free swap space on disk.
		 */
		c_swap_handle = c_seg->c_store.c_swap_handle;
		c_seg->c_store.c_swap_handle = (uint64_t)-1;
	}
	lck_mtx_unlock_always(&c_seg->c_lock);

	if (c_buffer) {
		if (pages_populated)
			kernel_memory_depopulate(kernel_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);

		kmem_free(kernel_map, (vm_offset_t) c_buffer, C_SEG_ALLOCSIZE);
		OSAddAtomic64(-C_SEG_ALLOCSIZE, &compressor_kvspace_used);

	} else if (c_swap_handle)
		vm_swap_free(c_swap_handle);


#if __i386__ || __x86_64__
	lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
#else /* __i386__ || __x86_64__ */
	lck_spin_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
#endif /* __i386__ || __x86_64__ */

	for (i = 0; i < C_SEG_SLOT_ARRAYS; i++) {
		if (c_seg->c_slots[i] == 0)
			break;

		kfree((char *)c_seg->c_slots[i], sizeof(struct c_slot) * C_SEG_SLOT_ARRAY_SIZE);
	}
	zfree(compressor_segment_zone, c_seg);
}
int c_seg_trim_page_count = 0;

void
c_seg_trim_tail(c_segment_t c_seg)
{
	c_slot_t	cs;
	uint16_t	c_size;
	uint32_t	c_offset;
	uint32_t	c_rounded_size;
	uint16_t	current_nextslot;
	uint32_t	current_populated_offset;

	if (c_seg->c_bytes_used == 0)
		return;
	current_nextslot = c_seg->c_nextslot;
	current_populated_offset = c_seg->c_populated_offset;

	while (c_seg->c_nextslot) {

		cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));

		c_size = UNPACK_C_SIZE(cs);

		if (c_size) {
			if (current_nextslot != c_seg->c_nextslot) {
				c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
				c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);

				c_seg->c_nextoffset = c_offset;
				c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);

				if (c_seg->c_firstemptyslot > c_seg->c_nextslot)
					c_seg->c_firstemptyslot = c_seg->c_nextslot;

				c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
							   round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE);
			}
			break;
		}
		c_seg->c_nextslot--;
	}
	assert(c_seg->c_nextslot);
}
int
c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
{
	c_slot_t	c_dst;
	c_slot_t	c_src;
	int16_t		c_indx;
	int		i;
	c_slot_mapping_t slot_ptr;
	uint32_t	c_offset = 0;
	uint32_t	old_populated_offset;
	uint32_t	c_rounded_size;
	uint16_t	c_size;
	boolean_t	need_unlock = TRUE;

	assert(c_seg->c_busy);

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, FALSE);
#endif
	if (c_seg->c_bytes_used == 0) {
		c_seg_free(c_seg);
		return (1);
	}
	if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE)
		goto done;

#if VALIDATE_C_SEGMENTS
	c_seg->c_was_minor_compacted++;
#endif
	c_indx = c_seg->c_firstemptyslot;
	c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

	old_populated_offset = c_seg->c_populated_offset;
	c_offset = c_dst->c_offset;

	for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {

		c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0)
			continue;

		memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_size);

#if CHECKSUM_THE_DATA
		c_dst->c_hash_data = c_src->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
		c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
#endif
		c_dst->c_size = c_src->c_size;
		c_dst->c_packed_ptr = c_src->c_packed_ptr;
		c_dst->c_offset = c_offset;

		slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst);
		slot_ptr->s_cindx = c_indx;

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
		PACK_C_SIZE(c_src, 0);
		c_indx++;

		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
	}
	c_seg->c_firstemptyslot = c_indx;
	c_seg->c_nextslot = c_indx;
	c_seg->c_nextoffset = c_offset;
	c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
	c_seg->c_bytes_unused = 0;

#if VALIDATE_C_SEGMENTS
	c_seg_validate(c_seg, TRUE);
#endif

	if (old_populated_offset > c_seg->c_populated_offset) {
		uint32_t	gc_size;
		int32_t		*gc_ptr;

		gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
		gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];

		lck_mtx_unlock_always(&c_seg->c_lock);

		kernel_memory_depopulate(kernel_map, (vm_offset_t)gc_ptr, gc_size, KMA_COMPRESSOR);

		if (clear_busy == TRUE)
			lck_mtx_lock_spin_always(&c_seg->c_lock);
		else
			need_unlock = FALSE;
	}
done:
	if (need_unlock == TRUE) {
		if (clear_busy == TRUE)
			C_SEG_WAKEUP_DONE(c_seg);

		lck_mtx_unlock_always(&c_seg->c_lock);
	}
	return (0);
}
struct {
	uint64_t asked_permission;
	uint64_t compactions;
	uint64_t moved_slots;
	uint64_t moved_bytes;
	uint64_t wasted_space_in_swapouts;
	uint64_t count_of_swapouts;
} c_seg_major_compact_stats;
1028 c_seg_major_compact_ok(
1029 c_segment_t c_seg_dst
,
1030 c_segment_t c_seg_src
)
1033 c_seg_major_compact_stats
.asked_permission
++;
1035 if (c_seg_src
->c_filling
) {
1037 * we're at or near the head... don't compact
1041 if (c_seg_src
->c_bytes_used
>= C_MAJOR_COMPACTION_SIZE_APPROPRIATE
&&
1042 c_seg_dst
->c_bytes_used
>= C_MAJOR_COMPACTION_SIZE_APPROPRIATE
)
1045 if (c_seg_dst
->c_nextoffset
>= C_SEG_OFF_LIMIT
|| c_seg_dst
->c_nextslot
>= C_SLOT_MAX
) {
1047 * destination segment is full... can't compact
boolean_t
c_seg_major_compact(
	c_segment_t c_seg_dst,
	c_segment_t c_seg_src)
{
	c_slot_mapping_t slot_ptr;
	uint32_t	c_rounded_size;
	uint16_t	dst_slot;
	uint16_t	c_size;
	int		i;
	int		slotarray;
	c_slot_t	c_dst;
	c_slot_t	c_src;
	boolean_t	keep_compacting = TRUE;

	/*
	 * segments are not locked but they are both marked c_busy
	 * which keeps c_decompress from working on them...
	 * we can safely allocate new pages, move compressed data
	 * from c_seg_src to c_seg_dst and update both c_segment's
	 * state w/o holding the master lock
	 */

#if VALIDATE_C_SEGMENTS
	c_seg_dst->c_was_major_compacted++;
	c_seg_src->c_was_major_donor++;
#endif
	c_seg_major_compact_stats.compactions++;

	dst_slot = c_seg_dst->c_nextslot;

	for (i = 0; i < c_seg_src->c_nextslot; i++) {

		c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);

		c_size = UNPACK_C_SIZE(c_src);

		if (c_size == 0) {
			/* BATCH: move what we have so far; */
			continue;
		}

		if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {

			if ((C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) == C_SEG_BUFSIZE)) {
				keep_compacting = FALSE;
				break;
			}
			kernel_memory_populate(kernel_map,
					       (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
					       PAGE_SIZE,
					       KMA_COMPRESSOR);

			c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(PAGE_SIZE);
			assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= C_SEG_BUFSIZE);
		}

		slotarray = C_SEG_SLOTARRAY_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

		if (c_seg_dst->c_slots[slotarray] == 0) {
			KERNEL_DEBUG(0xe0400008 | DBG_FUNC_START, 0, 0, 0, 0, 0);
			c_seg_dst->c_slots[slotarray] = (struct c_slot *)
				kalloc(sizeof(struct c_slot) *
				       C_SEG_SLOT_ARRAY_SIZE);
			KERNEL_DEBUG(0xe0400008 | DBG_FUNC_END, 0, 0, 0, 0, 0);
		}
		c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);

		memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);

		c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

		c_seg_major_compact_stats.moved_slots++;
		c_seg_major_compact_stats.moved_bytes += c_size;

#if CHECKSUM_THE_DATA
		c_dst->c_hash_data = c_src->c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
		c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
#endif
		c_dst->c_size = c_src->c_size;
		c_dst->c_packed_ptr = c_src->c_packed_ptr;
		c_dst->c_offset = c_seg_dst->c_nextoffset;

		if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot)
			c_seg_dst->c_firstemptyslot++;
		c_seg_dst->c_nextslot++;
		c_seg_dst->c_bytes_used += c_rounded_size;
		c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

		PACK_C_SIZE(c_src, 0);

		c_seg_src->c_bytes_used -= c_rounded_size;
		c_seg_src->c_bytes_unused += c_rounded_size;
		c_seg_src->c_firstemptyslot = 0;

		if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX) {
			/* dest segment is now full */
			keep_compacting = FALSE;
			break;
		}
	}
	if (dst_slot < c_seg_dst->c_nextslot) {

		PAGE_REPLACEMENT_ALLOWED(TRUE);
		/*
		 * we've now locked out c_decompress from
		 * converting the slot passed into it into
		 * a c_segment_t which allows us to use
		 * the backptr to change which c_segment and
		 * index the slot points to
		 */
		while (dst_slot < c_seg_dst->c_nextslot) {

			c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);

			slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst);
			/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
			slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
			slot_ptr->s_cindx = dst_slot++;
		}
		PAGE_REPLACEMENT_ALLOWED(FALSE);
	}
	return (keep_compacting);
}
static uint64_t
vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
{
	uint64_t end_msecs;
	uint64_t start_msecs;

	end_msecs = (end_sec * 1000) + end_nsec / 1000000;
	start_msecs = (start_sec * 1000) + start_nsec / 1000000;

	return (end_msecs - start_msecs);
}
uint32_t compressor_eval_period_in_msecs = 250;
uint32_t compressor_sample_min_in_msecs = 500;
uint32_t compressor_sample_max_in_msecs = 10000;
uint32_t compressor_thrashing_threshold_per_10msecs = 50;
uint32_t compressor_thrashing_min_per_10msecs = 20;

/* When true, reset sample data next chance we get. */
static boolean_t	compressor_need_sample_reset = FALSE;

extern uint32_t vm_page_filecache_min;
void
compute_swapout_target_age(void)
{
	clock_sec_t	cur_ts_sec;
	clock_nsec_t	cur_ts_nsec;
	uint32_t	min_operations_needed_in_this_sample;
	uint64_t	elapsed_msecs_in_eval;
	uint64_t	elapsed_msecs_in_sample;
	boolean_t	need_eval_reset = FALSE;

	clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

	elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);

	if (compressor_need_sample_reset ||
	    elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);

	if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs)
		goto done;
	need_eval_reset = TRUE;

	KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);

	min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;

	if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
	    (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {

		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
			     sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);

		swapout_target_age = 0;

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
		goto done;
	}
	last_eval_compression_count = sample_period_compression_count;
	last_eval_decompression_count = sample_period_decompression_count;

	if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {

		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
		goto done;
	}
	if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {

		uint64_t	running_total;
		uint64_t	working_target;
		uint64_t	aging_target;
		uint32_t	oldest_age_of_csegs_sampled = 0;
		uint64_t	working_set_approximation = 0;

		swapout_target_age = 0;

		working_target = (sample_period_decompression_count / 100) * 95;	/* 95 percent */
		aging_target = (sample_period_decompression_count / 100) * 1;		/* 1 percent */

		running_total = 0;

		for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {

			running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

			working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

			if (running_total >= working_target)
				break;
		}
		if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {

			working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;

			if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {

				running_total = overage_decompressions_during_sample_period;

				for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
					running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];

					if (running_total >= aging_target)
						break;
				}
				swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;

				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
			} else {
				KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
			}
		} else
			KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);

		compressor_need_sample_reset = TRUE;
		need_eval_reset = TRUE;
	} else
		KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
done:
	if (compressor_need_sample_reset == TRUE) {
		bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
		overage_decompressions_during_sample_period = 0;

		start_of_sample_period_sec = cur_ts_sec;
		start_of_sample_period_nsec = cur_ts_nsec;
		sample_period_decompression_count = 0;
		sample_period_compression_count = 0;
		last_eval_decompression_count = 0;
		last_eval_compression_count = 0;
		compressor_need_sample_reset = FALSE;
	}
	if (need_eval_reset == TRUE) {
		start_of_eval_period_sec = cur_ts_sec;
		start_of_eval_period_nsec = cur_ts_nsec;
	}
}
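/*
 * Worked example of the thresholds above (illustrative numbers, added
 * for clarity): with sample_period_decompression_count == 10000,
 * working_target is (10000 / 100) * 95 == 9500 and aging_target is 100.
 * The first loop finds the youngest age bucket by which 95% of the
 * sample's decompressions are covered (an approximation of the working
 * set); the second loop walks backward from the oldest bucket until 1%
 * of the decompressions are covered, yielding the age beyond which
 * segments are considered cold enough to swap out.
 */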
int		compaction_swapper_inited = 0;
int		compaction_swapper_init_now = 0;
int		compaction_swapper_running = 0;
int		compaction_swapper_abort = 0;


#if CONFIG_JETSAM
boolean_t	memorystatus_kill_on_VM_thrashing(boolean_t);
boolean_t	memorystatus_kill_on_FC_thrashing(boolean_t);
int		compressor_thrashing_induced_jetsam = 0;
int		filecache_thrashing_induced_jetsam = 0;
static boolean_t	vm_compressor_thrashing_detected = FALSE;
#endif /* CONFIG_JETSAM */
static boolean_t
compressor_needs_to_swap(void)
{
	boolean_t	should_swap = FALSE;

	if (vm_swap_up == TRUE) {
		if (COMPRESSOR_NEEDS_TO_SWAP()) {
			return (TRUE);
		}
		if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
			return (TRUE);
		}
		if (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT))
			return (TRUE);
	}
	compute_swapout_target_age();

	if (swapout_target_age) {
		c_segment_t	c_seg;

		lck_mtx_lock_spin_always(c_list_lock);

		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t) queue_first(&c_age_list_head);

			if (c_seg->c_creation_ts > swapout_target_age)
				swapout_target_age = 0;
		}
		lck_mtx_unlock_always(c_list_lock);
	}
#if CONFIG_PHANTOM_CACHE
	if (vm_phantom_cache_check_pressure())
		should_swap = TRUE;
#endif
	if (swapout_target_age)
		should_swap = TRUE;

#if CONFIG_JETSAM
	if (should_swap) {

		if (vm_swap_up == FALSE) {

			if (vm_compressor_thrashing_detected == FALSE) {
				vm_compressor_thrashing_detected = TRUE;

				if (swapout_target_age) {
					memorystatus_kill_on_VM_thrashing(TRUE /* async */);
					compressor_thrashing_induced_jetsam++;
				} else {
					memorystatus_kill_on_FC_thrashing(TRUE /* async */);
					filecache_thrashing_induced_jetsam++;
				}
			}
			/*
			 * let the jetsam take precedence over
			 * any major compactions we might have
			 * been able to do... otherwise we run
			 * the risk of doing major compactions
			 * on segments we're about to free up
			 * due to the jetsam activity.
			 */
			should_swap = FALSE;
		}
	}
#endif /* CONFIG_JETSAM */

	if (should_swap == FALSE)
		should_swap = COMPRESSOR_NEEDS_TO_MAJOR_COMPACT();

	/*
	 * returning TRUE when swap_supported == FALSE
	 * will cause the major compaction engine to
	 * run, but will not trigger any swapping...
	 * segments that have been major compacted
	 * will be moved to the swapped_out_q
	 * but will not have the c_ondisk flag set
	 */
	return (should_swap);
}
#if CONFIG_JETSAM
/*
 * This function is called from the jetsam thread after killing something to
 * mitigate thrashing.
 *
 * We need to restart our thrashing detection heuristics since memory pressure
 * has potentially changed significantly, and we don't want to detect on old
 * data from before the jetsam.
 */
void
vm_thrashing_jetsam_done(void)
{
	vm_compressor_thrashing_detected = FALSE;

	/* Were we compressor-thrashing or filecache-thrashing? */
	if (swapout_target_age) {
		swapout_target_age = 0;
		compressor_need_sample_reset = TRUE;
	}
#if CONFIG_PHANTOM_CACHE
	else {
		vm_phantom_cache_restart_sample();
	}
#endif
}
#endif /* CONFIG_JETSAM */
uint32_t vm_wake_compactor_swapper_calls = 0;

void
vm_wake_compactor_swapper(void)
{
	boolean_t	need_major_compaction = FALSE;

	if (compaction_swapper_running)
		return;

	if (c_minor_count == 0 && need_major_compaction == FALSE)
		return;

	lck_mtx_lock_spin_always(c_list_lock);

	fastwake_warmup = FALSE;

	if (compaction_swapper_running == 0) {
		vm_wake_compactor_swapper_calls++;

		thread_wakeup((event_t)&c_compressor_swap_trigger);

		compaction_swapper_running = 1;
	}
	lck_mtx_unlock_always(c_list_lock);
}
void
vm_consider_waking_compactor_swapper(void)
{
	boolean_t	need_wakeup = FALSE;

	if (compaction_swapper_running)
		return;

	if (!compaction_swapper_inited && !compaction_swapper_init_now) {
		compaction_swapper_init_now = 1;
		need_wakeup = TRUE;
	}
	if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {

		need_wakeup = TRUE;

	} else if (compressor_needs_to_swap()) {

		need_wakeup = TRUE;

	} else if (c_minor_count) {
		uint64_t	total_bytes;

		total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;

		if ((total_bytes - compressor_bytes_used) > total_bytes / 10)
			need_wakeup = TRUE;
	}
	if (need_wakeup == TRUE) {

		lck_mtx_lock_spin_always(c_list_lock);

		fastwake_warmup = FALSE;

		if (compaction_swapper_running == 0) {
			memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);

			thread_wakeup((event_t)&c_compressor_swap_trigger);

			compaction_swapper_running = 1;
		}
		lck_mtx_unlock_always(c_list_lock);
	}
}
#define	C_SWAPOUT_LIMIT			4
#define	DELAYED_COMPACTIONS_PER_PASS	30

static void
vm_compressor_do_delayed_compactions(boolean_t flush_all)
{
	c_segment_t	c_seg;
	int		number_compacted = 0;
	boolean_t	needs_to_swap = FALSE;


	lck_mtx_assert(c_list_lock, LCK_MTX_ASSERT_OWNED);

	while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {

		c_seg = (c_segment_t)queue_first(&c_minor_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (c_seg->c_busy) {

			lck_mtx_unlock_always(c_list_lock);
			c_seg_wait_on_busy(c_seg);
			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		C_SEG_BUSY(c_seg);

		c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);

		if (vm_swap_up == TRUE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {

			if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT)
				needs_to_swap = TRUE;

			number_compacted = 0;
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
}
#define C_SEGMENT_SWAPPEDIN_AGE_LIMIT	10

static void
vm_compressor_age_swapped_in_segments(boolean_t flush_all)
{
	c_segment_t	c_seg;
	clock_sec_t	now;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&now, &nsec);

	while (!queue_empty(&c_swappedin_list_head)) {

		c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);

		if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT)
			break;

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swappedin_q = 0;
		c_swappedin_count--;

		c_seg_insert_into_q(&c_age_list_head, c_seg);
		c_seg->c_on_age_q = 1;
		c_age_count++;

		lck_mtx_unlock_always(&c_seg->c_lock);
	}
}
void
vm_compressor_flush(void)
{
	uint64_t	vm_swap_put_failures_at_start;
	wait_result_t	wait_result = 0;
	AbsoluteTime	startTime, endTime;
	clock_sec_t	now_sec;
	clock_nsec_t	now_nsec;
	uint64_t	nsec;


	HIBLOG("vm_compressor_flush - starting\n");

	clock_get_uptime(&startTime);

	lck_mtx_lock_spin_always(c_list_lock);

	fastwake_warmup = FALSE;
	compaction_swapper_abort = 1;

	while (compaction_swapper_running) {
		assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);

		lck_mtx_unlock_always(c_list_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	compaction_swapper_abort = 0;
	compaction_swapper_running = 1;

	hibernate_flushing = TRUE;
	hibernate_no_swapspace = FALSE;
	c_generation_id_flush_barrier = c_generation_id + 1000;

	clock_get_system_nanotime(&now_sec, &now_nsec);
	hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;

	vm_swap_put_failures_at_start = vm_swap_put_failures;

	vm_compressor_compact_and_swap(TRUE);

	while (!queue_empty(&c_swapout_list_head)) {

		assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);

		lck_mtx_unlock_always(c_list_lock);

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock_spin_always(c_list_lock);

		if (wait_result == THREAD_TIMED_OUT)
			break;
	}
	hibernate_flushing = FALSE;
	compaction_swapper_running = 0;

	if (vm_swap_put_failures > vm_swap_put_failures_at_start)
		HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
		       vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);

	lck_mtx_unlock_always(c_list_lock);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_flush completed - took %qd msecs\n", nsec / 1000000ULL);
}
extern void	vm_swap_file_set_tuneables(void);
int		compaction_swap_trigger_thread_awakened = 0;


static void
vm_compressor_swap_trigger_thread(void)
{
	/*
	 * compaction_swapper_init_now is set when the first call to
	 * vm_consider_waking_compactor_swapper is made from
	 * vm_pageout_scan... since this function is called upon
	 * thread creation, we want to make sure to delay adjusting
	 * the tuneables until we are awakened via vm_pageout_scan
	 * so that we are at a point where the vm_swapfile_open will
	 * be operating on the correct directory (in case the default
	 * of /var/vm/ is overridden by the dynamic_pager)
	 */
	if (compaction_swapper_init_now && !compaction_swapper_inited) {
		if (vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP)
			vm_swap_file_set_tuneables();

		compaction_swapper_inited = 1;
	}
	lck_mtx_lock_spin_always(c_list_lock);

	compaction_swap_trigger_thread_awakened++;

	vm_compressor_compact_and_swap(FALSE);

	assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);

	compaction_swapper_running = 0;
	thread_wakeup((event_t)&compaction_swapper_running);

	lck_mtx_unlock_always(c_list_lock);

	thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);

	/* NOTREACHED */
}
void
vm_compressor_record_warmup_start(void)
{
	c_segment_t	c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == 0) {
		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else
			first_c_segment_to_warm_generation_id = 0;

		fastwake_recording_in_progress = TRUE;
	}
	lck_mtx_unlock_always(c_list_lock);
}


void
vm_compressor_record_warmup_end(void)
{
	c_segment_t	c_seg;

	lck_mtx_lock_spin_always(c_list_lock);

	if (fastwake_recording_in_progress == TRUE) {

		if (!queue_empty(&c_age_list_head)) {

			c_seg = (c_segment_t)queue_last(&c_age_list_head);

			last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
		} else
			last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;

		fastwake_recording_in_progress = FALSE;

		HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
	}
	lck_mtx_unlock_always(c_list_lock);
}
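/*
 * Summary of the fastwake warmup window (added for clarity): the two
 * recording functions above bracket hibernation by capturing the
 * generation id of the youngest segment on the age queue at the start
 * and end of recording.  do_fastwake_warmup() below then swaps back in
 * only the segments whose c_generation_id falls inside
 * [first_c_segment_to_warm_generation_id,
 *  last_c_segment_to_warm_generation_id] - typically those pushed out
 * while the hibernation image was being written.
 */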
#define DELAY_TRIM_ON_WAKE_SECS		4

void
vm_compressor_delay_trim(void)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	clock_get_system_nanotime(&sec, &nsec);
	dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
}


void
vm_compressor_do_warmup(void)
{
	lck_mtx_lock_spin_always(c_list_lock);

	if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
		first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;

		lck_mtx_unlock_always(c_list_lock);
		return;
	}

	if (compaction_swapper_running == 0) {

		fastwake_warmup = TRUE;
		compaction_swapper_running = 1;
		thread_wakeup((event_t)&c_compressor_swap_trigger);
	}
	lck_mtx_unlock_always(c_list_lock);
}
void
do_fastwake_warmup(void)
{
	uint64_t	my_thread_id;
	c_segment_t	c_seg = NULL;
	AbsoluteTime	startTime, endTime;
	uint64_t	nsec;


	HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);

	clock_get_uptime(&startTime);

	lck_mtx_unlock_always(c_list_lock);

	my_thread_id = current_thread()->thread_id;
	proc_set_task_policy_thread(kernel_task, my_thread_id,
				    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);

	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(&c_swappedout_list_head) && fastwake_warmup == TRUE) {

		c_seg = (c_segment_t) queue_first(&c_swappedout_list_head);

		if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
		    c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
			break;

		lck_mtx_lock_spin_always(&c_seg->c_lock);
		lck_mtx_unlock_always(c_list_lock);

		if (c_seg->c_busy) {
			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			c_seg_wait_on_busy(c_seg);
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		} else {
			c_seg_swapin(c_seg, TRUE);

			lck_mtx_unlock_always(&c_seg->c_lock);
			c_segment_warmup_count++;

			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			vm_pageout_io_throttle();
			PAGE_REPLACEMENT_DISALLOWED(TRUE);
		}
		lck_mtx_lock_spin_always(c_list_lock);
	}
	lck_mtx_unlock_always(c_list_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);

	proc_set_task_policy_thread(kernel_task, my_thread_id,
				    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);

	lck_mtx_lock_spin_always(c_list_lock);

	first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
}
1867 vm_compressor_compact_and_swap(boolean_t flush_all
)
1869 c_segment_t c_seg
, c_seg_next
;
1870 boolean_t keep_compacting
;
1873 if (fastwake_warmup
== TRUE
) {
1874 uint64_t starting_warmup_count
;
1876 starting_warmup_count
= c_segment_warmup_count
;
1878 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 11) | DBG_FUNC_START
, c_segment_warmup_count
,
1879 first_c_segment_to_warm_generation_id
, last_c_segment_to_warm_generation_id
, 0, 0);
1880 do_fastwake_warmup();
1881 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE
, 11) | DBG_FUNC_END
, c_segment_warmup_count
, c_segment_warmup_count
- starting_warmup_count
, 0, 0, 0);
1883 fastwake_warmup
= FALSE
;
1887 * it's possible for the c_age_list_head to be empty if we
1888 * hit our limits for growing the compressor pool and we subsequently
1889 * hibernated... on the next hibernation we could see the queue as
1890 * empty and not proceeed even though we have a bunch of segments on
1891 * the swapped in queue that need to be dealt with.
1893 vm_compressor_do_delayed_compactions(flush_all
);
1895 vm_compressor_age_swapped_in_segments(flush_all
);
1898 while (!queue_empty(&c_age_list_head
) && compaction_swapper_abort
== 0) {
1900 if (hibernate_flushing
== TRUE
) {
1904 if (hibernate_should_abort()) {
1905 HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
1908 if (hibernate_no_swapspace
== TRUE
) {
1909 HIBLOG("vm_compressor_flush - out of swap space\n");
1912 clock_get_system_nanotime(&sec
, &nsec
);
1914 if (sec
> hibernate_flushing_deadline
) {
1915 HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
1919 if (c_swapout_count
>= C_SWAPOUT_LIMIT
) {
1921 assert_wait_timeout((event_t
) &compaction_swapper_running
, THREAD_INTERRUPTIBLE
, 100, 1000*NSEC_PER_USEC
);
1923 lck_mtx_unlock_always(c_list_lock
);
1925 thread_block(THREAD_CONTINUE_NULL
);
1927 lck_mtx_lock_spin_always(c_list_lock
);
1932 vm_compressor_do_delayed_compactions(flush_all
);
1934 vm_compressor_age_swapped_in_segments(flush_all
);
1936 if (c_swapout_count
    >= C_SWAPOUT_LIMIT) {
            /*
             * we timed out on the above thread_block
             * let's loop around and try again
             * the timeout allows us to continue
             * to do minor compactions to make
             * more memory available
             */
            continue;
        }

        /*
         * Swap out segments?
         */
        if (flush_all == FALSE) {
            boolean_t   needs_to_swap;

            lck_mtx_unlock_always(c_list_lock);

            needs_to_swap = compressor_needs_to_swap();

            lck_mtx_lock_spin_always(c_list_lock);

            if (needs_to_swap == FALSE)
                break;
        }
        if (queue_empty(&c_age_list_head))
            break;

        c_seg = (c_segment_t) queue_first(&c_age_list_head);

        if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier)
            break;

        if (c_seg->c_filling) {
            /*
             * we're at or near the head... no more work to do
             */
            break;
        }
        lck_mtx_lock_spin_always(&c_seg->c_lock);

        if (c_seg->c_busy) {
            lck_mtx_unlock_always(c_list_lock);
            c_seg_wait_on_busy(c_seg);
            lck_mtx_lock_spin_always(c_list_lock);

            continue;
        }
        C_SEG_BUSY(c_seg);

        if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
            /*
             * found an empty c_segment and freed it
             * so go grab the next guy in the queue
             */
            continue;
        }
        keep_compacting = TRUE;

        while (keep_compacting == TRUE) {

            assert(c_seg->c_busy);

            /* look for another segment to consolidate */
            c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);

            if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next))
                break;

            if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE)
                break;

            lck_mtx_lock_spin_always(&c_seg_next->c_lock);

            if (c_seg_next->c_busy) {
                lck_mtx_unlock_always(c_list_lock);
                c_seg_wait_on_busy(c_seg_next);
                lck_mtx_lock_spin_always(c_list_lock);

                continue;
            }
            /* grab that segment */
            C_SEG_BUSY(c_seg_next);

            if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
                /*
                 * found an empty c_segment and freed it
                 * so we can't continue to use c_seg_next
                 */
                continue;
            }
            /* unlock the list ... */
            lck_mtx_unlock_always(c_list_lock);

            /* do the major compaction */
            keep_compacting = c_seg_major_compact(c_seg, c_seg_next);

            PAGE_REPLACEMENT_DISALLOWED(TRUE);

            lck_mtx_lock_spin_always(&c_seg_next->c_lock);
            /*
             * run a minor compaction on the donor segment
             * since we pulled at least some of its
             * data into our target... if we've emptied
             * it, now is a good time to free it which
             * c_seg_minor_compaction_and_unlock also takes care of
             *
             * by passing TRUE, we ask for c_busy to be cleared
             * and c_wanted to be taken care of
             */
            c_seg_minor_compaction_and_unlock(c_seg_next, TRUE);

            PAGE_REPLACEMENT_DISALLOWED(FALSE);

            /* relock the list */
            lck_mtx_lock_spin_always(c_list_lock);

        } /* major compaction */

        c_seg_major_compact_stats.wasted_space_in_swapouts += C_SEG_BUFSIZE - c_seg->c_bytes_used;
        c_seg_major_compact_stats.count_of_swapouts++;

        lck_mtx_lock_spin_always(&c_seg->c_lock);

        assert(c_seg->c_busy);
        assert(c_seg->c_on_age_q);
        assert(!c_seg->c_on_minorcompact_q);

        queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
        c_seg->c_on_age_q = 0;
        c_age_count--;

        if (vm_swap_up == TRUE) {
            queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
            c_seg->c_on_swapout_q = 1;
            c_swapout_count++;
        } else {
            queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
            c_seg->c_on_swappedout_q = 1;
            c_swappedout_count++;
        }
        C_SEG_WAKEUP_DONE(c_seg);

        lck_mtx_unlock_always(&c_seg->c_lock);

        if (c_swapout_count) {
            lck_mtx_unlock_always(c_list_lock);

            thread_wakeup((event_t)&c_swapout_list_head);

            lck_mtx_lock_spin_always(c_list_lock);
        }
    }
}
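

/*
 * no_paging_space_action_in_progress is a simple compare-and-swap
 * protected flag that keeps concurrent compressor threads from stacking
 * up duplicate "out of paging space" actions and low-swap notifications
 * when both the compressor pool and swap space are running low.
 */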
static uint32_t no_paging_space_action_in_progress = 0;
extern void     memorystatus_send_low_swap_note(void);
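

/*
 * c_seg_allocate:
 * return the segment this thread should continue filling... if there is
 * no current segment, carve a new segment number out of the free list
 * (growing the c_segments array a page at a time when it runs dry),
 * allocate and partially populate its buffer, and put it on the age queue.
 * returns with the c_seg lock held and PAGE_REPLACEMENT_DISALLOWED(TRUE),
 * or NULL if we're out of segments or compressed-page quota.
 */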
static c_segment_t
c_seg_allocate(c_segment_t *current_chead)
{
    clock_sec_t  sec;
    clock_nsec_t nsec;
    c_segment_t  c_seg;
    int          slotarray;

    if ( (c_seg = *current_chead) == NULL ) {
        uint32_t c_segno;

        if (vm_compressor_low_on_space() || vm_swap_low_on_space()) {

            if (no_paging_space_action_in_progress == 0) {

                if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {

                    if (no_paging_space_action()) {
                        memorystatus_send_low_swap_note();
                    }
                    no_paging_space_action_in_progress = 0;
                }
            }
        }
        KERNEL_DEBUG(0xe0400004 | DBG_FUNC_START, 0, 0, 0, 0, 0);

        lck_mtx_lock_spin_always(c_list_lock);

        while (c_segments_busy == TRUE) {
            assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);

            lck_mtx_unlock_always(c_list_lock);

            thread_block(THREAD_CONTINUE_NULL);

            lck_mtx_lock_spin_always(c_list_lock);
        }
        if (c_free_segno_head == (uint32_t)-1) {

            if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit) {
                lck_mtx_unlock_always(c_list_lock);

                KERNEL_DEBUG(0xe0400004 | DBG_FUNC_END, 0, 0, 0, 1, 0);
                return (NULL);
            }
            c_segments_busy = TRUE;
            lck_mtx_unlock_always(c_list_lock);

            kernel_memory_populate(kernel_map, (vm_offset_t)c_segments_next_page, PAGE_SIZE, KMA_KOBJECT);
            c_segments_next_page += PAGE_SIZE;

            for (c_segno = c_segments_available + 1; c_segno < (c_segments_available + C_SEGMENTS_PER_PAGE); c_segno++)
                c_segments[c_segno - 1].c_segno = c_segno;

            lck_mtx_lock_spin_always(c_list_lock);

            c_segments[c_segno - 1].c_segno = c_free_segno_head;
            c_free_segno_head = c_segments_available;
            c_segments_available += C_SEGMENTS_PER_PAGE;

            c_segments_busy = FALSE;
            thread_wakeup((event_t) (&c_segments_busy));
        }
        c_segno = c_free_segno_head;
        c_free_segno_head = c_segments[c_segno].c_segno;

        lck_mtx_unlock_always(c_list_lock);

        c_seg = (c_segment_t)zalloc(compressor_segment_zone);
        bzero((char *)c_seg, sizeof(struct c_segment));

        if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&c_seg->c_store.c_buffer), C_SEG_ALLOCSIZE, 0, KMA_COMPRESSOR | KMA_VAONLY) != KERN_SUCCESS) {
            zfree(compressor_segment_zone, c_seg);

            lck_mtx_lock_spin_always(c_list_lock);

            c_segments[c_segno].c_segno = c_free_segno_head;
            c_free_segno_head = c_segno;

            lck_mtx_unlock_always(c_list_lock);

            KERNEL_DEBUG(0xe0400004 | DBG_FUNC_END, 0, 0, 0, 2, 0);
            return (NULL);
        }
        OSAddAtomic64(C_SEG_ALLOCSIZE, &compressor_kvspace_used);

#if __i386__ || __x86_64__
        lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
#else /* __i386__ || __x86_64__ */
        lck_spin_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
#endif /* __i386__ || __x86_64__ */

        kernel_memory_populate(kernel_map, (vm_offset_t)(c_seg->c_store.c_buffer), 3 * PAGE_SIZE, KMA_COMPRESSOR);

        c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(3 * PAGE_SIZE);
        c_seg->c_firstemptyslot = C_SLOT_MAX;
        c_seg->c_mysegno = c_segno;
        c_seg->c_filling = 1;

        lck_mtx_lock_spin_always(c_list_lock);

        c_segment_count++;
        c_segments[c_segno].c_seg = c_seg;

        c_seg->c_generation_id = c_generation_id++;

        queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
        c_seg->c_on_age_q = 1;
        c_age_count++;

        lck_mtx_unlock_always(c_list_lock);

        clock_get_system_nanotime(&sec, &nsec);
        c_seg->c_creation_ts = (uint32_t)sec;

        *current_chead = c_seg;

        KERNEL_DEBUG(0xe0400004 | DBG_FUNC_END, c_seg, 0, 0, 3, 0);
    }
    slotarray = C_SEG_SLOTARRAY_FROM_INDEX(c_seg, c_seg->c_nextslot);

    if (c_seg->c_slots[slotarray] == 0) {
        KERNEL_DEBUG(0xe0400008 | DBG_FUNC_START, 0, 0, 0, 0, 0);

        c_seg->c_slots[slotarray] = (struct c_slot *)kalloc(sizeof(struct c_slot) * C_SEG_SLOT_ARRAY_SIZE);

        KERNEL_DEBUG(0xe0400008 | DBG_FUNC_END, 0, 0, 0, 0, 0);
    }
    PAGE_REPLACEMENT_DISALLOWED(TRUE);

    lck_mtx_lock_spin_always(&c_seg->c_lock);

    return (c_seg);
}
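

/*
 * c_current_seg_filled:
 * called with the c_seg lock held once a segment can take no more data...
 * trims the unused, already-populated tail pages back to the VM system,
 * clears c_filling, and detaches the segment from the caller's
 * current_chead so the next compression starts a fresh segment.
 */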
static void
c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
{
    uint32_t unused_bytes;
    uint32_t offset_to_depopulate;

    unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));

    if (unused_bytes) {

        offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));

        /*
         * release the extra physical page(s) at the end of the segment
         */
        lck_mtx_unlock_always(&c_seg->c_lock);

        kernel_memory_depopulate(
            kernel_map,
            (vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
            unused_bytes,
            KMA_COMPRESSOR);

        lck_mtx_lock_spin_always(&c_seg->c_lock);

        c_seg->c_populated_offset = offset_to_depopulate;
    }
    c_seg->c_filling = 0;

    if (C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
        c_seg_need_delayed_compaction(c_seg);

    lck_mtx_unlock_always(&c_seg->c_lock);

    *current_chead = NULL;
}
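

/*
 * c_seg_swapin_requeue:
 * move a segment arriving from swap off the swappedout (or
 * swappedout_sparse) queue... if its buffer made it back into memory it
 * goes on the swappedin queue, otherwise (when TRACK_BAD_C_SEGMENTS is
 * configured) it is parked on the bad queue, and its swap-related state
 * is reset either way.
 */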
/*
 * returns with c_seg locked
 */
void
c_seg_swapin_requeue(c_segment_t c_seg)
{
    clock_sec_t  sec;
    clock_nsec_t nsec;

    clock_get_system_nanotime(&sec, &nsec);

    lck_mtx_lock_spin_always(c_list_lock);
    lck_mtx_lock_spin_always(&c_seg->c_lock);

    if (c_seg->c_on_swappedout_q) {
        queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
        c_seg->c_on_swappedout_q = 0;
        c_swappedout_count--;
    } else {
        assert(c_seg->c_on_swappedout_sparse_q);

        queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
        c_seg->c_on_swappedout_sparse_q = 0;
        c_swappedout_sparse_count--;
    }
    if (c_seg->c_store.c_buffer) {
        queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
        c_seg->c_on_swappedin_q = 1;
        c_swappedin_count++;
    }
#if TRACK_BAD_C_SEGMENTS
    else {
        queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
        c_seg->c_on_bad_q = 1;
        c_bad_count++;
    }
#endif
    c_seg->c_swappedin_ts = (uint32_t)sec;
    c_seg->c_ondisk = 0;
    c_seg->c_was_swapped_in = 1;

    lck_mtx_unlock_always(c_list_lock);
}
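

/*
 * c_seg_swapin:
 * bring a swapped-out segment's compressed data back into the kernel map:
 * allocate VA, populate just enough pages to cover c_populated_offset,
 * read the data back via vm_swap_get (decrypting and checksumming as
 * configured), then requeue the segment via c_seg_swapin_requeue.
 */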
/*
 * c_seg has to be locked and is returned locked.
 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
 */
void
c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction)
{
    vm_offset_t addr = 0;
    uint32_t    io_size = 0;
    uint64_t    f_offset;

#if !CHECKSUM_THE_SWAP
    if (c_seg->c_ondisk)
        c_seg_trim_tail(c_seg);
#endif
    io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
    f_offset = c_seg->c_store.c_swap_handle;

    C_SEG_BUSY(c_seg);
    lck_mtx_unlock_always(&c_seg->c_lock);

    if (c_seg->c_ondisk) {

        PAGE_REPLACEMENT_DISALLOWED(FALSE);

        if (kernel_memory_allocate(kernel_map, &addr, C_SEG_ALLOCSIZE, 0, KMA_COMPRESSOR | KMA_VAONLY) != KERN_SUCCESS)
            panic("c_seg_swapin: kernel_memory_allocate failed\n");

        kernel_memory_populate(kernel_map, addr, io_size, KMA_COMPRESSOR);

        if (vm_swap_get(addr, f_offset, io_size) != KERN_SUCCESS) {
            PAGE_REPLACEMENT_DISALLOWED(TRUE);

            kernel_memory_depopulate(kernel_map, addr, io_size, KMA_COMPRESSOR);
            kmem_free(kernel_map, addr, C_SEG_ALLOCSIZE);

            c_seg->c_store.c_buffer = (int32_t*) NULL;
            c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
        } else {
            c_seg->c_store.c_buffer = (int32_t*) addr;
#if ENCRYPTED_SWAP
            vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */

#if CHECKSUM_THE_SWAP
            if (c_seg->cseg_swap_size != io_size)
                panic("swapin size doesn't match swapout size");

            if (c_seg->cseg_hash != hash_string((char*) c_seg->c_store.c_buffer, (int)io_size)) {
                panic("c_seg_swapin - Swap hash mismatch\n");
            }
#endif /* CHECKSUM_THE_SWAP */

            PAGE_REPLACEMENT_DISALLOWED(TRUE);

            if (force_minor_compaction == TRUE) {
                lck_mtx_lock_spin_always(&c_seg->c_lock);

                c_seg_minor_compaction_and_unlock(c_seg, FALSE);
            }
            OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
            OSAddAtomic64(C_SEG_ALLOCSIZE, &compressor_kvspace_used);
        }
    }
    c_seg_swapin_requeue(c_seg);

    C_SEG_WAKEUP_DONE(c_seg);
}
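

/*
 * c_compress_page:
 * WKdm-compress one page from 'src' into the current segment and record
 * its location in '*slot_ptr'.  the mapping is stored as
 * <segno + 1, slot index> so a raw value of 0 can never name a live slot;
 * e.g. a page landing in slot 5 of segment 12 yields s_cseg == 13 and
 * s_cindx == 5, and the decompress side recovers the segment with
 * c_segments[slot_ptr->s_cseg - 1].c_seg.
 */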
static int
c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
{
    int         c_size;
    int         c_rounded_size = 0;
    int         max_csize;
    c_slot_t    cs;
    c_segment_t c_seg;

    KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
retry:
    if ((c_seg = c_seg_allocate(current_chead)) == NULL)
        return (1);
    /*
     * returns with c_seg lock held
     * and PAGE_REPLACEMENT_DISALLOWED(TRUE)
     */
    cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);

    cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
    assert(slot_ptr == (c_slot_mapping_t)C_SLOT_UNPACK_PTR(cs));

    cs->c_offset = c_seg->c_nextoffset;

    max_csize = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);

    if (max_csize > PAGE_SIZE)
        max_csize = PAGE_SIZE;

    if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) max_csize + PAGE_SIZE &&
        (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) < C_SEG_ALLOCSIZE)) {
        lck_mtx_unlock_always(&c_seg->c_lock);

        kernel_memory_populate(kernel_map,
                               (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
                               PAGE_SIZE,
                               KMA_COMPRESSOR);

        lck_mtx_lock_spin_always(&c_seg->c_lock);

        c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(PAGE_SIZE);
    }
#if CHECKSUM_THE_DATA
    cs->c_hash_data = hash_string(src, PAGE_SIZE);
#endif
    c_size = WKdm_compress_new((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
                               (WK_word *)(uintptr_t)scratch_buf, max_csize - 4);
    assert(c_size <= (max_csize - 4) && c_size >= -1);

    if (c_size == -1) {

        if (max_csize < PAGE_SIZE) {
            c_current_seg_filled(c_seg, current_chead);

            PAGE_REPLACEMENT_DISALLOWED(FALSE);

            goto retry;
        }
        c_size = PAGE_SIZE;

        memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
    }
#if CHECKSUM_THE_COMPRESSED_DATA
    cs->c_hash_compressed_data = hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
    c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

    PACK_C_SIZE(cs, c_size);
    c_seg->c_bytes_used += c_rounded_size;
    c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);

    slot_ptr->s_cindx = c_seg->c_nextslot++;
    /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
    slot_ptr->s_cseg = c_seg->c_mysegno + 1;

    if (c_seg->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg->c_nextslot >= C_SLOT_MAX)
        c_current_seg_filled(c_seg, current_chead);
    else
        lck_mtx_unlock_always(&c_seg->c_lock);

    PAGE_REPLACEMENT_DISALLOWED(FALSE);

    OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
    OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
    OSAddAtomic64(c_size, &c_segment_compressed_bytes);

    OSAddAtomic(1, &c_segment_pages_compressed);
    OSAddAtomic(1, &sample_period_compression_count);

    KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);

    return (0);
}
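

/*
 * c_decompress_page:
 * given the slot mapping in 'slot_ptr', locate the owning segment
 * (swapping it back in if necessary), decompress the page into 'dst'
 * (or just free the slot when dst == NULL), and then decide whether the
 * segment has become empty or sparse enough to warrant compaction or a
 * move to the sparse list... '*zeroslot' tells the caller whether the
 * user's slot should be cleared on return.
 */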
static int
c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
{
    c_slot_t    cs;
    c_segment_t c_seg;
    int         c_indx;
    int         c_rounded_size;
    uint32_t    c_size;
    int         retval = 0;
    boolean_t   c_seg_has_data = TRUE;
    boolean_t   c_seg_swappedin = FALSE;
    boolean_t   need_unlock = TRUE;
    boolean_t   consider_defragmenting = FALSE;

ReTry:
    PAGE_REPLACEMENT_DISALLOWED(TRUE);

    /*
     * if hibernation is enabled, it indicates (via a call
     * to 'vm_decompressor_lock') that no further
     * decompressions are allowed once it reaches
     * the point of flushing all of the currently dirty
     * anonymous memory through the compressor and out
     * to disk... in this state we allow freeing of compressed
     * pages and must honor the C_DONT_BLOCK case
     */
    if (dst && decompressions_blocked == TRUE) {
        if (flags & C_DONT_BLOCK) {

            PAGE_REPLACEMENT_DISALLOWED(FALSE);

            *zeroslot = 0;
            return (-2);
        }
        /*
         * it's safe to atomically assert and block behind the
         * lock held in shared mode because "decompressions_blocked" is
         * only set and cleared and the thread_wakeup done when the lock
         * is held exclusively
         */
        assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);

        PAGE_REPLACEMENT_DISALLOWED(FALSE);

        thread_block(THREAD_CONTINUE_NULL);

        goto ReTry;
    }
    /* s_cseg is actually "segno+1" */
    c_seg = c_segments[slot_ptr->s_cseg - 1].c_seg;

    lck_mtx_lock_spin_always(&c_seg->c_lock);

    if (flags & C_DONT_BLOCK) {
        if (c_seg->c_busy || (c_seg->c_ondisk && dst)) {
            retval = -2;
            *zeroslot = 0;

            goto done;
        }
    }
    if (c_seg->c_busy) {

        PAGE_REPLACEMENT_DISALLOWED(FALSE);

        c_seg_wait_on_busy(c_seg);

        goto ReTry;
    }
    c_indx = slot_ptr->s_cindx;

    cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);

    c_size = UNPACK_C_SIZE(cs);

    c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;

    if (dst) {
        uint32_t     age_of_cseg;
        clock_sec_t  cur_ts_sec;
        clock_nsec_t cur_ts_nsec;

        if (c_seg->c_on_swappedout_q || c_seg->c_on_swappedout_sparse_q) {
            if (c_seg->c_ondisk)
                c_seg_swappedin = TRUE;
            c_seg_swapin(c_seg, FALSE);
        }
        if (c_seg->c_store.c_buffer == NULL) {
            c_seg_has_data = FALSE;
            goto c_seg_invalid_data;
        }
#if CHECKSUM_THE_COMPRESSED_DATA
        if (cs->c_hash_compressed_data != hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))
            panic("compressed data doesn't match original");
#endif
        if (c_rounded_size == PAGE_SIZE) {
            /*
             * page wasn't compressible... just copy it out
             */
            memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
        } else {
            uint32_t my_cpu_no;
            char     *scratch_buf;

            /*
             * we're behind the c_seg lock held in spin mode
             * which means pre-emption is disabled... therefore
             * the following sequence is atomic and safe
             */
            my_cpu_no = cpu_number();

            assert(my_cpu_no < compressor_cpus);

            scratch_buf = &compressor_scratch_bufs[my_cpu_no * WKdm_SCRATCH_BUF_SIZE];
            WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
                                (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
        }
#if CHECKSUM_THE_DATA
        if (cs->c_hash_data != hash_string(dst, PAGE_SIZE))
            panic("decompressed data doesn't match original");
#endif
        if (!c_seg->c_was_swapped_in) {

            clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);

            age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;

            if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE)
                OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
            else
                OSAddAtomic(1, &overage_decompressions_during_sample_period);

            OSAddAtomic(1, &sample_period_decompression_count);
        }
    } else {
        if (c_seg->c_store.c_buffer == NULL)
            c_seg_has_data = FALSE;
    }
c_seg_invalid_data:

    if (c_seg_has_data == TRUE) {
        if (c_seg_swappedin == TRUE)
            retval = 1;
        else
            retval = 0;
    } else
        retval = -1;

    if (flags & C_KEEP) {
        *zeroslot = 0;
        goto done;
    }
    c_seg->c_bytes_unused += c_rounded_size;
    c_seg->c_bytes_used -= c_rounded_size;
    PACK_C_SIZE(cs, 0);

    if (c_indx < c_seg->c_firstemptyslot)
        c_seg->c_firstemptyslot = c_indx;

    OSAddAtomic(-1, &c_segment_pages_compressed);

    if (c_seg_has_data == TRUE && !c_seg->c_ondisk) {
        /*
         * c_ondisk == TRUE can occur when we're doing a
         * free of a compressed page (i.e. dst == NULL)
         */
        OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
    }
    if (!c_seg->c_filling) {
        if (c_seg->c_bytes_used == 0) {
            if (!c_seg->c_ondisk) {
                int pages_populated;

                pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
                c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);

                if (pages_populated) {
                    assert(c_seg->c_store.c_buffer != NULL);

                    C_SEG_BUSY(c_seg);
                    lck_mtx_unlock_always(&c_seg->c_lock);

                    kernel_memory_depopulate(kernel_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);

                    lck_mtx_lock_spin_always(&c_seg->c_lock);
                    C_SEG_WAKEUP_DONE(c_seg);
                }
                if (!c_seg->c_on_minorcompact_q && !c_seg->c_on_swapout_q)
                    c_seg_need_delayed_compaction(c_seg);
            } else
                assert(c_seg->c_on_swappedout_sparse_q);

        } else if (c_seg->c_on_minorcompact_q) {

            if (C_SEG_INCORE_IS_SPARSE(c_seg)) {
                c_seg_try_minor_compaction_and_unlock(c_seg);
                need_unlock = FALSE;
            }
        } else if (!c_seg->c_ondisk) {

            if (c_seg_has_data == TRUE && !c_seg->c_on_swapout_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
                c_seg_need_delayed_compaction(c_seg);

        } else if (!c_seg->c_on_swappedout_sparse_q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {

            c_seg_move_to_sparse_list(c_seg);
            consider_defragmenting = TRUE;
        }
    }
done:
    if (need_unlock == TRUE)
        lck_mtx_unlock_always(&c_seg->c_lock);

    PAGE_REPLACEMENT_DISALLOWED(FALSE);

    if (consider_defragmenting == TRUE)
        vm_swap_consider_defragmenting();

    return (retval);
}
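

/*
 * vm_compressor_get:
 * decompress the page named by '*slot' into the physical page 'pn',
 * addressed through the physmap.
 */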
int
vm_compressor_get(ppnum_t pn, int *slot, int flags)
{
    char *dst;
    int  zeroslot = 1;
    int  retval;

#if __x86_64__
    dst = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif

    retval = c_decompress_page(dst, (c_slot_mapping_t)slot, flags, &zeroslot);
    /*
     * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
     * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'c_ondisk' set
     */
    if (zeroslot)
        *slot = 0;
    /*
     * returns 0 if we successfully decompressed a page from a segment already in memory
     * returns 1 if we had to first swap in the segment, before successfully decompressing the page
     * returns -1 if we encountered an error swapping in the segment - decompression failed
     * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'c_ondisk' set
     */
    return (retval);
}
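

/*
 * vm_compressor_free:
 * release the compressed copy named by '*slot' without decompressing it
 * (dst == NULL), clearing the slot on success.
 */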
int
vm_compressor_free(int *slot, int flags)
{
    int zeroslot = 1;
    int retval;

    assert(flags == 0 || flags == C_DONT_BLOCK);

    retval = c_decompress_page(NULL, (c_slot_mapping_t)slot, flags, &zeroslot);
    /*
     * returns 0 if we successfully freed the specified compressed page
     * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
     */
    if (retval == 0)
        *slot = 0;

    return (retval);
}
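

/*
 * vm_compressor_put:
 * compress the physical page 'pn' into the caller's current segment,
 * filling in '*slot' with the resulting mapping.
 */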
int
vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
{
    char *src;
    int  retval;

#if __x86_64__
    src = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif

    retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);

    return (retval);
}
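

/*
 * vm_compressor_transfer:
 * move a compressed-page mapping from '*src_slot_p' to '*dst_slot_p',
 * repointing the owning c_slot's packed back-pointer at the destination
 * so a later compaction can still find and update the mapping.
 */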
void
vm_compressor_transfer(
    int *dst_slot_p,
    int *src_slot_p)
{
    c_slot_mapping_t dst_slot, src_slot;
    c_segment_t      c_seg;
    int              c_indx;
    c_slot_t         cs;

    dst_slot = (c_slot_mapping_t) dst_slot_p;
    src_slot = (c_slot_mapping_t) src_slot_p;

Retry:
    PAGE_REPLACEMENT_DISALLOWED(TRUE);
    /* get segment for src_slot */
    c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
    /* lock segment */
    lck_mtx_lock_spin_always(&c_seg->c_lock);
    /* wait if it's busy */
    if (c_seg->c_busy) {
        PAGE_REPLACEMENT_DISALLOWED(FALSE);
        c_seg_wait_on_busy(c_seg);
        goto Retry;
    }
    /* find the c_slot */
    c_indx = src_slot->s_cindx;
    cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
    /* point the c_slot back to dst_slot instead of src_slot */
    cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
    /* transfer the slot mapping and invalidate the source */
    *dst_slot_p = *src_slot_p;
    *src_slot_p = 0;

    lck_mtx_unlock_always(&c_seg->c_lock);
    PAGE_REPLACEMENT_DISALLOWED(FALSE);
}