#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
+#include <vm/vm_compressor_algorithms.h>
+#include <vm/vm_fault.h>
+#include <vm/vm_protos.h>
#include <mach/mach_host.h> /* for host_info() */
#include <kern/ledger.h>
+#include <kern/policy_internal.h>
+#include <kern/thread_group.h>
+#include <san/kasan.h>
+#if !CONFIG_EMBEDDED
#include <i386/misc_protos.h>
-
-#include <default_pager/default_pager_alerts.h>
-#include <default_pager/default_pager_object_server.h>
+#endif
#include <IOKit/IOHibernatePrivate.h>
+#if POPCOUNT_THE_COMPRESSED_DATA
+boolean_t popcount_c_segs = TRUE;
+
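+/*
+ * Compute the population count (number of set bits) of 'sz' bytes starting
+ * at 'ins', walking 16-byte, 4-byte and then single-byte chunks.  The value
+ * is recorded at compress time and re-checked at decompress time; a fixed
+ * sentinel is returned when this validation has been disabled.
+ */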
+static inline uint32_t vmc_pop(uintptr_t ins, int sz) {
+ uint32_t rv = 0;
+
+ if (__probable(popcount_c_segs == FALSE)) {
+ return 0xDEAD707C;
+ }
+
+ while (sz >= 16) {
+ uint32_t rv1, rv2;
+ uint64_t *ins64 = (uint64_t *) ins;
+ uint64_t *ins642 = (uint64_t *) (ins + 8);
+ rv1 = __builtin_popcountll(*ins64);
+ rv2 = __builtin_popcountll(*ins642);
+ rv += rv1 + rv2;
+ sz -= 16;
+ ins += 16;
+ }
+
+ while (sz >= 4) {
+ uint32_t *ins32 = (uint32_t *) ins;
+ rv += __builtin_popcount(*ins32);
+ sz -= 4;
+ ins += 4;
+ }
+
+ while (sz > 0) {
+ char *ins8 = (char *)ins;
+ rv += __builtin_popcount(*ins8);
+ sz--;
+ ins++;
+ }
+ return rv;
+}
+#endif
+
/*
* vm_compressor_mode has a hierarchy of control to set its value.
* boot-args are checked first, then device-tree, and finally
* the default value that is defined below. See vm_fault_init() for
* the boot-arg & device-tree code.
*/
+#if CONFIG_EMBEDDED
+#if CONFIG_FREEZE
+int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
+
+void *freezer_chead; /* The chead used to track c_segs allocated for the exclusive use of holding just one task's compressed memory.*/
+char *freezer_compressor_scratch_buf = NULL;
+
+#define VM_MAX_FREEZER_CSEG_SWAP_COUNT 64 /* The maximum number of c_segs holding just one task's compressed memory that can be swapped out to disk.*/
+extern int c_freezer_swapout_count; /* This count keeps track of the # of c_segs holding just one task's compressed memory on the swapout queue. This count is used during each freeze i.e. on a per-task basis.*/
+
+#else /* CONFIG_FREEZE */
+int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
+#endif /* CONFIG_FREEZE */
+
+int vm_scale = 1;
+
+#else /* CONFIG_EMBEDDED */
int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
int vm_scale = 16;
+#endif /* CONFIG_EMBEDDED */
int vm_compressor_is_active = 0;
int vm_compression_limit = 0;
int vm_compressor_available = 0;
-extern boolean_t vm_swap_up;
extern void vm_pageout_io_throttle(void);
-extern int not_in_kdp;
#if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
extern unsigned int hash_string(char *cp, int len);
+static unsigned int vmc_hash(char *, int);
+boolean_t checksum_c_segs = TRUE;
+
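+/*
+ * Hash wrapper that returns a fixed sentinel when checksum validation has
+ * been disabled, so compress- and decompress-time values still match
+ * without paying for hash_string().
+ */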
+unsigned int vmc_hash(char *cp, int len) {
+ if (__probable(checksum_c_segs == FALSE)) {
+ return 0xDEAD7A37;
+ }
+ return hash_string(cp, len);
+}
#endif
#define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
#define C_SV_CSEG_ID ((1 << 22) - 1)
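+/*
+ * The all-ones 22-bit segment number is reserved for "single value" pages
+ * (pages whose every word holds the same 32-bit pattern, most commonly
+ * zero); those are tracked via c_segment_sv_hash_table rather than being
+ * stored in a c_segment.
+ */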
-struct c_slot_mapping {
- uint32_t s_cseg:22, /* segment number + 1 */
- s_cindx:10; /* index in the segment */
-};
-#define C_SLOT_MAX_INDEX (1 << 10)
-
-typedef struct c_slot_mapping *c_slot_mapping_t;
-
-
union c_segu {
c_segment_t c_seg;
- uint32_t c_segno;
+ uintptr_t c_segno;
};
-#define C_SLOT_PACK_PTR(ptr) (((uintptr_t)ptr - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS) >> 2)
-#define C_SLOT_UNPACK_PTR(cslot) ((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)
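+/*
+ * A c_slot's back-pointer to its owning slot mapping is packed by rebasing
+ * it against KERNEL_PMAP_HEAP_RANGE_START and dropping the low two bits
+ * (mappings are at least 4-byte aligned) so that it fits in c_packed_ptr.
+ */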
+#define C_SLOT_PACK_PTR(ptr) (((uintptr_t)ptr - (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) >> 2)
+#define C_SLOT_UNPACK_PTR(cslot) ((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START)
uint32_t c_segment_count = 0;
lck_grp_attr_t vm_compressor_lck_grp_attr;
lck_attr_t vm_compressor_lck_attr;
lck_grp_t vm_compressor_lck_grp;
-
-#if __i386__ || __x86_64__
lck_mtx_t *c_list_lock;
-#else /* __i386__ || __x86_64__ */
-lck_spin_t *c_list_lock;
-#endif /* __i386__ || __x86_64__ */
-
lck_rw_t c_master_lock;
boolean_t decompressions_blocked = FALSE;
uint32_t swapout_target_age = 0;
uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t overage_decompressions_during_sample_period = 0;
+uint32_t vm_compressor_pages_grabbed = 0;
+
-void do_fastwake_warmup(void);
+void do_fastwake_warmup(queue_head_t *, boolean_t);
boolean_t fastwake_warmup = FALSE;
boolean_t fastwake_recording_in_progress = FALSE;
clock_sec_t dont_trim_until_ts = 0;
struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned (8)));
-
static boolean_t compressor_needs_to_swap(void);
static void vm_compressor_swap_trigger_thread(void);
static void vm_compressor_do_delayed_compactions(boolean_t);
static void vm_compressor_compact_and_swap(boolean_t);
static void vm_compressor_age_swapped_in_segments(boolean_t);
+#if !CONFIG_EMBEDDED
static void vm_compressor_take_paging_space_action(void);
-
-boolean_t vm_compressor_low_on_space(void);
+#endif
void compute_swapout_target_age(void);
int c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
int c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
-void c_seg_need_delayed_compaction(c_segment_t);
void c_seg_move_to_sparse_list(c_segment_t);
void c_seg_insert_into_q(queue_head_t *, c_segment_t);
uint64_t vm_available_memory(void);
uint64_t vm_compressor_pages_compressed(void);
-extern unsigned int dp_pages_free, dp_pages_reserve;
+/*
+ * indicate the need to do a major compaction if
+ * the overall set of in-use compression segments
+ * becomes sparse... on systems that support pressure
+ * driven swapping, this will also cause swapouts to
+ * be initiated.
+ */
+static inline boolean_t vm_compressor_needs_to_major_compact()
+{
+ uint32_t incore_seg_count;
+
+ incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
+
+ if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
+ ((incore_seg_count * C_SEG_MAX_PAGES) - VM_PAGE_COMPRESSOR_COUNT) >
+ ((incore_seg_count / 8) * C_SEG_MAX_PAGES))
+ return (1);
+ return (0);
+}
+
uint64_t
vm_available_memory(void)
boolean_t
-vm_compression_available(void)
+vm_compressor_low_on_space(void)
{
- if ( !(COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE))
- return (FALSE);
-
- if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit)
- return (FALSE);
+ if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
+ (c_segment_count > c_segments_nearing_limit))
+ return (TRUE);
- return (TRUE);
+ return (FALSE);
}
boolean_t
-vm_compressor_low_on_space(void)
+vm_compressor_out_of_space(void)
{
- if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
- (c_segment_count > c_segments_nearing_limit))
+ if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
+ (c_segment_count >= c_segments_limit))
return (TRUE);
return (FALSE);
if (task == kernel_task)
return (0);
- if (COMPRESSED_PAGER_IS_SWAPLESS || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS)
- return (0);
-
- if (COMPRESSED_PAGER_IS_SWAPBACKED || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED) {
+ if (VM_CONFIG_SWAP_IS_ACTIVE) {
if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) &&
(unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4))
return (1);
- } else {
- if (((dp_pages_free + dp_pages_reserve < 2000) && VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) &&
- get_task_resident_size(task) > (((AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE) / 5))
- return (1);
}
return (0);
}
+#if DEVELOPMENT || DEBUG
+boolean_t kill_on_no_paging_space = FALSE; /* On compressor/swap exhaustion, kill the largest process regardless of
+ * its chosen process policy. Controlled by a boot-arg of the same name. */
+#endif /* DEVELOPMENT || DEBUG */
+
+#if !CONFIG_EMBEDDED
static uint32_t no_paging_space_action_in_progress = 0;
extern void memorystatus_send_low_swap_note(void);
if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {
if (no_paging_space_action()) {
+#if DEVELOPMENT || DEBUG
+ if (kill_on_no_paging_space == TRUE) {
+ /*
+ * Since we are choosing to always kill a process, we don't need the
+ * "out of application memory" dialog box in this mode. And, hence we won't
+ * send the knote.
+ */
+ no_paging_space_action_in_progress = 0;
+ return;
+ }
+#endif /* DEVELOPMENT || DEBUG */
memorystatus_send_low_swap_note();
}
}
}
}
-
+#endif /* !CONFIG_EMBEDDED */
void
thread_wakeup((event_t)&decompressions_blocked);
}
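+
+/*
+ * Copy the per-slot metadata (compressed size, packed back-pointer, and any
+ * configured checksums/popcount/codec) from one c_slot to another; callers
+ * are responsible for setting the destination's c_offset.
+ */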
+static inline void cslot_copy(c_slot_t cdst, c_slot_t csrc) {
+#if CHECKSUM_THE_DATA
+ cdst->c_hash_data = csrc->c_hash_data;
+#endif
+#if CHECKSUM_THE_COMPRESSED_DATA
+ cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
+#endif
+#if POPCOUNT_THE_COMPRESSED_DATA
+ cdst->c_pop_cdata = csrc->c_pop_cdata;
+#endif
+ cdst->c_size = csrc->c_size;
+ cdst->c_packed_ptr = csrc->c_packed_ptr;
+#if defined(__arm__) || defined(__arm64__)
+ cdst->c_codec = csrc->c_codec;
+#endif
+}
+vm_map_t compressor_map;
+uint64_t compressor_pool_max_size;
+uint64_t compressor_pool_size;
+uint32_t compressor_pool_multiplier;
+
+#if DEVELOPMENT || DEBUG
+/*
+ * Compressor segments are write-protected in development/debug
+ * kernels to help debug memory corruption.
+ * In cases where performance is a concern, this can be disabled
+ * via the boot-arg "-disable_cseg_write_protection".
+ */
+boolean_t write_protect_c_segs = TRUE;
+int vm_compressor_test_seg_wp;
+uint32_t vm_ktrace_enabled;
+#endif /* DEVELOPMENT || DEBUG */
void
vm_compressor_init(void)
c_slot_t cs = &cs_dummy;
int c_segment_min_size;
int c_segment_padded_size;
+ int attempts = 1;
+ kern_return_t retval = KERN_SUCCESS;
+ vm_offset_t start_addr = 0;
+ vm_size_t c_segments_arr_size = 0, compressor_submap_size = 0;
+ vm_map_kernel_flags_t vmk_flags;
+#if RECORD_THE_COMPRESSED_DATA
+ vm_size_t c_compressed_record_sbuf_size = 0;
+#endif /* RECORD_THE_COMPRESSED_DATA */
+
+#if DEVELOPMENT || DEBUG
+ char bootarg_name[32];
+ if (PE_parse_boot_argn("-kill_on_no_paging_space", bootarg_name, sizeof (bootarg_name))) {
+ kill_on_no_paging_space = TRUE;
+ }
+ if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof (bootarg_name))) {
+ write_protect_c_segs = FALSE;
+ }
+ int vmcval = 1;
+ PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
+
+ if (kern_feature_override(KF_COMPRSV_OVRD)) {
+ vmcval = 0;
+ }
+ if (vmcval == 0) {
+#if POPCOUNT_THE_COMPRESSED_DATA
+ popcount_c_segs = FALSE;
+#endif
+#if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
+ checksum_c_segs = FALSE;
+#endif
+ write_protect_c_segs = FALSE;
+ }
+#endif /* DEVELOPMENT || DEBUG */
/*
* ensure that any pointer that gets created from
PE_parse_boot_argn("vm_compression_limit", &vm_compression_limit, sizeof (vm_compression_limit));
+#ifdef CONFIG_EMBEDDED
+ vm_compressor_minorcompact_threshold_divisor = 20;
+ vm_compressor_majorcompact_threshold_divisor = 30;
+ vm_compressor_unthrottle_threshold_divisor = 40;
+ vm_compressor_catchup_threshold_divisor = 60;
+#else
if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
vm_compressor_minorcompact_threshold_divisor = 11;
vm_compressor_majorcompact_threshold_divisor = 13;
vm_compressor_unthrottle_threshold_divisor = 35;
vm_compressor_catchup_threshold_divisor = 50;
}
+#endif
/*
* vm_page_init_lck_grp is now responsible for calling vm_compressor_init_locks
* c_master_lock needs to be available early so that "vm_page_find_contiguous" can
* use PAGE_REPLACEMENT_ALLOWED to coordinate with the compressor.
*/
-#if __i386__ || __x86_64__
c_list_lock = lck_mtx_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr);
-#else /* __i386__ || __x86_64__ */
- c_list_lock = lck_spin_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr);
-#endif /* __i386__ || __x86_64__ */
-
queue_init(&c_bad_list_head);
queue_init(&c_age_list_head);
queue_init(&c_swappedout_list_head);
queue_init(&c_swappedout_sparse_list_head);
- c_segment_min_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot));
-
- for (c_segment_padded_size = 128; c_segment_padded_size < c_segment_min_size; c_segment_padded_size = c_segment_padded_size << 1);
-
- compressor_segment_zone = zinit(c_segment_padded_size, 128000 * c_segment_padded_size, PAGE_SIZE, "compressor_segment");
- zone_change(compressor_segment_zone, Z_CALLERACCT, FALSE);
- zone_change(compressor_segment_zone, Z_NOENCRYPT, TRUE);
-
- c_seg_fixed_array_len = (c_segment_padded_size - sizeof(struct c_segment)) / sizeof(struct c_slot);
-
c_free_segno_head = -1;
c_segments_available = 0;
- if (vm_compression_limit == 0) {
- c_segment_pages_compressed_limit = (uint32_t)((max_mem / PAGE_SIZE)) * vm_scale;
+ if (vm_compression_limit)
+ compressor_pool_size = (uint64_t)vm_compression_limit * PAGE_SIZE_64;
-#define OLD_SWAP_LIMIT (1024 * 1024 * 16)
-#define MAX_SWAP_LIMIT (1024 * 1024 * 128)
-
- if (c_segment_pages_compressed_limit > (OLD_SWAP_LIMIT))
- c_segment_pages_compressed_limit = OLD_SWAP_LIMIT;
+ compressor_pool_max_size = C_SEG_MAX_LIMIT;
+ compressor_pool_max_size *= C_SEG_BUFSIZE;
- if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE_64))
- c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE_64);
- } else {
- if (vm_compression_limit < MAX_SWAP_LIMIT)
- c_segment_pages_compressed_limit = vm_compression_limit;
+#if defined(__x86_64__)
+
+ if (vm_compression_limit == 0) {
+
+ if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL))
+ compressor_pool_size = 16ULL * max_mem;
+ else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL))
+ compressor_pool_size = 8ULL * max_mem;
+ else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL))
+ compressor_pool_size = 4ULL * max_mem;
else
- c_segment_pages_compressed_limit = MAX_SWAP_LIMIT;
+ compressor_pool_size = 2ULL * max_mem;
}
- if ((c_segments_limit = c_segment_pages_compressed_limit / (C_SEG_BUFSIZE / PAGE_SIZE)) > C_SEG_MAX_LIMIT)
- c_segments_limit = C_SEG_MAX_LIMIT;
+ if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL))
+ compressor_pool_multiplier = 1;
+ else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL))
+ compressor_pool_multiplier = 2;
+ else
+ compressor_pool_multiplier = 4;
- c_segment_pages_compressed_nearing_limit = (c_segment_pages_compressed_limit * 98) / 100;
- c_segments_nearing_limit = (c_segments_limit * 98) / 100;
+#elif defined(__arm__)
- c_segments_busy = FALSE;
+#define VM_RESERVE_SIZE (1024 * 1024 * 256)
+#define MAX_COMPRESSOR_POOL_SIZE (1024 * 1024 * 450)
+
+ if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE)
+ compressor_pool_max_size = MAX_COMPRESSOR_POOL_SIZE;
+
+ if (vm_compression_limit == 0)
+ compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE;
+ compressor_pool_multiplier = 1;
+#else
+ if (compressor_pool_max_size > max_mem)
+ compressor_pool_max_size = max_mem;
+
+ if (vm_compression_limit == 0)
+ compressor_pool_size = max_mem;
+ compressor_pool_multiplier = 1;
+#endif
+ if (compressor_pool_size > compressor_pool_max_size)
+ compressor_pool_size = compressor_pool_max_size;
- if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS)
+try_again:
+ c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(C_SEG_ALLOCSIZE));
+ c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
+
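+ /*
+ * the compressed-page limit below is the raw page capacity of the
+ * segment pool scaled by compressor_pool_multiplier; a multiplier
+ * greater than 1 allows the limit to exceed the pool's 1:1 capacity
+ */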
+ c_segment_pages_compressed_limit = (c_segments_limit * (C_SEG_BUFSIZE / PAGE_SIZE) * compressor_pool_multiplier);
+
+ if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE))
+ c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
+
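+ /*
+ * the "nearing" limits sit at 98% of the hard limits; they are what
+ * vm_compressor_low_on_space() checks, so corrective action can be
+ * taken before vm_compressor_out_of_space() becomes true
+ */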
+ c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
+
+ /*
+ * Submap needs space for:
+ * - c_segments
+ * - c_buffers
+ * - swap reclamations -- C_SEG_BUFSIZE
+ */
+ c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
+ c_buffers_size = vm_map_round_page(((vm_size_t)C_SEG_ALLOCSIZE * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
+
+ compressor_submap_size = c_segments_arr_size + c_buffers_size + C_SEG_BUFSIZE;
+
+#if RECORD_THE_COMPRESSED_DATA
+ c_compressed_record_sbuf_size = (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2);
+ compressor_submap_size += c_compressed_record_sbuf_size;
+#endif /* RECORD_THE_COMPRESSED_DATA */
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR,
+ &compressor_map);
+
+ if (retval != KERN_SUCCESS) {
+ if (++attempts > 3)
+ panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size);
+
+ compressor_pool_size = compressor_pool_size / 2;
+
+ kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size);
+ goto try_again;
+ }
+ if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS)
panic("vm_compressor_init: kernel_memory_allocate failed - c_segments\n");
- c_buffers_size = (vm_size_t)C_SEG_ALLOCSIZE * (vm_size_t)c_segments_limit;
- if (kernel_memory_allocate(kernel_map, &c_buffers, c_buffers_size, 0, KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS)
+ if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0, KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS)
panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers\n");
+
+ c_segment_min_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot));
+
+ for (c_segment_padded_size = 128; c_segment_padded_size < c_segment_min_size; c_segment_padded_size = c_segment_padded_size << 1);
+
+ compressor_segment_zone = zinit(c_segment_padded_size, c_segments_limit * c_segment_padded_size, PAGE_SIZE, "compressor_segment");
+ zone_change(compressor_segment_zone, Z_CALLERACCT, FALSE);
+ zone_change(compressor_segment_zone, Z_NOENCRYPT, TRUE);
+
+ c_seg_fixed_array_len = (c_segment_padded_size - sizeof(struct c_segment)) / sizeof(struct c_slot);
+
+ c_segments_busy = FALSE;
+
c_segments_next_page = (caddr_t)c_segments;
+ vm_compressor_algorithm_init();
{
host_basic_info_data_t hinfo;
host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
compressor_cpus = hinfo.max_cpus;
+ compressor_scratch_bufs = kalloc_tag(compressor_cpus * vm_compressor_get_decode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR);
- compressor_scratch_bufs = kalloc_tag(compressor_cpus * WKdm_SCRATCH_BUF_SIZE, VM_KERN_MEMORY_COMPRESSOR);
-
- kdp_compressor_scratch_buf = kalloc_tag(WKdm_SCRATCH_BUF_SIZE, VM_KERN_MEMORY_COMPRESSOR);
+ kdp_compressor_scratch_buf = kalloc_tag(vm_compressor_get_decode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR);
kdp_compressor_decompressed_page = kalloc_tag(PAGE_SIZE, VM_KERN_MEMORY_COMPRESSOR);
kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page);
kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr);
}
-#if CONFIG_FREEZE
- freezer_compressor_scratch_buf = kalloc_tag(WKdm_SCRATCH_BUF_SIZE, VM_KERN_MEMORY_COMPRESSOR);
+#if CONFIG_FREEZE
+ freezer_compressor_scratch_buf = kalloc_tag(vm_compressor_get_encode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR);
#endif
#if RECORD_THE_COMPRESSED_DATA
- if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&c_compressed_record_sbuf, (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2), 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS)
+ if (kernel_memory_allocate(compressor_map, (vm_offset_t *)&c_compressed_record_sbuf, c_compressed_record_sbuf_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS)
panic("vm_compressor_init: kernel_memory_allocate failed - c_compressed_record_sbuf\n");
c_compressed_record_cptr = c_compressed_record_sbuf;
- c_compressed_record_ebuf = c_compressed_record_sbuf + C_SEG_ALLOCSIZE + (PAGE_SIZE * 2);
+ c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
#endif
if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
- BASEPRI_PREEMPT - 1, &thread) != KERN_SUCCESS) {
+ BASEPRI_VM, &thread) != KERN_SUCCESS) {
panic("vm_compressor_swap_trigger_thread: create failed");
}
thread_deallocate(thread);
- assert(default_pager_init_flag == 0);
-
if (vm_pageout_internal_start() != KERN_SUCCESS) {
panic("vm_compressor_init: Failed to start the internal pageout thread.\n");
}
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
+ if (VM_CONFIG_SWAP_IS_PRESENT)
vm_compressor_swap_init();
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED)
+ if (VM_CONFIG_COMPRESSOR_IS_ACTIVE)
vm_compressor_is_active = 1;
#if CONFIG_FREEZE
memorystatus_freeze_enabled = TRUE;
#endif /* CONFIG_FREEZE */
- default_pager_init_flag = 1;
vm_compressor_available = 1;
vm_page_reactivate_all_throttled();
{
int c_indx;
int32_t bytes_used;
- int32_t bytes_unused;
uint32_t c_rounded_size;
uint32_t c_size;
c_slot_t cs;
panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs->c_size);
}
bytes_used = 0;
- bytes_unused = 0;
for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
bytes_used += c_rounded_size;
#if CHECKSUM_THE_COMPRESSED_DATA
- if (c_size && cs->c_hash_compressed_data != hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))
- panic("compressed data doesn't match original");
+ unsigned csvhash;
+ if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
+ addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
+ panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
+ }
#endif
}
void
-c_seg_need_delayed_compaction(c_segment_t c_seg)
+c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
{
boolean_t clear_busy = FALSE;
- if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
- C_SEG_BUSY(c_seg);
+ if (c_list_lock_held == FALSE) {
+ if ( !lck_mtx_try_lock_spin_always(c_list_lock)) {
+ C_SEG_BUSY(c_seg);
- lck_mtx_unlock_always(&c_seg->c_lock);
- lck_mtx_lock_spin_always(c_list_lock);
- lck_mtx_lock_spin_always(&c_seg->c_lock);
+ lck_mtx_unlock_always(&c_seg->c_lock);
+ lck_mtx_lock_spin_always(c_list_lock);
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
- clear_busy = TRUE;
+ clear_busy = TRUE;
+ }
}
assert(c_seg->c_state != C_IS_FILLING);
- if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ONDISK(c_seg))) {
+ if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg))) {
queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
c_seg->c_on_minorcompact_q = 1;
c_minor_count++;
}
- lck_mtx_unlock_always(c_list_lock);
+ if (c_list_lock_held == FALSE)
+ lck_mtx_unlock_always(c_list_lock);
if (clear_busy == TRUE)
C_SEG_WAKEUP_DONE(c_seg);
int c_seg_freed;
assert(c_seg->c_busy);
+ assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
/*
* check for the case that can occur when we are not swapping
* and this segment has previously been moved to the majorcompact_q
* to take it out of consideration... if it now qualifies for major
* compaction, put it back on the age_q so it will be considered
* in the next major compaction sweep... if we don't do this
* we will eventually run into the c_segments_limit
*/
- if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT(c_seg)) {
+ if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
}
{
int old_state = c_seg->c_state;
-#if DEVELOPMENT || DEBUG
#if __i386__ || __x86_64__
if (new_state != C_IS_FILLING)
- lck_mtx_assert(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(c_list_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
#endif
switch (old_state) {
break;
case C_ON_SWAPPEDOUT_Q:
- assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
+ assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
+ new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
break;
case C_ON_SWAPPEDOUTSPARSE_Q:
- assert(new_state == C_ON_SWAPPEDIN_Q ||
+ assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
break;
case C_ON_AGE_Q:
- assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
- old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPOUT_Q);
+ assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q || old_state == C_ON_SWAPOUT_Q ||
+ old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
if (old_state == C_IS_FILLING)
queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
- else
- c_seg_insert_into_q(&c_age_list_head, c_seg);
+ else {
+ if (!queue_empty(&c_age_list_head)) {
+ c_segment_t c_first;
+
+ c_first = (c_segment_t)queue_first(&c_age_list_head);
+ c_seg->c_creation_ts = c_first->c_creation_ts;
+ }
+ queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
+ }
c_age_count++;
break;
case C_ON_SWAPPEDOUTSPARSE_Q:
assert(c_seg->c_state == C_ON_SWAPOUT_Q || c_seg->c_state == C_ON_SWAPPEDOUT_Q);
- c_seg_insert_into_q(&c_swappedout_sparse_list_head, c_seg);
+ if (insert_head == TRUE)
+ queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
+ else
+ queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
+
c_swappedout_sparse_count++;
break;
uint64_t c_swap_handle = 0;
assert(c_seg->c_busy);
+ assert(c_seg->c_slots_used == 0);
assert(!c_seg->c_on_minorcompact_q);
assert(!c_seg->c_busy_swapping);
if (c_buffer) {
if (pages_populated)
- kernel_memory_depopulate(kernel_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
+ kernel_memory_depopulate(compressor_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
} else if (c_swap_handle) {
/*
vm_swap_free(c_swap_handle);
}
lck_mtx_lock_spin_always(&c_seg->c_lock);
-
+ /*
+ * c_seg must remain busy until
+ * after the call to vm_swap_free
+ */
C_SEG_WAKEUP_DONE(c_seg);
lck_mtx_unlock_always(&c_seg->c_lock);
lck_mtx_unlock_always(c_list_lock);
-#if __i386__ || __x86_64__
lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
-#else /* __i386__ || __x86_64__ */
- lck_spin_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
-#endif /* __i386__ || __x86_64__ */
if (c_seg->c_slot_var_array_len)
kfree(c_seg->c_slot_var_array, sizeof(struct c_slot) * c_seg->c_slot_var_array_len);
zfree(compressor_segment_zone, c_seg);
}
-
+#if DEVELOPMENT || DEBUG
int c_seg_trim_page_count = 0;
+#endif
void
c_seg_trim_tail(c_segment_t c_seg)
c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
c_seg->c_nextoffset = c_offset;
- c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
+ c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
+ ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
if (c_seg->c_firstemptyslot > c_seg->c_nextslot)
c_seg->c_firstemptyslot = c_seg->c_nextslot;
-
+#if DEVELOPMENT || DEBUG
c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
- round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE);
+ round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
+ PAGE_SIZE);
+#endif
}
break;
}
int i;
c_slot_t c_dst;
c_slot_t c_src;
- boolean_t need_unlock = TRUE;
assert(c_seg->c_busy);
c_seg_free(c_seg);
return (1);
}
+ lck_mtx_unlock_always(&c_seg->c_lock);
+
if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE)
goto done;
-
+
+/* TODO: assert first emptyslot's c_size is actually 0 */
+
+#if DEVELOPMENT || DEBUG
+ C_SEG_MAKE_WRITEABLE(c_seg);
+#endif
+
#if VALIDATE_C_SEGMENTS
c_seg->c_was_minor_compacted++;
#endif
if (c_size == 0)
continue;
- memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_size);
+ c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
+/* N.B.: This memcpy may be an overlapping copy */
+ memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
-#if CHECKSUM_THE_DATA
- c_dst->c_hash_data = c_src->c_hash_data;
-#endif
-#if CHECKSUM_THE_COMPRESSED_DATA
- c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
-#endif
- c_dst->c_size = c_src->c_size;
- c_dst->c_packed_ptr = c_src->c_packed_ptr;
+ cslot_copy(c_dst, c_src);
c_dst->c_offset = c_offset;
slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst);
slot_ptr->s_cindx = c_indx;
- c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
-
c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
PACK_C_SIZE(c_src, 0);
c_indx++;
#if VALIDATE_C_SEGMENTS
c_seg_validate(c_seg, TRUE);
#endif
-
if (old_populated_offset > c_seg->c_populated_offset) {
uint32_t gc_size;
int32_t *gc_ptr;
gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
- lck_mtx_unlock_always(&c_seg->c_lock);
+ kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size, KMA_COMPRESSOR);
+ }
- kernel_memory_depopulate(kernel_map, (vm_offset_t)gc_ptr, gc_size, KMA_COMPRESSOR);
+#if DEVELOPMENT || DEBUG
+ C_SEG_WRITE_PROTECT(c_seg);
+#endif
- if (clear_busy == TRUE)
- lck_mtx_lock_spin_always(&c_seg->c_lock);
- else
- need_unlock = FALSE;
- }
done:
- if (need_unlock == TRUE) {
- if (clear_busy == TRUE)
- C_SEG_WAKEUP_DONE(c_seg);
-
+ if (clear_busy == TRUE) {
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
+ C_SEG_WAKEUP_DONE(c_seg);
lck_mtx_unlock_always(&c_seg->c_lock);
}
return (0);
* from c_seg_src to c_seg_dst and update both c_segment's
* state w/o holding the master lock
*/
+#if DEVELOPMENT || DEBUG
+ C_SEG_MAKE_WRITEABLE(c_seg_dst);
+#endif
#if VALIDATE_C_SEGMENTS
c_seg_dst->c_was_major_compacted++;
if (size_to_populate > C_SEG_MAX_POPULATE_SIZE)
size_to_populate = C_SEG_MAX_POPULATE_SIZE;
- kernel_memory_populate(kernel_map,
+ kernel_memory_populate(compressor_map,
(vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
size_to_populate,
KMA_COMPRESSOR,
c_seg_major_compact_stats.moved_slots++;
c_seg_major_compact_stats.moved_bytes += c_size;
-#if CHECKSUM_THE_DATA
- c_dst->c_hash_data = c_src->c_hash_data;
-#endif
-#if CHECKSUM_THE_COMPRESSED_DATA
- c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
-#endif
- c_dst->c_size = c_src->c_size;
- c_dst->c_packed_ptr = c_src->c_packed_ptr;
+ cslot_copy(c_dst, c_src);
c_dst->c_offset = c_seg_dst->c_nextoffset;
if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot)
c_seg_dst->c_firstemptyslot++;
+ c_seg_dst->c_slots_used++;
c_seg_dst->c_nextslot++;
c_seg_dst->c_bytes_used += c_rounded_size;
c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
c_seg_src->c_bytes_unused += c_rounded_size;
c_seg_src->c_firstemptyslot = 0;
+ assert(c_seg_src->c_slots_used);
+ c_seg_src->c_slots_used--;
+
if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
/* dest segment is now full */
keep_compacting = FALSE;
break;
}
}
+#if DEVELOPMENT || DEBUG
+ C_SEG_WRITE_PROTECT(c_seg_dst);
+#endif
if (dst_slot < c_seg_dst->c_nextslot) {
PAGE_REPLACEMENT_ALLOWED(TRUE);
}
-int compaction_swapper_inited = 0;
int compaction_swapper_init_now = 0;
int compaction_swapper_running = 0;
+int compaction_swapper_awakened = 0;
int compaction_swapper_abort = 0;
if (age >= vm_ripe_target_age)
return (TRUE);
}
- if ((vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP) && vm_swap_up == TRUE) {
+ if (VM_CONFIG_SWAP_IS_ACTIVE) {
if (COMPRESSOR_NEEDS_TO_SWAP()) {
return (TRUE);
}
should_swap = TRUE;
#if CONFIG_JETSAM
- if (should_swap || c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) {
+ if (should_swap || vm_compressor_low_on_space() == TRUE) {
if (vm_compressor_thrashing_detected == FALSE) {
vm_compressor_thrashing_detected = TRUE;
- if (swapout_target_age || c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) {
+ if (swapout_target_age || vm_compressor_low_on_space() == TRUE) {
memorystatus_kill_on_VM_thrashing(TRUE /* async */);
compressor_thrashing_induced_jetsam++;
} else {
if (should_swap == FALSE) {
/*
- * COMPRESSOR_NEEDS_TO_MAJOR_COMPACT returns true only if we're
+ * vm_compressor_needs_to_major_compact returns true only if we're
* about to run out of available compressor segments... in this
* case, we absolutely need to run a major compaction even if
* we've just kicked off a jetsam or we don't otherwise need to
* swap... terminating objects releases
* pages back to the uncompressed cache, but does not guarantee
* that we will free up even a single compression segment
*/
- should_swap = COMPRESSOR_NEEDS_TO_MAJOR_COMPACT();
+ should_swap = vm_compressor_needs_to_major_compact();
}
/*
#endif /* CONFIG_JETSAM */
uint32_t vm_wake_compactor_swapper_calls = 0;
+uint32_t vm_run_compactor_already_running = 0;
+uint32_t vm_run_compactor_empty_minor_q = 0;
+uint32_t vm_run_compactor_did_compact = 0;
+uint32_t vm_run_compactor_waited = 0;
+
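+/*
+ * Drive any pending delayed minor compactions; if a compaction/swap pass is
+ * already running we normally just return, but on configurations restricted
+ * to a single processor we wait for that pass to finish instead.
+ */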
+void
+vm_run_compactor(void)
+{
+ if (c_segment_count == 0)
+ return;
+
+ lck_mtx_lock_spin_always(c_list_lock);
+
+ if (c_minor_count == 0) {
+ vm_run_compactor_empty_minor_q++;
+
+ lck_mtx_unlock_always(c_list_lock);
+ return;
+ }
+ if (compaction_swapper_running) {
+
+ if (vm_restricted_to_single_processor == FALSE) {
+ vm_run_compactor_already_running++;
+
+ lck_mtx_unlock_always(c_list_lock);
+ return;
+ }
+ vm_run_compactor_waited++;
+
+ assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
+
+ lck_mtx_unlock_always(c_list_lock);
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ return;
+ }
+ vm_run_compactor_did_compact++;
+
+ fastwake_warmup = FALSE;
+ compaction_swapper_running = 1;
+
+ vm_compressor_do_delayed_compactions(FALSE);
+
+ compaction_swapper_running = 0;
+
+ lck_mtx_unlock_always(c_list_lock);
+
+ thread_wakeup((event_t)&compaction_swapper_running);
+}
+
void
vm_wake_compactor_swapper(void)
{
- if (compaction_swapper_running || c_segment_count == 0)
+ if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0)
return;
- if (c_minor_count || COMPRESSOR_NEEDS_TO_MAJOR_COMPACT()) {
+ if (c_minor_count || vm_compressor_needs_to_major_compact()) {
lck_mtx_lock_spin_always(c_list_lock);
fastwake_warmup = FALSE;
- if (compaction_swapper_running == 0) {
+ if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
vm_wake_compactor_swapper_calls++;
+ compaction_swapper_awakened = 1;
thread_wakeup((event_t)&c_compressor_swap_trigger);
-
- compaction_swapper_running = 1;
}
lck_mtx_unlock_always(c_list_lock);
}
clock_sec_t now;
clock_nsec_t nsec;
+ assert(VM_CONFIG_SWAP_IS_PRESENT);
lck_mtx_lock_spin_always(c_list_lock);
vm_swapout_ripe_segments = FALSE;
lck_mtx_unlock_always(c_list_lock);
+
+ thread_wakeup((event_t)&compaction_swapper_running);
}
{
boolean_t need_wakeup = FALSE;
- if (compaction_swapper_running)
+ if (c_segment_count == 0)
return;
- if (c_segment_count == 0)
+ if (compaction_swapper_running || compaction_swapper_awakened)
return;
if (!compaction_swapper_inited && !compaction_swapper_init_now) {
fastwake_warmup = FALSE;
- if (compaction_swapper_running == 0) {
+ if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
+ compaction_swapper_awakened = 1;
thread_wakeup((event_t)&c_compressor_swap_trigger);
-
- compaction_swapper_running = 1;
}
lck_mtx_unlock_always(c_list_lock);
}
boolean_t needs_to_swap = FALSE;
- lck_mtx_assert(c_list_lock, LCK_MTX_ASSERT_OWNED);
+#if !CONFIG_EMBEDDED
+ LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
+#endif /* !CONFIG_EMBEDDED */
while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
- if (vm_swap_up == TRUE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
+ if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT)
needs_to_swap = TRUE;
}
+extern int vm_num_swap_files;
+extern int vm_num_pinned_swap_files;
+extern int vm_swappin_enabled;
+
+extern unsigned int vm_swapfile_total_segs_used;
+extern unsigned int vm_swapfile_total_segs_alloced;
+
+
void
vm_compressor_flush(void)
{
lck_mtx_unlock_always(c_list_lock);
+ thread_wakeup((event_t)&compaction_swapper_running);
+
clock_get_uptime(&endTime);
SUB_ABSOLUTETIME(&endTime, &startTime);
absolutetime_to_nanoseconds(endTime, &nsec);
- HIBLOG("vm_compressor_flush completed - took %qd msecs\n", nsec / 1000000ULL);
+ HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
+ nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
}
-extern void vm_swap_file_set_tuneables(void);
int compaction_swap_trigger_thread_awakened = 0;
-
static void
vm_compressor_swap_trigger_thread(void)
{
* be operating on the correct directory (in case the default
* of /var/vm/ is overridden by the dynamic_pager
*/
- if (compaction_swapper_init_now && !compaction_swapper_inited) {
- if (vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP)
- vm_swap_file_set_tuneables();
+ if (compaction_swapper_init_now) {
+ vm_compaction_swapper_do_init();
if (vm_restricted_to_single_processor == TRUE)
thread_vm_bind_group_add();
-
- compaction_swapper_inited = 1;
+ thread_set_thread_name(current_thread(), "VM_cswap_trigger");
+ compaction_swapper_init_now = 0;
}
lck_mtx_lock_spin_always(c_list_lock);
compaction_swap_trigger_thread_awakened++;
+ compaction_swapper_awakened = 0;
- vm_compressor_compact_and_swap(FALSE);
+ if (compaction_swapper_running == 0) {
+
+ compaction_swapper_running = 1;
+ vm_compressor_compact_and_swap(FALSE);
+
+ compaction_swapper_running = 0;
+ }
assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
- compaction_swapper_running = 0;
- thread_wakeup((event_t)&compaction_swapper_running);
+ if (compaction_swapper_running == 0)
+ thread_wakeup((event_t)&compaction_swapper_running);
lck_mtx_unlock_always(c_list_lock);
}
-#define DELAY_TRIM_ON_WAKE_SECS 4
+#define DELAY_TRIM_ON_WAKE_SECS 25
void
vm_compressor_delay_trim(void)
return;
}
- if (compaction_swapper_running == 0) {
+ if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
fastwake_warmup = TRUE;
- compaction_swapper_running = 1;
+
+ compaction_swapper_awakened = 1;
thread_wakeup((event_t)&c_compressor_swap_trigger);
}
lck_mtx_unlock_always(c_list_lock);
}
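+
+/*
+ * Swap in every segment on the swapped-out and swapped-out-sparse queues,
+ * ignoring the generation-id window and free-page checks applied by the
+ * normal fastwake warmup.
+ */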
+void
+do_fastwake_warmup_all(void)
+{
+
+ lck_mtx_lock_spin_always(c_list_lock);
+
+ if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
+
+ lck_mtx_unlock_always(c_list_lock);
+ return;
+ }
+
+ fastwake_warmup = TRUE;
+
+ do_fastwake_warmup(&c_swappedout_list_head, TRUE);
+
+ do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
+
+ fastwake_warmup = FALSE;
+
+ lck_mtx_unlock_always(c_list_lock);
+
+}
void
-do_fastwake_warmup(void)
+do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
{
- uint64_t my_thread_id;
c_segment_t c_seg = NULL;
AbsoluteTime startTime, endTime;
uint64_t nsec;
lck_mtx_unlock_always(c_list_lock);
- my_thread_id = current_thread()->thread_id;
- proc_set_task_policy_thread(kernel_task, my_thread_id,
- TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
+ proc_set_thread_policy(current_thread(),
+ TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
PAGE_REPLACEMENT_DISALLOWED(TRUE);
lck_mtx_lock_spin_always(c_list_lock);
- while (!queue_empty(&c_swappedout_list_head) && fastwake_warmup == TRUE) {
+ while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
- c_seg = (c_segment_t) queue_first(&c_swappedout_list_head);
+ c_seg = (c_segment_t) queue_first(c_queue);
- if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
- c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
- break;
+ if (consider_all_cseg == FALSE) {
+ if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
+ c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
+ break;
- if (vm_page_free_count < (AVAILABLE_MEMORY / 4))
- break;
+ if (vm_page_free_count < (AVAILABLE_MEMORY / 4))
+ break;
+ }
lck_mtx_lock_spin_always(&c_seg->c_lock);
lck_mtx_unlock_always(c_list_lock);
c_seg_wait_on_busy(c_seg);
PAGE_REPLACEMENT_DISALLOWED(TRUE);
} else {
- c_seg_swapin(c_seg, TRUE);
-
- lck_mtx_unlock_always(&c_seg->c_lock);
+ if (c_seg_swapin(c_seg, TRUE, FALSE) == 0)
+ lck_mtx_unlock_always(&c_seg->c_lock);
c_segment_warmup_count++;
PAGE_REPLACEMENT_DISALLOWED(FALSE);
PAGE_REPLACEMENT_DISALLOWED(FALSE);
- proc_set_task_policy_thread(kernel_task, my_thread_id,
- TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
+ proc_set_thread_policy(current_thread(),
+ TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
clock_get_uptime(&endTime);
SUB_ABSOLUTETIME(&endTime, &startTime);
lck_mtx_lock_spin_always(c_list_lock);
- first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
+ if (consider_all_cseg == FALSE) {
+ first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
+ }
}
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
- do_fastwake_warmup();
+ do_fastwake_warmup(&c_swappedout_list_head, FALSE);
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
fastwake_warmup = FALSE;
HIBLOG("vm_compressor_flush - out of swap space\n");
break;
}
+ if (vm_swap_files_pinned() == FALSE) {
+ HIBLOG("vm_compressor_flush - unpinned swap files\n");
+ break;
+ }
+ if (hibernate_in_progress_with_pinned_swap == TRUE &&
+ (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
+ HIBLOG("vm_compressor_flush - out of pinned swap space\n");
+ break;
+ }
clock_get_system_nanotime(&sec, &nsec);
if (sec > hibernate_flushing_deadline) {
needs_to_swap = compressor_needs_to_swap();
+#if !CONFIG_EMBEDDED
if (needs_to_swap == TRUE && vm_swap_low_on_space())
vm_compressor_take_paging_space_action();
+#endif /* !CONFIG_EMBEDDED */
lck_mtx_lock_spin_always(c_list_lock);
assert(c_seg->c_busy);
assert(!c_seg->c_on_minorcompact_q);
- if (vm_swap_up == TRUE) {
+ if (VM_CONFIG_SWAP_IS_ACTIVE) {
/*
* This mode of putting a generic c_seg on the swapout list is
- * only supported when we have general swap ON i.e.
- * we compress pages into c_segs as we process them off
- * the paging queues in vm_pageout_scan().
+ * only supported when we have general swapping enabled
*/
- if (COMPRESSED_PAGER_IS_SWAPBACKED)
- c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
- else {
- if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
- /*
- * we are running compressor sweeps with swap-behind
- * make sure the c_seg has aged enough before swapping it
- * out...
- */
- if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
- c_seg->c_overage_swap = TRUE;
- c_overage_swapped_count++;
- c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
- }
+ c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
+ } else {
+ if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
+
+ assert(VM_CONFIG_SWAP_IS_PRESENT);
+ /*
+ * we are running compressor sweeps with swap-behind
+ * make sure the c_seg has aged enough before swapping it
+ * out...
+ */
+ if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
+ c_seg->c_overage_swap = TRUE;
+ c_overage_swapped_count++;
+ c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
}
}
}
int min_needed;
int size_to_populate;
+#if !CONFIG_EMBEDDED
if (vm_compressor_low_on_space())
vm_compressor_take_paging_space_action();
+#endif /* !CONFIG_EMBEDDED */
if ( (c_seg = *current_chead) == NULL ) {
uint32_t c_segno;
c_segments_busy = TRUE;
lck_mtx_unlock_always(c_list_lock);
- kernel_memory_populate(kernel_map, (vm_offset_t)c_segments_next_page,
+ kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page,
PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
c_segments_next_page += PAGE_SIZE;
c_segno = c_free_segno_head;
assert(c_segno >= 0 && c_segno < c_segments_limit);
- c_free_segno_head = c_segments[c_segno].c_segno;
+ c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
/*
* do the rest of the bookkeeping now while we're still behind
c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
-#if __i386__ || __x86_64__
lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
-#else /* __i386__ || __x86_64__ */
- lck_spin_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr);
-#endif /* __i386__ || __x86_64__ */
c_seg->c_state = C_IS_EMPTY;
c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
c_empty_count++;
c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
c_segments[c_segno].c_seg = c_seg;
+ assert(c_segments[c_segno].c_segno > c_segments_available);
lck_mtx_unlock_always(c_list_lock);
*current_chead = c_seg;
+
+#if DEVELOPMENT || DEBUG
+ C_SEG_MAKE_WRITEABLE(c_seg);
+#endif
+
}
c_seg_alloc_nextslot(c_seg);
if (size_to_populate > C_SEG_MAX_POPULATE_SIZE)
size_to_populate = C_SEG_MAX_POPULATE_SIZE;
+ vm_compressor_pages_grabbed += size_to_populate / PAGE_SIZE;
- kernel_memory_populate(kernel_map,
+ kernel_memory_populate(compressor_map,
(vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
size_to_populate,
KMA_COMPRESSOR,
unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
+#ifndef _OPEN_SOURCE
+ /* TODO: The HW codec can generate, lazily, a '2nd page not mapped'
+ * exception. So on such a platform, or platforms where we're confident
+ * the codec does not require a buffer page to absorb trailing writes,
+ * we can create an unmapped hole at the tail of the segment, rather
+ * than a populated mapping. This will also guarantee that the codec
+ * does not overwrite valid data past the edge of the segment and
+ * thus eliminate the depopulation overhead.
+ */
+#endif
if (unused_bytes) {
-
offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
/*
lck_mtx_unlock_always(&c_seg->c_lock);
kernel_memory_depopulate(
- kernel_map,
+ compressor_map,
(vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
unused_bytes,
KMA_COMPRESSOR);
}
assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= C_SEG_BUFSIZE);
+#if DEVELOPMENT || DEBUG
+ {
+ boolean_t c_seg_was_busy = FALSE;
+
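+ /*
+ * Write-protecting the filled segment requires dropping the c_seg lock,
+ * so mark the segment busy (if it isn't already) to keep other threads
+ * away while the lock is released.
+ */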
+ if ( !c_seg->c_busy)
+ C_SEG_BUSY(c_seg);
+ else
+ c_seg_was_busy = TRUE;
+
+ lck_mtx_unlock_always(&c_seg->c_lock);
+
+ C_SEG_WRITE_PROTECT(c_seg);
+
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
+
+ if (c_seg_was_busy == FALSE)
+ C_SEG_WAKEUP_DONE(c_seg);
+ }
+#endif
+
#if CONFIG_FREEZE
- if (current_chead == (c_segment_t*)&freezer_chead && DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED &&
+ if (current_chead == (c_segment_t*)&freezer_chead &&
+ VM_CONFIG_SWAP_IS_PRESENT &&
+ VM_CONFIG_FREEZER_SWAP_IS_ACTIVE &&
c_freezer_swapout_count < VM_MAX_FREEZER_CSEG_SWAP_COUNT) {
new_state = C_ON_SWAPOUT_Q;
}
lck_mtx_lock_spin_always(c_list_lock);
+ c_seg->c_generation_id = c_generation_id++;
+ c_seg_switch_state(c_seg, new_state, FALSE);
+
#if CONFIG_FREEZE
if (c_seg->c_state == C_ON_SWAPOUT_Q)
c_freezer_swapout_count++;
#endif /* CONFIG_FREEZE */
- c_seg->c_generation_id = c_generation_id++;
- c_seg_switch_state(c_seg, new_state, FALSE);
+ if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
+ c_seg_need_delayed_compaction(c_seg, TRUE);
lck_mtx_unlock_always(c_list_lock);
thread_wakeup((event_t)&c_swapout_list_head);
#endif /* CONFIG_FREEZE */
- if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
- c_seg_need_delayed_compaction(c_seg);
-
*current_chead = NULL;
}
+
/*
* returns with c_seg locked
*/
void
-c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data)
+c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
{
clock_sec_t sec;
clock_nsec_t nsec;
lck_mtx_lock_spin_always(c_list_lock);
lck_mtx_lock_spin_always(&c_seg->c_lock);
+ assert(c_seg->c_busy_swapping);
+ assert(c_seg->c_busy);
+
c_seg->c_busy_swapping = 0;
if (c_seg->c_overage_swap == TRUE) {
c_seg->c_overage_swap = FALSE;
}
if (has_data == TRUE) {
- c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
+ if (age_on_swapin_q == TRUE)
+ c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
+ else
+ c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
+
+ if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE)
+ c_seg_need_delayed_compaction(c_seg, TRUE);
} else {
c_seg->c_store.c_buffer = (int32_t*) NULL;
c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
/*
- * c_seg has to be locked and is returned locked.
+ * c_seg has to be locked and is returned locked if the c_seg isn't freed
* PAGE_REPLACMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
+ * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
*/
-void
-c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction)
+int
+c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
{
vm_offset_t addr = 0;
uint32_t io_size = 0;
PAGE_REPLACEMENT_DISALLOWED(FALSE);
addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
+ c_seg->c_store.c_buffer = (int32_t*) addr;
- kernel_memory_populate(kernel_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
+ kernel_memory_populate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
- if (vm_swap_get(addr, f_offset, io_size) != KERN_SUCCESS) {
+ if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
PAGE_REPLACEMENT_DISALLOWED(TRUE);
- kernel_memory_depopulate(kernel_map, addr, io_size, KMA_COMPRESSOR);
+ kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR);
- c_seg_swapin_requeue(c_seg, FALSE);
+ c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
} else {
- c_seg->c_store.c_buffer = (int32_t*) addr;
#if ENCRYPTED_SWAP
vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */
if (c_seg->cseg_swap_size != io_size)
panic("swapin size doesn't match swapout size");
- if (c_seg->cseg_hash != hash_string((char*) c_seg->c_store.c_buffer, (int)io_size)) {
+ if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
panic("c_seg_swapin - Swap hash mismatch\n");
}
#endif /* CHECKSUM_THE_SWAP */
PAGE_REPLACEMENT_DISALLOWED(TRUE);
+ c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
+
+ OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
+
if (force_minor_compaction == TRUE) {
+ if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
+ /*
+ * c_seg was completely empty so it was freed,
+ * so be careful not to reference it again
+ *
+ * Drop the rwlock_count so that the thread priority
+ * is returned back to where it is supposed to be.
+ */
+ clear_thread_rwlock_boost();
+ return (1);
+ }
+
lck_mtx_lock_spin_always(&c_seg->c_lock);
-
- c_seg_minor_compaction_and_unlock(c_seg, FALSE);
}
- OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
-
- c_seg_swapin_requeue(c_seg, TRUE);
}
C_SEG_WAKEUP_DONE(c_seg);
* is returned back to where it is supposed to be.
*/
clear_thread_rwlock_boost();
+
+ return (0);
}
KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
retry:
- if ((c_seg = c_seg_allocate(current_chead)) == NULL)
+ if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
return (1);
+ }
/*
* returns with c_seg lock held
* and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
max_csize = PAGE_SIZE;
#if CHECKSUM_THE_DATA
- cs->c_hash_data = hash_string(src, PAGE_SIZE);
+ cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
#endif
-
+ boolean_t incomp_copy = FALSE;
+ int max_csize_adj = (max_csize - 4);
+
+ if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
+#if defined(__arm__) || defined(__arm64__)
+ uint16_t ccodec = CINVALID;
+
+ if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
+ c_size = metacompressor((const uint8_t *) src,
+ (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
+ max_csize_adj, &ccodec,
+ scratch_buf, &incomp_copy);
+#if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
+ if (c_size > max_csize_adj) {
+ c_size = -1;
+ }
+#endif
+ } else {
+ c_size = -1;
+ }
+ assert(ccodec == CCWK || ccodec == CCLZ4);
+ cs->c_codec = ccodec;
+#endif
+ } else {
+#if defined(__arm__) || defined(__arm64__)
+ cs->c_codec = CCWK;
+#endif
+#if defined(__arm64__)
+ __unreachable_ok_push
+ if (PAGE_SIZE == 4096)
+ c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
+ (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
+ else {
+ c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
+ (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
+ }
+ __unreachable_ok_pop
+#else
c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
- (WK_word *)(uintptr_t)scratch_buf, max_csize - 4);
- assert(c_size <= (max_csize - 4) && c_size >= -1);
+ (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
+#endif
+ }
+ assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
+ "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
if (c_size == -1) {
-
if (max_csize < PAGE_SIZE) {
c_current_seg_filled(c_seg, current_chead);
assert(*current_chead == NULL);
lck_mtx_unlock_always(&c_seg->c_lock);
-
+ /* TODO: it may be worth requiring codecs to distinguish
+ * between incompressible inputs and failures due to
+ * budget exhaustion.
+ */
PAGE_REPLACEMENT_DISALLOWED(FALSE);
goto retry;
}
c_size = PAGE_SIZE;
- memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
+ if (incomp_copy == FALSE) {
+ memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
+ }
OSAddAtomic(1, &c_segment_noncompressible_pages);
#if RECORD_THE_COMPRESSED_DATA
c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
-
#if CHECKSUM_THE_COMPRESSED_DATA
- cs->c_hash_compressed_data = hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
+ cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
+#endif
+#if POPCOUNT_THE_COMPRESSED_DATA
+ cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
#endif
c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
PACK_C_SIZE(cs, c_size);
c_seg->c_bytes_used += c_rounded_size;
c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
+ c_seg->c_slots_used++;
slot_ptr->s_cindx = c_seg->c_nextslot++;
/* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
return (0);
}
+static inline void sv_decompress(int32_t *ddst, int32_t pattern) {
+#if __x86_64__
+ memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
+#else
+ size_t i;
+
+ /* Unroll the pattern fill loop 4x to encourage the
+ * compiler to emit NEON stores, cf.
+ * <rdar://problem/25839866> Loop autovectorization
+ * anomalies.
+ * We use separate loops for each PAGE_SIZE
+ * to allow the autovectorizer to engage, as PAGE_SIZE
+ * is currently not a constant.
+ */
+
+ __unreachable_ok_push
+ if (PAGE_SIZE == 4096) {
+ for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
+ *ddst++ = pattern;
+ *ddst++ = pattern;
+ *ddst++ = pattern;
+ *ddst++ = pattern;
+ }
+ } else {
+ assert(PAGE_SIZE == 16384);
+ for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
+ *ddst++ = pattern;
+ *ddst++ = pattern;
+ *ddst++ = pattern;
+ *ddst++ = pattern;
+ }
+ }
+ __unreachable_ok_pop
+#endif
+}
static int
c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
{
c_slot_t cs;
c_segment_t c_seg;
+ uint32_t c_segno;
int c_indx;
int c_rounded_size;
uint32_t c_size;
boolean_t consider_defragmenting = FALSE;
boolean_t kdp_mode = FALSE;
- if (flags & C_KDP) {
+ if (__improbable(flags & C_KDP)) {
if (not_in_kdp) {
panic("C_KDP passed to decompress page from outside of debugger context");
}
- assert((flags & C_KEEP) == C_KEEP);
+ assert((flags & C_KEEP) == C_KEEP);
assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
}
kdp_mode = TRUE;
+ *zeroslot = 0;
}
ReTry:
- if (!kdp_mode) {
+ if (__probable(!kdp_mode)) {
PAGE_REPLACEMENT_DISALLOWED(TRUE);
} else {
if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
* to disk... in this state we allow freeing of compressed
* pages and must honor the C_DONT_BLOCK case
*/
- if (dst && decompressions_blocked == TRUE) {
+ if (__improbable(dst && decompressions_blocked == TRUE)) {
if (flags & C_DONT_BLOCK) {
- if (!kdp_mode) {
+ if (__probable(!kdp_mode)) {
PAGE_REPLACEMENT_DISALLOWED(FALSE);
}
}
#endif
/* s_cseg is actually "segno+1" */
- c_seg = c_segments[slot_ptr->s_cseg - 1].c_seg;
+ c_segno = slot_ptr->s_cseg - 1;
+
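+ /*
+  * Validate the slot mapping before dereferencing it: the segment number
+  * must be in range, and the c_segments[] entry must currently hold a
+  * live segment pointer rather than a free-list index (hence the
+  * < c_segments_available test below).
+  */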
+ if (__improbable(c_segno >= c_segments_available))
+ panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
+ c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
+
+ if (__improbable(c_segments[c_segno].c_segno < c_segments_available))
+ panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
+ c_segno, slot_ptr, *(int *)((void *)slot_ptr));
- if (!kdp_mode) {
+ c_seg = c_segments[c_segno].c_seg;
+
+ if (__probable(!kdp_mode)) {
lck_mtx_lock_spin_always(&c_seg->c_lock);
} else {
if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
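+ /*
+  * If the caller is only releasing the slot (dst == NULL), a segment
+  * that is busy swapping can still have its bookkeeping updated, so
+  * skip the busy / C_DONT_BLOCK checks; the swap paths handle the
+  * rest (see the c_busy_swapping bypass near the end of this function).
+  */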
+ if (dst == NULL && c_seg->c_busy_swapping) {
+ assert(c_seg->c_busy);
+
+ goto bypass_busy_check;
+ }
if (flags & C_DONT_BLOCK) {
if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
*zeroslot = 0;
goto ReTry;
}
+bypass_busy_check:
+
c_indx = slot_ptr->s_cindx;
+ if (__improbable(c_indx >= c_seg->c_nextslot))
+ panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
+ c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
+
cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
c_size = UNPACK_C_SIZE(cs);
+ if (__improbable(c_size == 0))
+ panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
+ c_seg, slot_ptr, *(int *)((void *)slot_ptr));
+
c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
if (dst) {
if (C_SEG_IS_ONDISK(c_seg)) {
assert(kdp_mode == FALSE);
- c_seg_swapin(c_seg, FALSE);
+ retval = c_seg_swapin(c_seg, FALSE, TRUE);
+ assert(retval == 0);
retval = 1;
}
if (c_seg->c_state == C_ON_BAD_Q) {
assert(c_seg->c_store.c_buffer == NULL);
+ *zeroslot = 0;
retval = -1;
- goto c_seg_invalid_data;
+ goto done;
+ }
+
+#if POPCOUNT_THE_COMPRESSED_DATA
+ unsigned csvpop;
+ uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
+ if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
+ panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
}
+#endif
+
#if CHECKSUM_THE_COMPRESSED_DATA
- if (cs->c_hash_compressed_data != hash_string((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))
- panic("compressed data doesn't match original");
+ unsigned csvhash;
+ if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
+ panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
+ }
#endif
if (c_rounded_size == PAGE_SIZE) {
/*
*/
dptr = (int32_t *)(uintptr_t)dst;
data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
-#if __x86_64__
- memset_word(dptr, data, PAGE_SIZE / sizeof(int32_t));
-#else
- {
- int i;
-
- for (i = 0; i < (int)(PAGE_SIZE / sizeof(int32_t)); i++)
- *dptr++ = data;
- }
-#endif
+ sv_decompress(dptr, data);
} else {
uint32_t my_cpu_no;
char *scratch_buf;
- if (!kdp_mode) {
+ if (__probable(!kdp_mode)) {
/*
* we're behind the c_seg lock held in spin mode
* which means pre-emption is disabled... therefore
assert(my_cpu_no < compressor_cpus);
- scratch_buf = &compressor_scratch_bufs[my_cpu_no * WKdm_SCRATCH_BUF_SIZE];
+ scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
} else {
scratch_buf = kdp_compressor_scratch_buf;
}
+
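+ /*
+  * Dispatch on the configured compressor: non-default codecs go through
+  * metadecompressor() using the codec recorded in the slot; otherwise
+  * use the WKdm decompressor matching the page size.
+  */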
+ if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
+#if defined(__arm__) || defined(__arm64__)
+ uint16_t c_codec = cs->c_codec;
+ metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
+ (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf);
+#endif
+ } else {
+#if defined(__arm64__)
+ __unreachable_ok_push
+ if (PAGE_SIZE == 4096)
+ WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
+ (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
+ else {
+ WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
+ (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
+ }
+ __unreachable_ok_pop
+#else
WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
(WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
+#endif
+ }
}
#if CHECKSUM_THE_DATA
- if (cs->c_hash_data != hash_string(dst, PAGE_SIZE))
- panic("decompressed data doesn't match original");
+ if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
+#if defined(__arm__) || defined(__arm64__)
+ int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
+ panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
+#else
+ panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
+#endif
+ }
#endif
if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
-
if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE)
OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
else
OSAddAtomic(1, &sample_period_decompression_count);
}
}
-c_seg_invalid_data:
-
if (flags & C_KEEP) {
*zeroslot = 0;
goto done;
}
-
assert(kdp_mode == FALSE);
+
c_seg->c_bytes_unused += c_rounded_size;
c_seg->c_bytes_used -= c_rounded_size;
+
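+ /* Keep the per-segment live slot count in sync as this slot is released. */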
+ assert(c_seg->c_slots_used);
+ c_seg->c_slots_used--;
+
PACK_C_SIZE(cs, 0);
if (c_indx < c_seg->c_firstemptyslot)
*/
OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
}
+ if (c_seg->c_busy_swapping) {
+ /*
+ * bypass case for c_busy_swapping...
+ * let the swapin/swapout paths deal with putting
+ * the c_seg on the minor compaction queue if needed
+ */
+ assert(c_seg->c_busy);
+ goto done;
+ }
+ assert(!c_seg->c_busy);
+
if (c_seg->c_state != C_IS_FILLING) {
if (c_seg->c_bytes_used == 0) {
if ( !(C_SEG_IS_ONDISK(c_seg))) {
C_SEG_BUSY(c_seg);
lck_mtx_unlock_always(&c_seg->c_lock);
- kernel_memory_depopulate(kernel_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
+ kernel_memory_depopulate(compressor_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
lck_mtx_lock_spin_always(&c_seg->c_lock);
C_SEG_WAKEUP_DONE(c_seg);
}
- if (!c_seg->c_on_minorcompact_q)
- c_seg_need_delayed_compaction(c_seg);
- } else
- assert(c_seg->c_state == C_ON_SWAPPEDOUTSPARSE_Q);
+ if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q)
+ c_seg_need_delayed_compaction(c_seg, FALSE);
+ } else {
+ if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
+ c_seg_move_to_sparse_list(c_seg);
+ consider_defragmenting = TRUE;
+ }
+ }
} else if (c_seg->c_on_minorcompact_q) {
assert(c_seg->c_state != C_ON_BAD_Q);
+ assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
- if (C_SEG_SHOULD_MINORCOMPACT(c_seg)) {
+ if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
c_seg_try_minor_compaction_and_unlock(c_seg);
need_unlock = FALSE;
}
} else if ( !(C_SEG_IS_ONDISK(c_seg))) {
if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
- c_seg_need_delayed_compaction(c_seg);
+ c_seg_need_delayed_compaction(c_seg, FALSE);
}
} else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
}
}
done:
- if (kdp_mode) {
+ if (__improbable(kdp_mode)) {
return retval;
}
if (consider_defragmenting == TRUE)
vm_swap_consider_defragmenting();
+#if CONFIG_EMBEDDED
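+ /* Kick the compactor/swapper thread if this release made minor or major compaction worthwhile. */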
+ if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact())
+ vm_wake_compactor_swapper();
+#endif
return (retval);
}
#if __x86_64__
dst = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
+#elif __arm__ || __arm64__
+ dst = (char *) phystokv((pmap_paddr_t)pn << PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif
*dptr++ = data;
}
#endif
- c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
-
if ( !(flags & C_KEEP)) {
+ c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
+
OSAddAtomic(-1, &c_segment_pages_compressed);
*slot = 0;
}
#if __x86_64__
src = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT);
+#elif __arm__ || __arm64__
+ src = (char *) phystokv((pmap_paddr_t)pn << PAGE_SHIFT);
#else
#error "unsupported architecture"
#endif
+
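+ /* src now points at the kernel virtual alias of the physical page; compress it into the caller's currently filling segment. */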
retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);
return (retval);
c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
-
+ // Is platform alignment actually necessary, since WKdm aligns its output?
c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
-#if CHECKSUM_THE_DATA
- c_dst->c_hash_data = c_src->c_hash_data;
-#endif
-#if CHECKSUM_THE_COMPRESSED_DATA
- c_dst->c_hash_compressed_data = c_src->c_hash_compressed_data;
-#endif
-
- c_dst->c_size = c_src->c_size;
- c_dst->c_packed_ptr = c_src->c_packed_ptr;
+ cslot_copy(c_dst, c_src);
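+ /* cslot_copy() subsumes the open-coded slot metadata copies removed above (size, packed pointer, and optional checksum data). */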
c_dst->c_offset = c_seg_dst->c_nextoffset;
if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot)
c_seg_dst->c_firstemptyslot++;
+ c_seg_dst->c_slots_used++;
c_seg_dst->c_nextslot++;
c_seg_dst->c_bytes_used += c_rounded_size;
c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
c_seg_src->c_bytes_used -= c_rounded_size;
c_seg_src->c_bytes_unused += c_rounded_size;
+
+ assert(c_seg_src->c_slots_used);
+ c_seg_src->c_slots_used--;
if (c_indx < c_seg_src->c_firstemptyslot) {
c_seg_src->c_firstemptyslot = c_indx;
if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
if (!c_seg_src->c_on_minorcompact_q)
- c_seg_need_delayed_compaction(c_seg_src);
+ c_seg_need_delayed_compaction(c_seg_src, FALSE);
}
lck_mtx_unlock_always(&c_seg_src->c_lock);