/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>
#include <vm/WKdm_new.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <machine/pmap.h>
#include <kern/locks.h>

#include <sys/kdebug.h>
#if defined(__arm64__)
#include <arm/proc_reg.h>
#endif
#define C_SEG_OFFSET_BITS       16
#define C_SEG_BUFSIZE           (1024 * 256)
#define C_SEG_MAX_PAGES         (C_SEG_BUFSIZE / PAGE_SIZE)
#if !defined(__x86_64__)
#define C_SEG_OFF_LIMIT         (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 512)))
#define C_SEG_ALLOCSIZE         (C_SEG_BUFSIZE + PAGE_SIZE)
#else
#define C_SEG_OFF_LIMIT         (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 128)))
#define C_SEG_ALLOCSIZE         (C_SEG_BUFSIZE)
#endif /* !defined(__x86_64__) */
#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE)
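
/*
 * Worked example of the geometry above: C_SEG_BUFSIZE is 256KB, so
 * C_SEG_MAX_PAGES is 64 with 4KB pages or 16 with 16KB pages.  On
 * non-x86_64 targets C_SEG_ALLOCSIZE reserves one extra page beyond
 * C_SEG_BUFSIZE, and C_SEG_OFF_LIMIT stops new slot allocations 512
 * bytes (128 offsets) short of the end -- presumably so one more
 * compressed page can be appended without overrunning the buffer.
 */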
#if defined(__arm64__) && (DEVELOPMENT || DEBUG)

#if defined(PLATFORM_WatchOS)
#define VALIDATE_C_SEGMENTS (1)
#endif
#endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */
#if DEBUG || COMPRESSOR_INTEGRITY_CHECKS
#define ENABLE_SWAP_CHECKS 1
#define ENABLE_COMPRESSOR_CHECKS 1
#define POPCOUNT_THE_COMPRESSED_DATA (1)
#else
#define ENABLE_SWAP_CHECKS 0
#define ENABLE_COMPRESSOR_CHECKS 0
#endif

#define CHECKSUM_THE_SWAP               ENABLE_SWAP_CHECKS       /* Debug swap data */
#define CHECKSUM_THE_DATA               ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA    ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor compressed data */
#ifndef VALIDATE_C_SEGMENTS
#define VALIDATE_C_SEGMENTS             ENABLE_COMPRESSOR_CHECKS /* Debug compaction */
#endif

#define RECORD_THE_COMPRESSED_DATA      0

/*
 * The c_slot structure embeds a packed pointer to a c_slot_mapping
 * (32 bits) which we ideally want to span as much VA space as possible
 * so as not to limit zalloc in how it sets itself up.
 */
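/*
 * Illustrative sketch (the actual pack/unpack helpers live in
 * vm_compressor.c and may differ): given the per-architecture constants
 * below, a kernel pointer would round-trip roughly as
 *
 *	packed = (ptr - C_SLOT_PACKED_PTR_BASE) >> C_SLOT_PACKED_PTR_SHIFT;
 *	ptr    = C_SLOT_PACKED_PTR_BASE + ((uintptr_t)packed << C_SLOT_PACKED_PTR_SHIFT);
 *
 * e.g. the arm64 configuration (33 bits, shift 2) can address
 * 2^33 * 4 = 32GB of 4-byte-aligned VA above KERNEL_PMAP_HEAP_RANGE_START,
 * matching the "32G from the heap start" note; x86_64 (36 bits, shift 2)
 * similarly covers 256G.
 */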
#if !defined(__LP64__) /* no packing */
#define C_SLOT_PACKED_PTR_BITS  32
#define C_SLOT_PACKED_PTR_SHIFT 0
#define C_SLOT_PACKED_PTR_BASE  0

#define C_SLOT_C_SIZE_BITS      12
#define C_SLOT_C_CODEC_BITS     1
#define C_SLOT_C_POPCOUNT_BITS  0
#define C_SLOT_C_PADDING_BITS   3

#elif __ARM_WKDM_POPCNT__ /* no packing */
#define C_SLOT_PACKED_PTR_BITS  47
#define C_SLOT_PACKED_PTR_SHIFT 0
#define C_SLOT_PACKED_PTR_BASE  ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS      14
#define C_SLOT_C_CODEC_BITS     1
#define C_SLOT_C_POPCOUNT_BITS  18
#define C_SLOT_C_PADDING_BITS   0

#elif defined(__arm64__) /* 32G from the heap start */
#define C_SLOT_PACKED_PTR_BITS  33
#define C_SLOT_PACKED_PTR_SHIFT 2
#define C_SLOT_PACKED_PTR_BASE  ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS      14
#define C_SLOT_C_CODEC_BITS     1
#define C_SLOT_C_POPCOUNT_BITS  0
#define C_SLOT_C_PADDING_BITS   0

#elif defined(__x86_64__) /* 256G from the heap start */
#define C_SLOT_PACKED_PTR_BITS  36
#define C_SLOT_PACKED_PTR_SHIFT 2
#define C_SLOT_PACKED_PTR_BASE  ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START)

#define C_SLOT_C_SIZE_BITS      12
#define C_SLOT_C_CODEC_BITS     0 /* not used */
#define C_SLOT_C_POPCOUNT_BITS  0
#define C_SLOT_C_PADDING_BITS   0

#else
#error vm_compressor parameters undefined for this architecture
#endif

/*
 * Popcounts need to represent both 0 and full, which requires
 * (8 * 2^C_SLOT_C_SIZE_BITS) + 1 values and (C_SLOT_C_SIZE_BITS + 4) bits.
 *
 * We use the (2 * (8 * 2^C_SLOT_C_SIZE_BITS) - 1) value to mean "unknown".
 */
#define C_SLOT_NO_POPCOUNT      ((16u << C_SLOT_C_SIZE_BITS) - 1)
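
/*
 * Worked example, using the __ARM_WKDM_POPCNT__ configuration where
 * C_SLOT_C_SIZE_BITS is 14: a 16KB buffer holds 8 * 2^14 = 131072 bits,
 * so a popcount needs the 131073 values 0..131072, i.e. 18 bits --
 * exactly C_SLOT_C_POPCOUNT_BITS.  C_SLOT_NO_POPCOUNT is then
 * (16u << 14) - 1 = 262143, which lies outside that range and is
 * therefore unambiguous as an "unknown" marker.
 */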

static_assert((C_SEG_OFFSET_BITS + C_SLOT_C_SIZE_BITS +
    C_SLOT_C_CODEC_BITS + C_SLOT_C_POPCOUNT_BITS +
    C_SLOT_C_PADDING_BITS + C_SLOT_PACKED_PTR_BITS) % 32 == 0);
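
/*
 * For example, the arm64 configuration packs a c_slot into exactly two
 * 32-bit words: 16 (offset) + 14 (size) + 1 (codec) + 0 (popcount) +
 * 0 (padding) + 33 (packed ptr) = 64 bits.  The __ARM_WKDM_POPCNT__
 * variant sums to 96 bits: 16 + 14 + 1 + 18 + 0 + 47.
 */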

struct c_slot {
	uint64_t        c_offset:C_SEG_OFFSET_BITS;
	uint64_t        c_size:C_SLOT_C_SIZE_BITS;
#if C_SLOT_C_CODEC_BITS
	uint64_t        c_codec:C_SLOT_C_CODEC_BITS;
#endif
#if C_SLOT_C_POPCOUNT_BITS
	/*
	 * This value may not agree with c_pop_cdata, as it may be the
	 * population count of the uncompressed data.
	 *
	 * This value must be C_SLOT_NO_POPCOUNT when the compression algorithm
	 * cannot provide one.
	 */
	uint32_t        c_inline_popcount:C_SLOT_C_POPCOUNT_BITS;
#endif
#if C_SLOT_C_PADDING_BITS
	uint64_t        c_padding:C_SLOT_C_PADDING_BITS;
#endif
	uint64_t        c_packed_ptr:C_SLOT_PACKED_PTR_BITS;

	/* debugging fields, typically not present on release kernels */
#if CHECKSUM_THE_DATA
	unsigned int    c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int    c_hash_compressed_data;
#endif
#if POPCOUNT_THE_COMPRESSED_DATA
	unsigned int    c_pop_cdata;
#endif
} __attribute__((packed, aligned(4)));

#define C_IS_EMPTY              0
#define C_IS_FREE               1
#define C_IS_FILLING            2
#define C_ON_AGE_Q              3
#define C_ON_SWAPOUT_Q          4
#define C_ON_SWAPPEDOUT_Q       5
#define C_ON_SWAPPEDOUTSPARSE_Q 6
#define C_ON_SWAPPEDIN_Q        7
#define C_ON_MAJORCOMPACT_Q     8
#define C_ON_BAD_Q              9
#define C_ON_SWAPIO_Q           10

struct c_segment {
	lck_mtx_t       c_lock;
	queue_chain_t   c_age_list;
	queue_chain_t   c_list;

#if CONFIG_FREEZE
	queue_chain_t   c_task_list_next_cseg;
	task_t          c_task_owner;
#endif /* CONFIG_FREEZE */

#define C_SEG_MAX_LIMIT         (1 << 20)       /* this needs to track the size of c_mysegno */
	uint32_t        c_mysegno:20,
	    c_busy:1,
	    c_busy_swapping:1,
	    c_wanted:1,
	    c_on_minorcompact_q:1,      /* can also be on the age_q, the majorcompact_q or the swappedin_q */

	    c_state:4,                  /* what state is the segment in which dictates which q to find it on */
	    c_overage_swap:1,
	    c_reserved:3;

	uint32_t        c_creation_ts;
	uint64_t        c_generation_id;

	int32_t         c_bytes_used;
	int32_t         c_bytes_unused;
	uint32_t        c_slots_used;

	uint16_t        c_firstemptyslot;
	uint16_t        c_nextslot;
	uint32_t        c_nextoffset;
	uint32_t        c_populated_offset;

	union {
		int32_t         *c_buffer;
		uint64_t        c_swap_handle;
	} c_store;

#if VALIDATE_C_SEGMENTS
	uint32_t        c_was_minor_compacted;
	uint32_t        c_was_major_compacted;
	uint32_t        c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int    cseg_hash;
	unsigned int    cseg_swap_size;
#endif /* CHECKSUM_THE_SWAP */

	thread_t        c_busy_for_thread;
	uint32_t        c_swappedin_ts;

	int             c_slot_var_array_len;
	struct c_slot   *c_slot_var_array;
	struct c_slot   c_slot_fixed_array[0];
};

struct c_slot_mapping {
	uint32_t        s_cseg:22,      /* segment number + 1 */
	    s_cindx:10;                 /* index in the segment */
};

#define C_SLOT_MAX_INDEX        (1 << 10)

typedef struct c_slot_mapping *c_slot_mapping_t;
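
/*
 * Note that a c_slot_mapping packs into a single 32-bit word: 22 bits of
 * segment number (biased by 1, presumably so 0 can mean "no segment")
 * plus 10 bits of slot index.  C_SEG_MAX_LIMIT (1 << 20) fits in the
 * 22-bit s_cseg field even after the bias, and C_SLOT_MAX_INDEX
 * (1 << 10) matches the width of s_cindx.
 */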

#define C_SEG_SLOT_VAR_ARRAY_MIN_LEN    C_SEG_MAX_PAGES

extern int c_seg_fixed_array_len;
extern vm_offset_t c_buffers;
#define C_SEG_BUFFER_ADDRESS(c_segno)   ((c_buffers + ((uint64_t)c_segno * (uint64_t)C_SEG_ALLOCSIZE)))
#define C_SEG_SLOT_FROM_INDEX(cseg, index)      (index < c_seg_fixed_array_len ? &(cseg->c_slot_fixed_array[index]) : &(cseg->c_slot_var_array[index - c_seg_fixed_array_len]))

#define C_SEG_OFFSET_TO_BYTES(off)      ((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)    ((bytes) / (int) sizeof(int32_t))

#define C_SEG_UNUSED_BYTES(cseg)        (cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset)))
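
/*
 * Offsets within a segment are counted in 4-byte (int32_t) words, so a
 * 16-bit c_offset spans 2^16 * 4 = 256KB -- exactly C_SEG_BUFSIZE.
 * For example, a slot stored 1024 bytes into the buffer has c_offset 256.
 */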

#ifndef __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_MASK     0x3ULL
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4
#else
#define C_SEG_OFFSET_ALIGNMENT_MASK     __PLATFORM_WKDM_ALIGNMENT_MASK__
#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__
#endif

#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg)     ((C_SEG_UNUSED_BYTES(cseg) >= (C_SEG_BUFSIZE / 4)) ? 1 : 0)
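
/*
 * i.e. a segment becomes a minor-compaction candidate once the dead
 * space counted by C_SEG_UNUSED_BYTES() -- c_bytes_unused plus the
 * populated-but-unallocated tail -- reaches a quarter of the 256KB
 * buffer (64KB).
 */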

/*
 * the decision to force a c_seg to be major compacted is based on 2 criteria
 * 1) is the c_seg buffer almost empty (i.e. we have a chance to merge it with another c_seg)
 * 2) are there at least a minimum number of slots unoccupied so that we have a chance
 *    of combining this c_seg with another one.
 */
#define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg)                             \
	((((cseg->c_bytes_unused + (C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(cseg->c_nextoffset))) >= (C_SEG_BUFSIZE / 8)) && \
	  ((C_SLOT_MAX_INDEX - cseg->c_slots_used) > (C_SEG_BUFSIZE / PAGE_SIZE))) \
	 ? 1 : 0)

#define C_SEG_ONDISK_IS_SPARSE(cseg)    ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 1 : 0)
#define C_SEG_IS_ONDISK(cseg)           ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))
#define C_SEG_IS_ON_DISK_OR_SOQ(cseg)   ((cseg->c_state == C_ON_SWAPPEDOUT_Q ||       \
	                                  cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \
	                                  cseg->c_state == C_ON_SWAPOUT_Q ||           \
	                                  cseg->c_state == C_ON_SWAPIO_Q))

#define C_SEG_WAKEUP_DONE(cseg)                         \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy);                         \
	(cseg)->c_busy = 0;                             \
	assert((cseg)->c_busy_for_thread != NULL);      \
	(cseg)->c_busy_for_thread = NULL;               \
	if ((cseg)->c_wanted) {                         \
	        (cseg)->c_wanted = 0;                   \
	        thread_wakeup((event_t) (cseg));        \
	}                                               \
	MACRO_END

#define C_SEG_BUSY(cseg)                                \
	MACRO_BEGIN                                     \
	assert((cseg)->c_busy == 0);                    \
	(cseg)->c_busy = 1;                             \
	assert((cseg)->c_busy_for_thread == NULL);      \
	(cseg)->c_busy_for_thread = current_thread();   \
	MACRO_END
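
/*
 * Typical usage (illustrative sketch, not a sequence mandated by this
 * header): a thread marks a segment busy under the segment lock, drops
 * the lock to do slow work, then reacquires it and wakes any waiters:
 *
 *	lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	C_SEG_BUSY(c_seg);
 *	lck_mtx_unlock_always(&c_seg->c_lock);
 *	... swap or compact the segment ...
 *	lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	C_SEG_WAKEUP_DONE(c_seg);
 *	lck_mtx_unlock_always(&c_seg->c_lock);
 *
 * Other threads block in c_seg_wait_on_busy() until c_busy clears.
 */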

extern vm_map_t compressor_map;

#if DEVELOPMENT || DEBUG
extern boolean_t write_protect_c_segs;
extern int vm_compressor_test_seg_wp;

#define C_SEG_MAKE_WRITEABLE(cseg)              \
	MACRO_BEGIN                             \
	if (write_protect_c_segs) {             \
	        vm_map_protect(compressor_map,  \
	            (vm_map_offset_t)cseg->c_store.c_buffer, \
	            (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)],\
	            VM_PROT_READ | VM_PROT_WRITE, \
	            0);                         \
	}                                       \
	MACRO_END

#define C_SEG_WRITE_PROTECT(cseg)               \
	MACRO_BEGIN                             \
	if (write_protect_c_segs) {             \
	        vm_map_protect(compressor_map,  \
	            (vm_map_offset_t)cseg->c_store.c_buffer, \
	            (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)],\
	            VM_PROT_READ,               \
	            0);                         \
	}                                       \
	if (vm_compressor_test_seg_wp) {        \
	        volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \
	        *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \
	        (void) vmtstmp;                 \
	}                                       \
	MACRO_END
#endif /* DEVELOPMENT || DEBUG */

typedef struct c_segment *c_segment_t;
typedef struct c_slot *c_slot_t;

uint64_t vm_compressor_total_compressions(void);
void vm_wake_compactor_swapper(void);
void vm_run_compactor(void);
void vm_thrashing_jetsam_done(void);
void vm_consider_waking_compactor_swapper(void);
void vm_consider_swapping(void);
void vm_compressor_flush(void);
void c_seg_free(c_segment_t);
void c_seg_free_locked(c_segment_t);
void c_seg_insert_into_age_q(c_segment_t);
void c_seg_need_delayed_compaction(c_segment_t, boolean_t);
void c_seg_update_task_owner(c_segment_t, task_t);

void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);

void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);

int vm_wants_task_throttled(task_t);

extern void vm_compaction_swapper_do_init(void);
extern void vm_compressor_swap_init(void);
extern lck_rw_t c_master_lock;

#if ENCRYPTED_SWAP
extern void vm_swap_decrypt(c_segment_t);
#endif /* ENCRYPTED_SWAP */

extern int vm_swap_low_on_space(void);
extern int vm_swap_out_of_space(void);
extern kern_return_t vm_swap_get(c_segment_t, uint64_t, uint64_t);
extern void vm_swap_free(uint64_t);
extern void vm_swap_consider_defragmenting(int);

extern void c_seg_swapin_requeue(c_segment_t, boolean_t, boolean_t, boolean_t);
extern int c_seg_swapin(c_segment_t, boolean_t, boolean_t);
extern void c_seg_wait_on_busy(c_segment_t);
extern void c_seg_trim_tail(c_segment_t);
extern void c_seg_switch_state(c_segment_t, int, boolean_t);

extern boolean_t fastwake_recording_in_progress;
extern int compaction_swapper_inited;
extern int compaction_swapper_running;
extern uint64_t vm_swap_put_failures;

extern int c_overage_swapped_count;
extern int c_overage_swapped_limit;

extern queue_head_t c_minor_list_head;
extern queue_head_t c_age_list_head;
extern queue_head_t c_swapout_list_head;
extern queue_head_t c_swappedout_list_head;
extern queue_head_t c_swappedout_sparse_list_head;

extern uint32_t c_age_count;
extern uint32_t c_swapout_count;
extern uint32_t c_swappedout_count;
extern uint32_t c_swappedout_sparse_count;

extern int64_t compressor_bytes_used;
extern uint64_t first_c_segment_to_warm_generation_id;
extern uint64_t last_c_segment_to_warm_generation_id;
extern boolean_t hibernate_flushing;
extern boolean_t hibernate_no_swapspace;
extern boolean_t hibernate_in_progress_with_pinned_swap;
extern uint32_t swapout_target_age;

extern void c_seg_insert_into_q(queue_head_t *, c_segment_t);

extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;

extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden;
extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden;
extern uint32_t vm_compressor_catchup_threshold_divisor_overridden;

extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t);

extern void kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo);

#define PAGE_REPLACEMENT_DISALLOWED(enable)     (enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock))
#define PAGE_REPLACEMENT_ALLOWED(enable)        (enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock))
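
/*
 * Usage sketch (illustrative): both macros take TRUE to acquire
 * c_master_lock (shared vs. exclusive respectively) and FALSE to
 * release it, so calls pair up like a scoped lock:
 *
 *	PAGE_REPLACEMENT_DISALLOWED(TRUE);
 *	... touch compressor state that must not be replaced ...
 *	PAGE_REPLACEMENT_DISALLOWED(FALSE);
 */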

#define AVAILABLE_NON_COMPRESSED_MEMORY (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY                (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)

/*
 * TODO, there may be a minor optimisation opportunity to replace these divisions
 * with multiplies and shifts
 *
 * By multiplying by 10, the divisors can have more precision w/o resorting to floating point... a divisor specified as 25 is in reality a divide by 2.5
 * By multiplying by 9, you get a number ~11% smaller which allows us to have another limit point derived from the same base
 * By multiplying by 11, you get a number ~10% bigger which allows us to generate a reset limit derived from the same base which is useful for hysteresis
 */

#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD            (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD               (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10))

#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 11))

#define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD  (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11))
#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD       (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10))
#define VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD      (((AVAILABLE_MEMORY) * 9) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 9))
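
/*
 * Worked example: with vm_compressor_unthrottle_threshold_divisor set
 * to 25, the unthrottle threshold is AVAILABLE_MEMORY * 10 / 25, i.e.
 * 40% of AVAILABLE_MEMORY (an effective divide by 2.5), while the
 * rethrottle threshold is AVAILABLE_MEMORY * 11 / 25 (44%), giving the
 * ~10% gap used for hysteresis.  A divisor of 0 falls back to the
 * whole of AVAILABLE_MEMORY (10/10 or 11/11).
 */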

#if !XNU_TARGET_OS_OSX
#define AVAILABLE_NON_COMPRESSED_MIN    20000
#define COMPRESSOR_NEEDS_TO_SWAP()      (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \
	                                  (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0)
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_NEEDS_TO_SWAP()      ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)
#endif /* !XNU_TARGET_OS_OSX */

#define HARD_THROTTLE_LIMIT_REACHED()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_UNTHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_RETHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0)
#define SWAPPER_NEEDS_TO_CATCHUP()              ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0)
#define SWAPPER_HAS_CAUGHTUP()                  ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0)
#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT()     ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0)

#if !XNU_TARGET_OS_OSX
#define COMPRESSOR_FREE_RESERVED_LIMIT  28
#else /* !XNU_TARGET_OS_OSX */
#define COMPRESSOR_FREE_RESERVED_LIMIT  128
#endif /* !XNU_TARGET_OS_OSX */

uint32_t vm_compressor_get_encode_scratch_size(void) __pure2;
uint32_t vm_compressor_get_decode_scratch_size(void) __pure2;

#define COMPRESSOR_SCRATCH_BUF_SIZE     vm_compressor_get_encode_scratch_size()

#if RECORD_THE_COMPRESSED_DATA
extern void c_compressed_record_init(void);
extern void c_compressed_record_write(char *, int);
#endif

extern lck_mtx_t c_list_lock_storage;
#define c_list_lock (&c_list_lock_storage)

#if DEVELOPMENT || DEBUG
extern uint32_t vm_ktrace_enabled;

#define VMKDBG(x, ...)                  \
	MACRO_BEGIN                     \
	if (vm_ktrace_enabled) {        \
	        KDBG(x, ## __VA_ARGS__);\
	}                               \
	MACRO_END
#endif