/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>
#include <vm/WKdm_new.h>
#include <vm/vm_object.h>
#include <machine/pmap.h>
#include <kern/locks.h>

#include <sys/kdebug.h>


#define C_SEG_OFFSET_BITS       16
#define C_SEG_BUFSIZE           (1024 * 256)
#define C_SEG_MAX_PAGES         (C_SEG_BUFSIZE / PAGE_SIZE)

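/*
 * Segment geometry: each compressor segment owns a C_SEG_BUFSIZE (256KB)
 * buffer, so C_SEG_MAX_PAGES works out to 64 pages with a 4KB PAGE_SIZE
 * (or 16 pages with a 16KB PAGE_SIZE).
 */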
#define C_SEG_OFF_LIMIT         (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 128)))
#define C_SEG_ALLOCSIZE         (C_SEG_BUFSIZE)
#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE)


#define CHECKSUM_THE_SWAP               0       /* Debug swap data */
#define CHECKSUM_THE_DATA               0       /* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA    0       /* Debug compressor/decompressor compressed data */
#define VALIDATE_C_SEGMENTS             0       /* Debug compaction */

#define RECORD_THE_COMPRESSED_DATA      0


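/*
 * A c_slot describes one compressed object held in a segment's buffer:
 * c_offset is its position in the buffer expressed in int32_t words
 * (see C_SEG_OFFSET_TO_BYTES), c_size is its compressed size in bytes
 * (12 bits, so never more than a 4KB page), and c_packed_ptr packs a
 * back-reference to the pager-side slot that maps this entry.
 */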
struct c_slot {
	uint64_t        c_offset:C_SEG_OFFSET_BITS,
	                c_size:12,
	                c_packed_ptr:36;
#if CHECKSUM_THE_DATA
	unsigned int    c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int    c_hash_compressed_data;
#endif

};

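/*
 * Values for c_segment.c_state.  The C_ON_*_Q values name the queue a
 * segment in that state is found on (age, swapout, swappedout,
 * swappedout-sparse, swappedin, majorcompact, or bad); C_IS_EMPTY,
 * C_IS_FREE and C_IS_FILLING cover segments not on any of those queues.
 */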
#define C_IS_EMPTY              0
#define C_IS_FREE               1
#define C_IS_FILLING            2
#define C_ON_AGE_Q              3
#define C_ON_SWAPOUT_Q          4
#define C_ON_SWAPPEDOUT_Q       5
#define C_ON_SWAPPEDOUTSPARSE_Q 6
#define C_ON_SWAPPEDIN_Q        7
#define C_ON_MAJORCOMPACT_Q     8
#define C_ON_BAD_Q              9


struct c_segment {
#if __i386__ || __x86_64__
	lck_mtx_t       c_lock;
#else /* __i386__ || __x86_64__ */
	lck_spin_t      c_lock;
#endif /* __i386__ || __x86_64__ */
	queue_chain_t   c_age_list;
	queue_chain_t   c_list;

	uint64_t        c_generation_id;
	int32_t         c_bytes_used;
	int32_t         c_bytes_unused;

#define C_SEG_MAX_LIMIT         (1 << 19)       /* this needs to track the size of c_mysegno */
	uint32_t        c_mysegno:19,
	                c_busy:1,
	                c_busy_swapping:1,
	                c_wanted:1,
	                c_on_minorcompact_q:1,  /* can also be on the age_q, the majorcompact_q or the swappedin_q */

	                c_state:4,              /* the state the segment is in, which dictates which q to find it on */
	                c_overage_swap:1,
	                c_reserved:4;

	uint16_t        c_firstemptyslot;
	uint16_t        c_nextslot;
	uint32_t        c_nextoffset;
	uint32_t        c_populated_offset;

	uint32_t        c_creation_ts;
	uint32_t        c_swappedin_ts;

	union {
		int32_t         *c_buffer;
		uint64_t        c_swap_handle;
	} c_store;

#if VALIDATE_C_SEGMENTS
	uint32_t        c_was_minor_compacted;
	uint32_t        c_was_major_compacted;
	uint32_t        c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int    cseg_hash;
	unsigned int    cseg_swap_size;
#endif /* CHECKSUM_THE_SWAP */

#if MACH_ASSERT
	thread_t        c_busy_for_thread;
#endif /* MACH_ASSERT */

	int             c_slot_var_array_len;
	struct c_slot   *c_slot_var_array;
	struct c_slot   c_slot_fixed_array[0];
};

#define C_SEG_SLOT_VAR_ARRAY_MIN_LEN    C_SEG_MAX_PAGES

extern  int             c_seg_fixed_array_len;
extern  vm_offset_t     c_buffers;
#define C_SEG_BUFFER_ADDRESS(c_segno)   ((c_buffers + ((uint64_t)c_segno * (uint64_t)C_SEG_ALLOCSIZE)))

#define C_SEG_SLOT_FROM_INDEX(cseg, index)      (index < c_seg_fixed_array_len ? &(cseg->c_slot_fixed_array[index]) : &(cseg->c_slot_var_array[index - c_seg_fixed_array_len]))

#define C_SEG_OFFSET_TO_BYTES(off)      ((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)    ((bytes) / (int) sizeof(int32_t))

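/*
 * Illustrative sketch (not lifted from the implementation; c_seg and n are
 * placeholder locals): slot indices below c_seg_fixed_array_len live in
 * c_slot_fixed_array, the rest spill into c_slot_var_array, and a slot's
 * data sits c_offset int32_t words into the segment buffer.  For a segment
 * that is resident (not swapped out):
 *
 *	c_slot_t cs   = C_SEG_SLOT_FROM_INDEX(c_seg, n);
 *	char    *data = (char *)&c_seg->c_store.c_buffer[cs->c_offset];
 *	int      size = cs->c_size;	-- compressed size in bytes
 */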
#define C_SEG_UNUSED_BYTES(cseg)        (cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset)))

#define C_SEG_OFFSET_ALIGNMENT_MASK     0x3

#define C_SEG_ONDISK_IS_SPARSE(cseg)    ((cseg->c_bytes_used < (C_SEG_BUFSIZE / 2)) ? 1 : 0)
#define C_SEG_SHOULD_MINORCOMPACT(cseg) ((C_SEG_UNUSED_BYTES(cseg) >= (C_SEG_BUFSIZE / 3)) ? 1 : 0)
#define C_SEG_SHOULD_MAJORCOMPACT(cseg) (((cseg->c_bytes_unused + (C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(cseg->c_nextoffset))) >= (C_SEG_BUFSIZE / 8)) ? 1 : 0)

#define C_SEG_IS_ONDISK(cseg)           ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))


#define C_SEG_WAKEUP_DONE(cseg)                                 \
	MACRO_BEGIN                                             \
	assert((cseg)->c_busy);                                 \
	(cseg)->c_busy = 0;                                     \
	assert((cseg)->c_busy_for_thread != NULL);              \
	assert((((cseg)->c_busy_for_thread = NULL), TRUE));     \
	if ((cseg)->c_wanted) {                                 \
		(cseg)->c_wanted = 0;                           \
		thread_wakeup((event_t) (cseg));                \
	}                                                       \
	MACRO_END

#define C_SEG_BUSY(cseg)                                        \
	MACRO_BEGIN                                             \
	assert((cseg)->c_busy == 0);                            \
	(cseg)->c_busy = 1;                                     \
	assert((cseg)->c_busy_for_thread == NULL);              \
	assert((((cseg)->c_busy_for_thread = current_thread()), TRUE)); \
	MACRO_END

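/*
 * Illustrative sketch of the busy protocol (not taken verbatim from the
 * implementation): with the segment's c_lock held, a thread marks the
 * segment busy before operating on it and later calls C_SEG_WAKEUP_DONE,
 * which clears the bit and wakes any thread that set c_wanted while
 * waiting in c_seg_wait_on_busy().
 *
 *	-- c_lock held --
 *	C_SEG_BUSY(c_seg);
 *	-- drop c_lock, operate on the segment, retake c_lock --
 *	C_SEG_WAKEUP_DONE(c_seg);
 */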


typedef struct c_segment *c_segment_t;
typedef struct c_slot   *c_slot_t;

uint64_t vm_compressor_total_compressions(void);
void vm_wake_compactor_swapper(void);
void vm_thrashing_jetsam_done(void);
void vm_consider_waking_compactor_swapper(void);
void vm_consider_swapping(void);
void vm_compressor_flush(void);
void c_seg_free(c_segment_t);
void c_seg_free_locked(c_segment_t);
void c_seg_insert_into_age_q(c_segment_t);

void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);

void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);

int  vm_wants_task_throttled(task_t);
boolean_t vm_compression_available(void);

extern void vm_compressor_swap_init(void);
extern void vm_compressor_init_locks(void);
extern lck_rw_t c_master_lock;

#if ENCRYPTED_SWAP
extern void vm_swap_decrypt(c_segment_t);
#endif /* ENCRYPTED_SWAP */

extern int  vm_swap_low_on_space(void);
extern kern_return_t vm_swap_get(vm_offset_t, uint64_t, uint64_t);
extern void vm_swap_free(uint64_t);
extern void vm_swap_consider_defragmenting(void);

extern void c_seg_swapin_requeue(c_segment_t, boolean_t);
extern void c_seg_swapin(c_segment_t, boolean_t);
extern void c_seg_wait_on_busy(c_segment_t);
extern void c_seg_trim_tail(c_segment_t);
extern void c_seg_switch_state(c_segment_t, int, boolean_t);

extern boolean_t fastwake_recording_in_progress;
extern int       compaction_swapper_running;
extern uint64_t  vm_swap_put_failures;

extern int       c_overage_swapped_count;
extern int       c_overage_swapped_limit;

extern queue_head_t c_minor_list_head;
extern queue_head_t c_age_list_head;
extern queue_head_t c_swapout_list_head;
extern queue_head_t c_swappedout_list_head;
extern queue_head_t c_swappedout_sparse_list_head;

extern uint32_t c_age_count;
extern uint32_t c_swapout_count;
extern uint32_t c_swappedout_count;
extern uint32_t c_swappedout_sparse_count;

extern int64_t   compressor_bytes_used;
extern uint64_t  first_c_segment_to_warm_generation_id;
extern uint64_t  last_c_segment_to_warm_generation_id;
extern boolean_t hibernate_flushing;
extern boolean_t hibernate_no_swapspace;
extern uint32_t  swapout_target_age;

extern void c_seg_insert_into_q(queue_head_t *, c_segment_t);

extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;
extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t);

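/*
 * PAGE_REPLACEMENT_DISALLOWED(TRUE) takes c_master_lock shared and
 * PAGE_REPLACEMENT_ALLOWED(TRUE) takes it exclusive; passing FALSE to
 * either macro releases the lock via lck_rw_done().
 */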
#define PAGE_REPLACEMENT_DISALLOWED(enable)     (enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock))
#define PAGE_REPLACEMENT_ALLOWED(enable)        (enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock))


#define AVAILABLE_NON_COMPRESSED_MEMORY         (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY                        (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)

#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD            (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD               (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD    (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD       (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 1))

#define COMPRESSOR_NEEDS_TO_SWAP()      ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)

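/*
 * Each threshold scales AVAILABLE_MEMORY by 10/divisor.  For example
 * (hypothetical divisor value, for illustration only): with
 * vm_compressor_majorcompact_threshold_divisor set to 40, the swap
 * threshold is AVAILABLE_MEMORY / 4, so COMPRESSOR_NEEDS_TO_SWAP()
 * fires once non-compressed memory drops below a quarter of the total.
 */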
#define VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()                             \
	(vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP &&        \
	 ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0))
#define HARD_THROTTLE_LIMIT_REACHED()           ((AVAILABLE_NON_COMPRESSED_MEMORY < (VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 2) ? 1 : 0)
#define SWAPPER_NEEDS_TO_UNTHROTTLE()           ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT()     ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0)

/*
 * indicate the need to do a major compaction if
 * the overall set of in-use compression segments
 * becomes sparse... on systems that support pressure
 * driven swapping, this will also cause swapouts to
 * be initiated.
 */
#define COMPRESSOR_NEEDS_TO_MAJOR_COMPACT()     (((c_segment_count >= (c_segments_nearing_limit / 8)) &&        \
	                                          ((c_segment_count * C_SEG_MAX_PAGES) - VM_PAGE_COMPRESSOR_COUNT) > \
	                                          ((c_segment_count / 8) * C_SEG_MAX_PAGES))     \
	                                         ? 1 : 0)

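/*
 * Reading the condition above: the check only arms once c_segment_count
 * reaches 1/8 of c_segments_nearing_limit, and it fires when the unused
 * capacity across all segments (total capacity, c_segment_count *
 * C_SEG_MAX_PAGES, minus the pages the compressor pool currently
 * occupies, VM_PAGE_COMPRESSOR_COUNT) exceeds 1/8 of that total capacity.
 */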
#define COMPRESSOR_FREE_RESERVED_LIMIT          128

#define COMPRESSOR_SCRATCH_BUF_SIZE             WKdm_SCRATCH_BUF_SIZE


#if RECORD_THE_COMPRESSED_DATA
extern void c_compressed_record_init(void);
extern void c_compressed_record_write(char *, int);
#endif


#if __i386__ || __x86_64__
extern lck_mtx_t        *c_list_lock;
#else /* __i386__ || __x86_64__ */
extern lck_spin_t       *c_list_lock;
#endif /* __i386__ || __x86_64__ */