/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>
#include <vm/WKdm_new.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <machine/pmap.h>
#include <kern/locks.h>

#include <sys/kdebug.h>

#define C_SEG_OFFSET_BITS	16
#define C_SEG_BUFSIZE		(1024 * 256)
#define C_SEG_MAX_PAGES		(C_SEG_BUFSIZE / PAGE_SIZE)

#define C_SEG_OFF_LIMIT		(C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 128)))
#define C_SEG_ALLOCSIZE		(C_SEG_BUFSIZE)
#define C_SEG_MAX_POPULATE_SIZE	(4 * PAGE_SIZE)
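/*
 * Sizing notes (summary added for clarity): a compressor segment buffer is
 * C_SEG_BUFSIZE = 256KB.  Offsets within a segment are kept in int32_t-word
 * units, so 16 offset bits (C_SEG_OFFSET_BITS) span 2^16 * 4 bytes = 256KB,
 * i.e. exactly one buffer.  C_SEG_OFF_LIMIT stops new allocations 128 bytes
 * short of the end, and buffers are physically populated in
 * C_SEG_MAX_POPULATE_SIZE (4-page) chunks.
 */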

#if DEBUG || COMPRESSOR_INTEGRITY_CHECKS
#define ENABLE_SWAP_CHECKS 1
#define ENABLE_COMPRESSOR_CHECKS 1
#else
#define ENABLE_SWAP_CHECKS 0
#define ENABLE_COMPRESSOR_CHECKS 0
#endif

#define CHECKSUM_THE_SWAP		ENABLE_SWAP_CHECKS	/* Debug swap data */
#define CHECKSUM_THE_DATA		ENABLE_COMPRESSOR_CHECKS	/* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA	ENABLE_COMPRESSOR_CHECKS	/* Debug compressor/decompressor compressed data */
#define VALIDATE_C_SEGMENTS		ENABLE_COMPRESSOR_CHECKS	/* Debug compaction */

#define RECORD_THE_COMPRESSED_DATA	0

struct c_slot {
	uint64_t	c_offset:C_SEG_OFFSET_BITS,
			c_size:12,
			c_packed_ptr:36;
#if CHECKSUM_THE_DATA
	unsigned int	c_hash_data;
#endif
#if CHECKSUM_THE_COMPRESSED_DATA
	unsigned int	c_hash_compressed_data;
#endif

};
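/*
 * Per-compressed-page slot metadata (summary added for clarity): c_offset is
 * where the compressed data starts within the segment buffer, in int32_t-word
 * units; c_size is the compressed size in bytes; c_packed_ptr is a packed
 * back-reference to the pager-side slot mapping that owns this page.  With
 * the checksum fields compiled out, the bit-fields pack into one 64-bit word.
 */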

#define C_IS_EMPTY		0
#define C_IS_FREE		1
#define C_IS_FILLING		2
#define C_ON_AGE_Q		3
#define C_ON_SWAPOUT_Q		4
#define C_ON_SWAPPEDOUT_Q	5
#define C_ON_SWAPPEDOUTSPARSE_Q	6
#define C_ON_SWAPPEDIN_Q	7
#define C_ON_MAJORCOMPACT_Q	8
#define C_ON_BAD_Q		9
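/*
 * c_state values (summary added for clarity): C_IS_EMPTY, C_IS_FREE and
 * C_IS_FILLING describe segments that are not yet on an age/swap queue; each
 * C_ON_*_Q value names the queue a segment currently lives on (age, swapout,
 * swapped-out, swapped-out-sparse, swapped-in, major-compact, or bad).
 * c_seg_switch_state(), declared below, moves a segment between these states.
 */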


struct c_segment {
	lck_mtx_t	c_lock;
	queue_chain_t	c_age_list;
	queue_chain_t	c_list;

	uint64_t	c_generation_id;
	int32_t		c_bytes_used;
	int32_t		c_bytes_unused;

#define C_SEG_MAX_LIMIT		(1 << 19)	/* this needs to track the size of c_mysegno */
	uint32_t	c_mysegno:19,
			c_busy:1,
			c_busy_swapping:1,
			c_wanted:1,
			c_on_minorcompact_q:1,	/* can also be on the age_q, the majorcompact_q or the swappedin_q */

			c_state:4,		/* current state of the segment; dictates which queue it is found on */
			c_overage_swap:1,
			c_reserved:4;

	uint16_t	c_firstemptyslot;
	uint16_t	c_nextslot;
	uint32_t	c_nextoffset;
	uint32_t	c_populated_offset;

	uint32_t	c_creation_ts;
	uint32_t	c_swappedin_ts;

	union {
		int32_t		*c_buffer;
		uint64_t	c_swap_handle;
	} c_store;

#if VALIDATE_C_SEGMENTS
	uint32_t	c_was_minor_compacted;
	uint32_t	c_was_major_compacted;
	uint32_t	c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int	cseg_hash;
	unsigned int	cseg_swap_size;
#endif /* CHECKSUM_THE_SWAP */

#if MACH_ASSERT
	thread_t	c_busy_for_thread;
#endif /* MACH_ASSERT */

	int		c_slot_var_array_len;
	struct c_slot	*c_slot_var_array;
	struct c_slot	c_slot_fixed_array[0];
};
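/*
 * Layout notes (summary added for clarity): c_store holds either an in-core
 * buffer of C_SEG_ALLOCSIZE bytes in the compressor's VM map (see
 * C_SEG_BUFFER_ADDRESS below) or, once the segment has been swapped out, a
 * swap handle.  Slot metadata is split in two: the first
 * c_seg_fixed_array_len slots live in the trailing c_slot_fixed_array[], and
 * any further slots spill into the separately allocated c_slot_var_array.
 * C_SEG_SLOT_FROM_INDEX() hides that split.
 */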

#define C_SEG_SLOT_VAR_ARRAY_MIN_LEN	C_SEG_MAX_PAGES

extern	int		c_seg_fixed_array_len;
extern	vm_offset_t	c_buffers;
#define C_SEG_BUFFER_ADDRESS(c_segno)	((c_buffers + ((uint64_t)c_segno * (uint64_t)C_SEG_ALLOCSIZE)))

#define C_SEG_SLOT_FROM_INDEX(cseg, index)	((index) < c_seg_fixed_array_len ? &((cseg)->c_slot_fixed_array[index]) : &((cseg)->c_slot_var_array[(index) - c_seg_fixed_array_len]))

#define C_SEG_OFFSET_TO_BYTES(off)	((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)	((bytes) / (int) sizeof(int32_t))

#define C_SEG_UNUSED_BYTES(cseg)	(cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset)))
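/*
 * Worked example (added for clarity): offsets count int32_t words, so
 * C_SEG_OFFSET_TO_BYTES(off) is off * 4 and C_SEG_BYTES_TO_OFFSET(bytes) is
 * bytes / 4.  C_SEG_UNUSED_BYTES() therefore adds the bytes already freed by
 * dead slots (c_bytes_unused) to the populated-but-not-yet-written tail
 * between c_nextoffset and c_populated_offset.
 */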

#define C_SEG_OFFSET_ALIGNMENT_MASK	0x3

#define C_SEG_ONDISK_IS_SPARSE(cseg)	((cseg->c_bytes_used < (C_SEG_BUFSIZE / 2)) ? 1 : 0)
#define C_SEG_SHOULD_MINORCOMPACT(cseg)	((C_SEG_UNUSED_BYTES(cseg) >= (C_SEG_BUFSIZE / 3)) ? 1 : 0)
#define C_SEG_SHOULD_MAJORCOMPACT(cseg)	(((cseg->c_bytes_unused + (C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(cseg->c_nextoffset))) >= (C_SEG_BUFSIZE / 8)) ? 1 : 0)

#define C_SEG_IS_ONDISK(cseg)		((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q))
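/*
 * Compaction heuristics (summary added for clarity): a swapped-out segment is
 * "sparse" when it uses less than half of its buffer; minor compaction is
 * worthwhile once at least a third of the buffer is reclaimable; major
 * compaction is considered when dead bytes plus the unfilled tail reach an
 * eighth of the buffer.  C_SEG_IS_ONDISK() is true for either swapped-out
 * state.
 */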


#define C_SEG_WAKEUP_DONE(cseg)						\
	MACRO_BEGIN							\
	assert((cseg)->c_busy);						\
	(cseg)->c_busy = 0;						\
	assert((cseg)->c_busy_for_thread != NULL);			\
	assert((((cseg)->c_busy_for_thread = NULL), TRUE));		\
	if ((cseg)->c_wanted) {						\
		(cseg)->c_wanted = 0;					\
		thread_wakeup((event_t) (cseg));			\
	}								\
	MACRO_END

#define C_SEG_BUSY(cseg)						\
	MACRO_BEGIN							\
	assert((cseg)->c_busy == 0);					\
	(cseg)->c_busy = 1;						\
	assert((cseg)->c_busy_for_thread == NULL);			\
	assert((((cseg)->c_busy_for_thread = current_thread()), TRUE));	\
	MACRO_END
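/*
 * Busy protocol (usage sketch added for clarity; illustrative only, not part
 * of the original header).  c_busy serializes heavyweight operations on a
 * segment.  Both macros must be used with the segment's c_lock held; the
 * assert(((...) = ...), TRUE) idiom updates c_busy_for_thread only on
 * MACH_ASSERT kernels, where that field exists.  A caller typically does
 * something like:
 *
 *	lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	while (c_seg->c_busy) {
 *		c_seg_wait_on_busy(c_seg);	-- sets c_wanted, drops c_lock, blocks
 *		lck_mtx_lock_spin_always(&c_seg->c_lock);
 *	}
 *	C_SEG_BUSY(c_seg);
 *	... operate on the segment ...
 *	C_SEG_WAKEUP_DONE(c_seg);		-- wakes anyone who set c_wanted
 *	lck_mtx_unlock_always(&c_seg->c_lock);
 */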


#if DEVELOPMENT || DEBUG
extern vm_map_t compressor_map;

#define C_SEG_MAKE_WRITEABLE(cseg)					\
	MACRO_BEGIN							\
	vm_map_protect(compressor_map,					\
		       (vm_map_offset_t)cseg->c_store.c_buffer,		\
		       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)], \
		       VM_PROT_READ | VM_PROT_WRITE,			\
		       0);						\
	MACRO_END

#define C_SEG_WRITE_PROTECT(cseg)					\
	MACRO_BEGIN							\
	vm_map_protect(compressor_map,					\
		       (vm_map_offset_t)cseg->c_store.c_buffer,		\
		       (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)], \
		       VM_PROT_READ,					\
		       0);						\
	MACRO_END
#endif
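/*
 * Note (added for clarity): on DEVELOPMENT/DEBUG kernels the two macros above
 * flip the protection of the segment's buffer mapping in compressor_map
 * between read/write and read-only, so stray writes into compressed data trap
 * immediately.  They are compiled out of release kernels.
 */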

typedef struct c_segment *c_segment_t;
typedef struct c_slot	*c_slot_t;

uint64_t vm_compressor_total_compressions(void);
void vm_wake_compactor_swapper(void);
void vm_run_compactor(void);
void vm_thrashing_jetsam_done(void);
void vm_consider_waking_compactor_swapper(void);
void vm_consider_swapping(void);
void vm_compressor_flush(void);
void c_seg_free(c_segment_t);
void c_seg_free_locked(c_segment_t);
void c_seg_insert_into_age_q(c_segment_t);
void c_seg_need_delayed_compaction(c_segment_t, boolean_t);

void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);

void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);

int vm_wants_task_throttled(task_t);

extern void vm_compaction_swapper_do_init(void);
extern void vm_compressor_swap_init(void);
extern void vm_compressor_init_locks(void);
extern lck_rw_t c_master_lock;

#if ENCRYPTED_SWAP
extern void vm_swap_decrypt(c_segment_t);
#endif /* ENCRYPTED_SWAP */

extern int vm_swap_low_on_space(void);
extern kern_return_t vm_swap_get(c_segment_t, uint64_t, uint64_t);
extern void vm_swap_free(uint64_t);
extern void vm_swap_consider_defragmenting(void);

extern void c_seg_swapin_requeue(c_segment_t, boolean_t, boolean_t, boolean_t);
extern int c_seg_swapin(c_segment_t, boolean_t, boolean_t);
extern void c_seg_wait_on_busy(c_segment_t);
extern void c_seg_trim_tail(c_segment_t);
extern void c_seg_switch_state(c_segment_t, int, boolean_t);

extern boolean_t fastwake_recording_in_progress;
extern int	compaction_swapper_inited;
extern int	compaction_swapper_running;
extern uint64_t	vm_swap_put_failures;

extern int	c_overage_swapped_count;
extern int	c_overage_swapped_limit;

extern queue_head_t c_minor_list_head;
extern queue_head_t c_age_list_head;
extern queue_head_t c_swapout_list_head;
extern queue_head_t c_swappedout_list_head;
extern queue_head_t c_swappedout_sparse_list_head;

extern uint32_t	c_age_count;
extern uint32_t	c_swapout_count;
extern uint32_t	c_swappedout_count;
extern uint32_t	c_swappedout_sparse_count;

extern int64_t	compressor_bytes_used;
extern uint64_t	first_c_segment_to_warm_generation_id;
extern uint64_t	last_c_segment_to_warm_generation_id;
extern boolean_t hibernate_flushing;
extern boolean_t hibernate_no_swapspace;
extern boolean_t hibernate_in_progress_with_pinned_swap;
extern uint32_t	swapout_target_age;

extern void c_seg_insert_into_q(queue_head_t *, c_segment_t);

extern uint32_t	vm_compressor_minorcompact_threshold_divisor;
extern uint32_t	vm_compressor_majorcompact_threshold_divisor;
extern uint32_t	vm_compressor_unthrottle_threshold_divisor;
extern uint32_t	vm_compressor_catchup_threshold_divisor;
extern uint64_t	vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t);

#define PAGE_REPLACEMENT_DISALLOWED(enable)	(enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock))
#define PAGE_REPLACEMENT_ALLOWED(enable)	(enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock))
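/*
 * Locking note (added for clarity): both macros act on c_master_lock and take
 * their direction from the argument.  Passing TRUE acquires the lock (shared
 * for PAGE_REPLACEMENT_DISALLOWED, exclusive for PAGE_REPLACEMENT_ALLOWED);
 * passing FALSE releases it via lck_rw_done().  They are used as bracketing
 * pairs, e.g. PAGE_REPLACEMENT_DISALLOWED(TRUE) ... PAGE_REPLACEMENT_DISALLOWED(FALSE).
 */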


#define AVAILABLE_NON_COMPRESSED_MEMORY		(vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY			(AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)
/* TODO, there may be a minor optimisation opportunity to replace these divisions
 * with multiplies and shifts
 */

#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD		(((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD		(((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD	(((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD	(((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 1))
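/*
 * Worked example (added for clarity): each threshold is
 * (AVAILABLE_MEMORY * 10) / divisor, so a divisor of 10 yields a threshold
 * equal to AVAILABLE_MEMORY, 20 yields half of it, and so on; a divisor of 0
 * is treated as 1.  The predicates below compare these thresholds against
 * AVAILABLE_NON_COMPRESSED_MEMORY to decide when to swap, throttle,
 * unthrottle or minor-compact.
 */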

#define COMPRESSOR_NEEDS_TO_SWAP()		((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)

#define VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()				\
	(vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP &&	\
	 ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0))
#define HARD_THROTTLE_LIMIT_REACHED()		((AVAILABLE_NON_COMPRESSED_MEMORY < (VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 2) ? 1 : 0)
#define SWAPPER_NEEDS_TO_UNTHROTTLE()		((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT()	((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0)


#define COMPRESSOR_FREE_RESERVED_LIMIT		128

uint32_t vm_compressor_get_encode_scratch_size(void);
uint32_t vm_compressor_get_decode_scratch_size(void);

#define COMPRESSOR_SCRATCH_BUF_SIZE		vm_compressor_get_encode_scratch_size()

#if RECORD_THE_COMPRESSED_DATA
extern void c_compressed_record_init(void);
extern void c_compressed_record_write(char *, int);
#endif

extern lck_mtx_t	*c_list_lock;