/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <vm/vm_compressor_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>
#include <vm/WKdm_new.h>
#include <vm/vm_object.h>
#include <machine/pmap.h>
#include <kern/locks.h>

#include <sys/kdebug.h>


#define C_SEG_OFFSET_BITS	16
#define C_SEG_BUFSIZE		(1024 * 256)
#define C_SEG_ALLOCSIZE		(C_SEG_BUFSIZE + PAGE_SIZE)
#define C_SEG_OFF_LIMIT		(C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 512)))

#define C_SEG_SLOT_ARRAYS	6
#define C_SEG_SLOT_ARRAY_SIZE	64		/* must be a power of 2 */
#define C_SEG_SLOT_ARRAY_MASK	(C_SEG_SLOT_ARRAY_SIZE - 1)
#define C_SLOT_MAX		(C_SEG_SLOT_ARRAYS * C_SEG_SLOT_ARRAY_SIZE)
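
/*
 * Sizing implied by the constants above: each c_segment carries
 * C_SEG_SLOT_ARRAYS * C_SEG_SLOT_ARRAY_SIZE = 6 * 64 = 384 slots
 * (C_SLOT_MAX) and a 256KB compression buffer (C_SEG_BUFSIZE),
 * over-allocated by one extra page (C_SEG_ALLOCSIZE).
 */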


#define CHECKSUM_THE_SWAP		0	/* Debug swap data */
#define CHECKSUM_THE_DATA		0	/* Debug compressor/decompressor data */
#define CHECKSUM_THE_COMPRESSED_DATA	0	/* Debug compressor/decompressor compressed data */
#define VALIDATE_C_SEGMENTS		0	/* Debug compaction */
#define TRACK_BAD_C_SEGMENTS		0	/* Debug I/O error handling */

struct c_segment {
#if __i386__ || __x86_64__
	lck_mtx_t	c_lock;
#else /* __i386__ || __x86_64__ */
	lck_spin_t	c_lock;
#endif /* __i386__ || __x86_64__ */
	queue_chain_t	c_age_list;
	queue_chain_t	c_list;

	uint64_t	c_generation_id;
	int32_t		c_bytes_used;
	int32_t		c_bytes_unused;

#define C_SEG_MAX_LIMIT		(1 << 19)	/* this needs to track the size of c_mysegno */
	uint32_t	c_mysegno:19,
			c_filling:1,
			c_busy:1,
			c_busy_swapping:1,
			c_wanted:1,
			c_must_free:1,
			c_ondisk:1,
			c_was_swapped_in:1,
			c_on_minorcompact_q:1,	/* can also be on the age_q or the swappedin_q */
			c_on_age_q:1,		/* creation age ordered list of in-core segments that
						   are available to be major-compacted and swapped out */
			c_on_swappedin_q:1,	/* allows us to age newly swapped in segments */
			c_on_swapout_q:1,	/* this is a transient queue */
			c_on_swappedout_q:1,	/* segment has been major-compacted and
						   possibly swapped out to disk (c_ondisk == 1) */
			c_on_swappedout_sparse_q:1;	/* segment has become sparse and should be garbage
							   collected if too many segments reach this state */
	uint16_t	c_firstemptyslot;
	uint16_t	c_nextslot;
	uint32_t	c_nextoffset;
	uint32_t	c_populated_offset;

	uint32_t	c_creation_ts;
	uint32_t	c_swappedin_ts;

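	/*
	 * Backing store for this segment's compressed data: c_buffer while
	 * the segment is resident in compressor address space, c_swap_handle
	 * once it has been swapped out (c_ondisk set).
	 */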
	union {
		int32_t		*c_buffer;
		uint64_t	c_swap_handle;
	} c_store;

#if TRACK_BAD_C_SEGMENTS
	uint32_t	c_on_bad_q;
#endif

#if VALIDATE_C_SEGMENTS
	uint32_t	c_was_minor_compacted;
	uint32_t	c_was_major_compacted;
	uint32_t	c_was_major_donor;
#endif
#if CHECKSUM_THE_SWAP
	unsigned int	cseg_hash;
	unsigned int	cseg_swap_size;
#endif /* CHECKSUM_THE_SWAP */

#if MACH_ASSERT
	thread_t	c_busy_for_thread;
#endif /* MACH_ASSERT */

	struct c_slot	*c_slots[C_SEG_SLOT_ARRAYS];
};


#define C_SEG_SLOT_FROM_INDEX(cseg, index)	(&(cseg->c_slots[index / C_SEG_SLOT_ARRAY_SIZE])[index & C_SEG_SLOT_ARRAY_MASK])
#define C_SEG_SLOTARRAY_FROM_INDEX(cseg, index)	(index / C_SEG_SLOT_ARRAY_SIZE)

#define C_SEG_OFFSET_TO_BYTES(off)	((off) * (int) sizeof(int32_t))
#define C_SEG_BYTES_TO_OFFSET(bytes)	((bytes) / (int) sizeof(int32_t))

#define C_SEG_UNUSED_BYTES(cseg)	(cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset)))

#define C_SEG_OFFSET_ALIGNMENT_MASK	0x3

#define C_SEG_ONDISK_IS_SPARSE(cseg)	((cseg->c_bytes_used < (C_SEG_BUFSIZE / 2)) ? 1 : 0)
#define C_SEG_INCORE_IS_SPARSE(cseg)	((C_SEG_UNUSED_BYTES(cseg) >= (C_SEG_BUFSIZE / 2)) ? 1 : 0)
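
/*
 * The c_nextoffset/c_populated_offset fields are in units of int32_t words,
 * so C_SEG_OFFSET_TO_BYTES(1024) == 4096 bytes and C_SEG_OFF_LIMIT is the
 * offset corresponding to C_SEG_BUFSIZE - 512 bytes.  A segment counts as
 * sparse once less than half of C_SEG_BUFSIZE (128KB) holds useful data.
 */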

#define C_SEG_WAKEUP_DONE(cseg)					\
	MACRO_BEGIN						\
	assert((cseg)->c_busy);					\
	(cseg)->c_busy = 0;					\
	assert((cseg)->c_busy_for_thread != NULL);		\
	assert((((cseg)->c_busy_for_thread = NULL), TRUE));	\
	if ((cseg)->c_wanted) {					\
		(cseg)->c_wanted = 0;				\
		thread_wakeup((event_t) (cseg));		\
	}							\
	MACRO_END

#define C_SEG_BUSY(cseg)					\
	MACRO_BEGIN						\
	assert((cseg)->c_busy == 0);				\
	(cseg)->c_busy = 1;					\
	assert((cseg)->c_busy_for_thread == NULL);		\
	assert((((cseg)->c_busy_for_thread = current_thread()), TRUE));	\
	MACRO_END
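
/*
 * A minimal sketch of how these two macros pair up (illustrative only;
 * actual call sites live in the compressor implementation).  Both are
 * intended to run with the segment's c_lock held:
 *
 *	// with (cseg)->c_lock held
 *	C_SEG_BUSY(c_seg);		// claim the segment
 *	// ... drop c_lock, work on the segment, re-take c_lock ...
 *	C_SEG_WAKEUP_DONE(c_seg);	// clear c_busy, wake waiters if c_wanted
 *
 * A thread that finds c_busy already set can call c_seg_wait_on_busy() to
 * block until the owner's C_SEG_WAKEUP_DONE().
 */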



typedef struct c_segment *c_segment_t;
typedef struct c_slot *c_slot_t;

uint64_t vm_compressor_total_compressions(void);
void vm_wake_compactor_swapper(void);
void vm_thrashing_jetsam_done(void);
void vm_consider_waking_compactor_swapper(void);
void vm_compressor_flush(void);
void c_seg_free(c_segment_t);
void c_seg_free_locked(c_segment_t);
void c_seg_insert_into_age_q(c_segment_t);

void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);

void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);

int vm_wants_task_throttled(task_t);
boolean_t vm_compression_available(void);

extern void vm_compressor_swap_init(void);
extern void vm_compressor_init_locks(void);
extern lck_rw_t c_master_lock;

#if ENCRYPTED_SWAP
extern void vm_swap_decrypt(c_segment_t);
#endif /* ENCRYPTED_SWAP */

extern int vm_swap_low_on_space(void);
extern kern_return_t vm_swap_get(vm_offset_t, uint64_t, uint64_t);
extern void vm_swap_free(uint64_t);
extern void vm_swap_consider_defragmenting(void);

extern void c_seg_swapin_requeue(c_segment_t);
extern void c_seg_swapin(c_segment_t, boolean_t);
extern void c_seg_wait_on_busy(c_segment_t);
extern void c_seg_trim_tail(c_segment_t);

extern boolean_t fastwake_recording_in_progress;
extern int compaction_swapper_running;
extern uint64_t vm_swap_put_failures;

extern queue_head_t c_minor_list_head;
extern queue_head_t c_age_list_head;
extern queue_head_t c_swapout_list_head;
extern queue_head_t c_swappedout_list_head;
extern queue_head_t c_swappedout_sparse_list_head;

extern uint32_t c_age_count;
extern uint32_t c_swapout_count;
extern uint32_t c_swappedout_count;
extern uint32_t c_swappedout_sparse_count;

extern int64_t compressor_bytes_used;
extern uint64_t compressor_kvspace_used;
extern uint64_t first_c_segment_to_warm_generation_id;
extern uint64_t last_c_segment_to_warm_generation_id;
extern boolean_t hibernate_flushing;
extern boolean_t hibernate_no_swapspace;
extern uint32_t swapout_target_age;

extern void c_seg_insert_into_q(queue_head_t *, c_segment_t);

extern uint32_t vm_compressor_minorcompact_threshold_divisor;
extern uint32_t vm_compressor_majorcompact_threshold_divisor;
extern uint32_t vm_compressor_unthrottle_threshold_divisor;
extern uint32_t vm_compressor_catchup_threshold_divisor;
extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t);

#define PAGE_REPLACEMENT_DISALLOWED(enable)	(enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock))
#define PAGE_REPLACEMENT_ALLOWED(enable)	(enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock))
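
/*
 * Usage sketch (derived from the macros above): these bracket critical
 * sections on c_master_lock.  PAGE_REPLACEMENT_DISALLOWED(TRUE) takes the
 * lock shared, PAGE_REPLACEMENT_ALLOWED(TRUE) takes it exclusive, and
 * passing FALSE to either drops the lock via lck_rw_done().
 *
 *	PAGE_REPLACEMENT_DISALLOWED(TRUE);
 *	// ... compressor state that must not change underneath us ...
 *	PAGE_REPLACEMENT_DISALLOWED(FALSE);
 */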


#define AVAILABLE_NON_COMPRESSED_MEMORY		(vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count)
#define AVAILABLE_MEMORY			(AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT)

#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD		(((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD		(((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD	(((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 1))
#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD	(((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 1))
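
/*
 * Each threshold above is (AVAILABLE_MEMORY * 10) / divisor: a divisor of
 * 10 puts the threshold at all of AVAILABLE_MEMORY, a divisor of 20 at half
 * of it.  The "? divisor : 1" term only guards against a zero divisor.
 */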

#define COMPRESSOR_NEEDS_TO_SWAP()		((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0)

#define VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()				\
	((vm_compressor_mode == VM_PAGER_COMPRESSOR_WITH_SWAP ||	\
	  vm_compressor_mode == VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP) && \
	 ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0))
#define HARD_THROTTLE_LIMIT_REACHED()		((AVAILABLE_NON_COMPRESSED_MEMORY < (VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 2) ? 1 : 0)
#define SWAPPER_NEEDS_TO_UNTHROTTLE()		((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0)
#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT()	((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0)

#define COMPRESSOR_NEEDS_TO_MAJOR_COMPACT()	(((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ||	\
						  (compressor_kvspace_used - (compressor_object->resident_page_count * PAGE_SIZE_64)) > compressor_kvwaste_limit) \
						 ? 1 : 0)

#define COMPRESSOR_FREE_RESERVED_LIMIT		28

#define COMPRESSOR_SCRATCH_BUF_SIZE		WKdm_SCRATCH_BUF_SIZE


#if __i386__ || __x86_64__
extern lck_mtx_t	*c_list_lock;
#else /* __i386__ || __x86_64__ */
extern lck_spin_t	*c_list_lock;
#endif /* __i386__ || __x86_64__ */