apple/xnu.git: osfmk/vm/vm_compressor.c (xnu-7195.101.1)
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_compressor.h>
30
31 #if CONFIG_PHANTOM_CACHE
32 #include <vm/vm_phantom_cache.h>
33 #endif
34
35 #include <vm/vm_map.h>
36 #include <vm/vm_pageout.h>
37 #include <vm/memory_object.h>
38 #include <vm/vm_compressor_algorithms.h>
39 #include <vm/vm_fault.h>
40 #include <vm/vm_protos.h>
41 #include <mach/mach_host.h> /* for host_info() */
42 #include <kern/ledger.h>
43 #include <kern/policy_internal.h>
44 #include <kern/thread_group.h>
45 #include <san/kasan.h>
46
47 #if defined(__x86_64__)
48 #include <i386/misc_protos.h>
49 #endif
50 #if defined(__arm64__)
51 #include <arm/machine_routines.h>
52 #endif
53
54 #include <IOKit/IOHibernatePrivate.h>
55
56 extern boolean_t vm_darkwake_mode;
57 extern zone_t vm_page_zone;
58
59 #if DEVELOPMENT || DEBUG
60 /* sysctl defined in bsd/dev/arm64/sysctl.c */
61 int do_cseg_wedge_thread(void);
62 int do_cseg_unwedge_thread(void);
63 static event_t debug_cseg_wait_event = NULL;
64 #endif /* DEVELOPMENT || DEBUG */
65
66 #if CONFIG_FREEZE
67 bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */
68 void task_disown_frozen_csegs(task_t owner_task);
69 #endif /* CONFIG_FREEZE */
70
71 #if POPCOUNT_THE_COMPRESSED_DATA
72 boolean_t popcount_c_segs = TRUE;
73
74 static inline uint32_t
75 vmc_pop(uintptr_t ins, int sz)
76 {
77 uint32_t rv = 0;
78
79 if (__probable(popcount_c_segs == FALSE)) {
80 return 0xDEAD707C;
81 }
82
83 while (sz >= 16) {
84 uint32_t rv1, rv2;
85 uint64_t *ins64 = (uint64_t *) ins;
86 uint64_t *ins642 = (uint64_t *) (ins + 8);
87 rv1 = __builtin_popcountll(*ins64);
88 rv2 = __builtin_popcountll(*ins642);
89 rv += rv1 + rv2;
90 sz -= 16;
91 ins += 16;
92 }
93
94 while (sz >= 4) {
95 uint32_t *ins32 = (uint32_t *) ins;
96 rv += __builtin_popcount(*ins32);
97 sz -= 4;
98 ins += 4;
99 }
100
101 while (sz > 0) {
102 char *ins8 = (char *)ins;
103 rv += __builtin_popcount(*ins8);
104 sz--;
105 ins++;
106 }
107 return rv;
108 }
109 #endif
110
111 #if VALIDATE_C_SEGMENTS
112 boolean_t validate_c_segs = TRUE;
113 #endif
114 /*
115 * vm_compressor_mode has a hierarchy of control to set its value.
116 * boot-args are checked first, then device-tree, and finally
117 * the default value that is defined below. See vm_fault_init() for
118 * the boot-arg & device-tree code.
119 */
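/*
 * For example (assumption -- the exact boot-arg name is parsed in
 * vm_fault_init(), not in this file): a boot-arg such as "vm_compressor=<mode>"
 * takes precedence over the device-tree property, which in turn takes
 * precedence over the compile-time defaults chosen below.
 */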
120
121 #if !XNU_TARGET_OS_OSX
122
123 #if CONFIG_FREEZE
124 int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT;
125 struct freezer_context freezer_context_global;
126 #else /* CONFIG_FREEZE */
127 int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED;
128 #endif /* CONFIG_FREEZE */
129
130 #else /* !XNU_TARGET_OS_OSX */
131 int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP;
132
133 #endif /* !XNU_TARGET_OS_OSX */
134
135 TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0);
136 int vm_compressor_is_active = 0;
137 int vm_compressor_available = 0;
138
139 extern uint64_t vm_swap_get_max_configured_space(void);
140 extern void vm_pageout_io_throttle(void);
141
142 #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA
143 extern unsigned int hash_string(char *cp, int len);
144 static unsigned int vmc_hash(char *, int);
145 boolean_t checksum_c_segs = TRUE;
146
147 unsigned int
148 vmc_hash(char *cp, int len)
149 {
150 if (__probable(checksum_c_segs == FALSE)) {
151 return 0xDEAD7A37;
152 }
153 return hash_string(cp, len);
154 }
155 #endif
156
157 #define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size)
158 #define PACK_C_SIZE(cs, size) (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size))
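/*
 * Quick sketch of the encoding above: c_size presumably cannot hold the value
 * PAGE_SIZE itself (it is a narrow field), so a full-page payload (e.g. an
 * incompressible page stored uncompressed) is recorded as PAGE_SIZE - 1 and
 * widened back on unpack:
 *
 *   PACK_C_SIZE(cs, PAGE_SIZE);               // stores PAGE_SIZE - 1
 *   assert(UNPACK_C_SIZE(cs) == PAGE_SIZE);   // reads back the full size
 */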
159
160
161 struct c_sv_hash_entry {
162 union {
163 struct {
164 uint32_t c_sv_he_ref;
165 uint32_t c_sv_he_data;
166 } c_sv_he;
167 uint64_t c_sv_he_record;
168 } c_sv_he_un;
169 };
170
171 #define he_ref c_sv_he_un.c_sv_he.c_sv_he_ref
172 #define he_data c_sv_he_un.c_sv_he.c_sv_he_data
173 #define he_record c_sv_he_un.c_sv_he_record
174
175 #define C_SV_HASH_MAX_MISS 32
176 #define C_SV_HASH_SIZE ((1 << 10))
177 #define C_SV_HASH_MASK ((1 << 10) - 1)
178 #define C_SV_CSEG_ID ((1 << 22) - 1)
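/*
 * Note (assumption based on the fields above; the behavior itself lives in
 * the compression/decompression paths later in this file): pages consisting
 * of a single repeated 32-bit value ("single value" pages, zero pages being
 * the common case) are not given buffer space at all -- the value is kept in
 * c_sv_he_data with a reference count in c_sv_he_ref, and the slot's segment
 * number is set to the reserved C_SV_CSEG_ID sentinel so the decompress path
 * knows to consult this hash instead of a real c_segment.
 */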
179
180
181 union c_segu {
182 c_segment_t c_seg;
183 uintptr_t c_segno;
184 };
185
186 #define C_SLOT_ASSERT_PACKABLE(ptr) \
187 VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR);
188
189 #define C_SLOT_PACK_PTR(ptr) \
190 VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR)
191
192 #define C_SLOT_UNPACK_PTR(cslot) \
193 (c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR)
194
195 /* for debugging purposes */
196 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params =
197 VM_PACKING_PARAMS(C_SLOT_PACKED_PTR);
198
199 uint32_t c_segment_count = 0;
200 uint32_t c_segment_count_max = 0;
201
202 uint64_t c_generation_id = 0;
203 uint64_t c_generation_id_flush_barrier;
204
205
206 #define HIBERNATE_FLUSHING_SECS_TO_COMPLETE 120
207
208 boolean_t hibernate_no_swapspace = FALSE;
209 clock_sec_t hibernate_flushing_deadline = 0;
210
211
212 #if RECORD_THE_COMPRESSED_DATA
213 char *c_compressed_record_sbuf;
214 char *c_compressed_record_ebuf;
215 char *c_compressed_record_cptr;
216 #endif
217
218
219 queue_head_t c_age_list_head;
220 queue_head_t c_swappedin_list_head;
221 queue_head_t c_swapout_list_head;
222 queue_head_t c_swapio_list_head;
223 queue_head_t c_swappedout_list_head;
224 queue_head_t c_swappedout_sparse_list_head;
225 queue_head_t c_major_list_head;
226 queue_head_t c_filling_list_head;
227 queue_head_t c_bad_list_head;
228
229 uint32_t c_age_count = 0;
230 uint32_t c_swappedin_count = 0;
231 uint32_t c_swapout_count = 0;
232 uint32_t c_swapio_count = 0;
233 uint32_t c_swappedout_count = 0;
234 uint32_t c_swappedout_sparse_count = 0;
235 uint32_t c_major_count = 0;
236 uint32_t c_filling_count = 0;
237 uint32_t c_empty_count = 0;
238 uint32_t c_bad_count = 0;
239
240
241 queue_head_t c_minor_list_head;
242 uint32_t c_minor_count = 0;
243
244 int c_overage_swapped_count = 0;
245 int c_overage_swapped_limit = 0;
246
247 int c_seg_fixed_array_len;
248 union c_segu *c_segments;
249 vm_offset_t c_buffers;
250 vm_size_t c_buffers_size;
251 caddr_t c_segments_next_page;
252 boolean_t c_segments_busy;
253 uint32_t c_segments_available;
254 uint32_t c_segments_limit;
255 uint32_t c_segments_nearing_limit;
256
257 uint32_t c_segment_svp_in_hash;
258 uint32_t c_segment_svp_hash_succeeded;
259 uint32_t c_segment_svp_hash_failed;
260 uint32_t c_segment_svp_zero_compressions;
261 uint32_t c_segment_svp_nonzero_compressions;
262 uint32_t c_segment_svp_zero_decompressions;
263 uint32_t c_segment_svp_nonzero_decompressions;
264
265 uint32_t c_segment_noncompressible_pages;
266
267 uint32_t c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor */
268 #if CONFIG_FREEZE
269 int32_t c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */
270 uint32_t c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. Based on compressor pool size */
271 #endif /* CONFIG_FREEZE */
272
273 uint32_t c_segment_pages_compressed_limit;
274 uint32_t c_segment_pages_compressed_nearing_limit;
275 uint32_t c_free_segno_head = (uint32_t)-1;
276
277 uint32_t vm_compressor_minorcompact_threshold_divisor = 10;
278 uint32_t vm_compressor_majorcompact_threshold_divisor = 10;
279 uint32_t vm_compressor_unthrottle_threshold_divisor = 10;
280 uint32_t vm_compressor_catchup_threshold_divisor = 10;
281
282 uint32_t vm_compressor_minorcompact_threshold_divisor_overridden = 0;
283 uint32_t vm_compressor_majorcompact_threshold_divisor_overridden = 0;
284 uint32_t vm_compressor_unthrottle_threshold_divisor_overridden = 0;
285 uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0;
286
287 #define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu))
288
289 LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor");
290 LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp);
291 LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp);
292
293 boolean_t decompressions_blocked = FALSE;
294
295 zone_t compressor_segment_zone;
296 int c_compressor_swap_trigger = 0;
297
298 uint32_t compressor_cpus;
299 char *compressor_scratch_bufs;
300 char *kdp_compressor_scratch_buf;
301 char *kdp_compressor_decompressed_page;
302 addr64_t kdp_compressor_decompressed_page_paddr;
303 ppnum_t kdp_compressor_decompressed_page_ppnum;
304
305 clock_sec_t start_of_sample_period_sec = 0;
306 clock_nsec_t start_of_sample_period_nsec = 0;
307 clock_sec_t start_of_eval_period_sec = 0;
308 clock_nsec_t start_of_eval_period_nsec = 0;
309 uint32_t sample_period_decompression_count = 0;
310 uint32_t sample_period_compression_count = 0;
311 uint32_t last_eval_decompression_count = 0;
312 uint32_t last_eval_compression_count = 0;
313
314 #define DECOMPRESSION_SAMPLE_MAX_AGE (60 * 30)
315
316 boolean_t vm_swapout_ripe_segments = FALSE;
317 uint32_t vm_ripe_target_age = (60 * 60 * 48);
318
319 uint32_t swapout_target_age = 0;
320 uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
321 uint32_t overage_decompressions_during_sample_period = 0;
322
323
324 void do_fastwake_warmup(queue_head_t *, boolean_t);
325 boolean_t fastwake_warmup = FALSE;
326 boolean_t fastwake_recording_in_progress = FALSE;
327 clock_sec_t dont_trim_until_ts = 0;
328
329 uint64_t c_segment_warmup_count;
330 uint64_t first_c_segment_to_warm_generation_id = 0;
331 uint64_t last_c_segment_to_warm_generation_id = 0;
332 boolean_t hibernate_flushing = FALSE;
333
334 int64_t c_segment_input_bytes __attribute__((aligned(8))) = 0;
335 int64_t c_segment_compressed_bytes __attribute__((aligned(8))) = 0;
336 int64_t compressor_bytes_used __attribute__((aligned(8))) = 0;
337
338
339 struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned(8)));
340
341 static boolean_t compressor_needs_to_swap(void);
342 static void vm_compressor_swap_trigger_thread(void);
343 static void vm_compressor_do_delayed_compactions(boolean_t);
344 static void vm_compressor_compact_and_swap(boolean_t);
345 static void vm_compressor_age_swapped_in_segments(boolean_t);
346
347 #if XNU_TARGET_OS_OSX
348 static void vm_compressor_take_paging_space_action(void);
349 #endif /* XNU_TARGET_OS_OSX */
350
351 void compute_swapout_target_age(void);
352
353 boolean_t c_seg_major_compact(c_segment_t, c_segment_t);
354 boolean_t c_seg_major_compact_ok(c_segment_t, c_segment_t);
355
356 int c_seg_minor_compaction_and_unlock(c_segment_t, boolean_t);
357 int c_seg_do_minor_compaction_and_unlock(c_segment_t, boolean_t, boolean_t, boolean_t);
358 void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg);
359
360 void c_seg_move_to_sparse_list(c_segment_t);
361 void c_seg_insert_into_q(queue_head_t *, c_segment_t);
362
363 uint64_t vm_available_memory(void);
364 uint64_t vm_compressor_pages_compressed(void);
365
366 /*
367 * indicate the need to do a major compaction if
368 * the overall set of in-use compression segments
369 * becomes sparse... on systems that support pressure
370 * driven swapping, this will also cause swapouts to
371 * be initiated.
372 */
373 static inline boolean_t
374 vm_compressor_needs_to_major_compact()
375 {
376 uint32_t incore_seg_count;
377
378 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
379
380 if ((c_segment_count >= (c_segments_nearing_limit / 8)) &&
381 ((incore_seg_count * C_SEG_MAX_PAGES) - VM_PAGE_COMPRESSOR_COUNT) >
382 ((incore_seg_count / 8) * C_SEG_MAX_PAGES)) {
383 return 1;
384 }
385 return 0;
386 }
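/*
 * Rough arithmetic behind the test above (hypothetical numbers): with
 * incore_seg_count = 800 and C_SEG_MAX_PAGES = 16, in-core capacity is
 * 12800 pages; if VM_PAGE_COMPRESSOR_COUNT is 11000 resident compressor
 * pages, the unused capacity of 1800 pages exceeds 1/8 of the capacity
 * (1600), so the in-use segments are considered sparse and a major
 * compaction (and, with swap enabled, swapouts) is requested.
 */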
387
388
389 uint64_t
390 vm_available_memory(void)
391 {
392 return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64;
393 }
394
395
396 uint64_t
397 vm_compressor_pages_compressed(void)
398 {
399 return c_segment_pages_compressed * PAGE_SIZE_64;
400 }
401
402
403 boolean_t
404 vm_compressor_low_on_space(void)
405 {
406 #if CONFIG_FREEZE
407 uint64_t incore_seg_count;
408 uint32_t incore_compressed_pages;
409 if (freezer_incore_cseg_acct) {
410 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
411 incore_compressed_pages = c_segment_pages_compressed_incore;
412 } else {
413 incore_seg_count = c_segment_count;
414 incore_compressed_pages = c_segment_pages_compressed;
415 }
416
417 if ((incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ||
418 (incore_seg_count > c_segments_nearing_limit)) {
419 return TRUE;
420 }
421 #else /* CONFIG_FREEZE */
422 if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ||
423 (c_segment_count > c_segments_nearing_limit)) {
424 return TRUE;
425 }
426 #endif /* CONFIG_FREEZE */
427 return FALSE;
428 }
429
430
431 boolean_t
432 vm_compressor_out_of_space(void)
433 {
434 #if CONFIG_FREEZE
435 uint64_t incore_seg_count;
436 uint32_t incore_compressed_pages;
437 if (freezer_incore_cseg_acct) {
438 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
439 incore_compressed_pages = c_segment_pages_compressed_incore;
440 } else {
441 incore_seg_count = c_segment_count;
442 incore_compressed_pages = c_segment_pages_compressed;
443 }
444
445 if ((incore_compressed_pages >= c_segment_pages_compressed_limit) ||
446 (incore_seg_count > c_segments_incore_limit)) {
447 return TRUE;
448 }
449 #else /* CONFIG_FREEZE */
450 if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) ||
451 (c_segment_count >= c_segments_limit)) {
452 return TRUE;
453 }
454 #endif /* CONFIG_FREEZE */
455 return FALSE;
456 }
457
458
459 int
460 vm_wants_task_throttled(task_t task)
461 {
462 if (task == kernel_task) {
463 return 0;
464 }
465
466 if (VM_CONFIG_SWAP_IS_ACTIVE) {
467 if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) &&
468 (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4)) {
469 return 1;
470 }
471 }
472 return 0;
473 }
474
475
476 #if DEVELOPMENT || DEBUG
477 /*
478 * On compressor/swap exhaustion, kill the largest process regardless of
479 * its chosen process policy.
480 */
481 TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false);
482 #endif /* DEVELOPMENT || DEBUG */
483
484 #if XNU_TARGET_OS_OSX
485
486 static uint32_t no_paging_space_action_in_progress = 0;
487 extern void memorystatus_send_low_swap_note(void);
488
489 static void
490 vm_compressor_take_paging_space_action(void)
491 {
492 if (no_paging_space_action_in_progress == 0) {
493 if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) {
494 if (no_paging_space_action()) {
495 #if DEVELOPMENT || DEBUG
496 if (kill_on_no_paging_space) {
497 /*
498 * Since we are choosing to always kill a process, we don't need the
499 * "out of application memory" dialog box in this mode. And, hence we won't
500 * send the knote.
501 */
502 no_paging_space_action_in_progress = 0;
503 return;
504 }
505 #endif /* DEVELOPMENT || DEBUG */
506 memorystatus_send_low_swap_note();
507 }
508
509 no_paging_space_action_in_progress = 0;
510 }
511 }
512 }
513 #endif /* XNU_TARGET_OS_OSX */
514
515
516 void
517 vm_decompressor_lock(void)
518 {
519 PAGE_REPLACEMENT_ALLOWED(TRUE);
520
521 decompressions_blocked = TRUE;
522
523 PAGE_REPLACEMENT_ALLOWED(FALSE);
524 }
525
526 void
527 vm_decompressor_unlock(void)
528 {
529 PAGE_REPLACEMENT_ALLOWED(TRUE);
530
531 decompressions_blocked = FALSE;
532
533 PAGE_REPLACEMENT_ALLOWED(FALSE);
534
535 thread_wakeup((event_t)&decompressions_blocked);
536 }
537
538 static inline void
539 cslot_copy(c_slot_t cdst, c_slot_t csrc)
540 {
541 #if CHECKSUM_THE_DATA
542 cdst->c_hash_data = csrc->c_hash_data;
543 #endif
544 #if CHECKSUM_THE_COMPRESSED_DATA
545 cdst->c_hash_compressed_data = csrc->c_hash_compressed_data;
546 #endif
547 #if POPCOUNT_THE_COMPRESSED_DATA
548 cdst->c_pop_cdata = csrc->c_pop_cdata;
549 #endif
550 cdst->c_size = csrc->c_size;
551 cdst->c_packed_ptr = csrc->c_packed_ptr;
552 #if defined(__arm__) || defined(__arm64__)
553 cdst->c_codec = csrc->c_codec;
554 #endif
555 #if __ARM_WKDM_POPCNT__
556 cdst->c_inline_popcount = csrc->c_inline_popcount;
557 #endif
558 }
559
560 vm_map_t compressor_map;
561 uint64_t compressor_pool_max_size;
562 uint64_t compressor_pool_size;
563 uint32_t compressor_pool_multiplier;
564
565 #if DEVELOPMENT || DEBUG
566 /*
567 * Compressor segments are write-protected in development/debug
568 * kernels to help debug memory corruption.
569 * In cases where performance is a concern, this can be disabled
570 * via the boot-arg "-disable_cseg_write_protection".
571 */
572 boolean_t write_protect_c_segs = TRUE;
573 int vm_compressor_test_seg_wp;
574 uint32_t vm_ktrace_enabled;
575 #endif /* DEVELOPMENT || DEBUG */
576
577 void
578 vm_compressor_init(void)
579 {
580 thread_t thread;
581 int attempts = 1;
582 kern_return_t retval = KERN_SUCCESS;
583 vm_offset_t start_addr = 0;
584 vm_size_t c_segments_arr_size = 0, compressor_submap_size = 0;
585 vm_map_kernel_flags_t vmk_flags;
586 #if RECORD_THE_COMPRESSED_DATA
587 vm_size_t c_compressed_record_sbuf_size = 0;
588 #endif /* RECORD_THE_COMPRESSED_DATA */
589
590 #if DEVELOPMENT || DEBUG || CONFIG_FREEZE
591 char bootarg_name[32];
592 #endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */
593
594 #if DEVELOPMENT || DEBUG
595 if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) {
596 write_protect_c_segs = FALSE;
597 }
598 int vmcval = 1;
599 PE_parse_boot_argn("vm_compressor_validation", &vmcval, sizeof(vmcval));
600
601 if (kern_feature_override(KF_COMPRSV_OVRD)) {
602 vmcval = 0;
603 }
604 if (vmcval == 0) {
605 #if POPCOUNT_THE_COMPRESSED_DATA
606 popcount_c_segs = FALSE;
607 #endif
608 #if CHECKSUM_THE_DATA || CHECKSUM_THE_COMPRESSED_DATA
609 checksum_c_segs = FALSE;
610 #endif
611 #if VALIDATE_C_SEGMENTS
612 validate_c_segs = FALSE;
613 #endif
614 write_protect_c_segs = FALSE;
615 }
616 #endif /* DEVELOPMENT || DEBUG */
617
618 #if CONFIG_FREEZE
619 if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) {
620 freezer_incore_cseg_acct = FALSE;
621 }
622 #endif /* CONFIG_FREEZE */
623
624 assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE);
625
626 #if !XNU_TARGET_OS_OSX
627 vm_compressor_minorcompact_threshold_divisor = 20;
628 vm_compressor_majorcompact_threshold_divisor = 30;
629 vm_compressor_unthrottle_threshold_divisor = 40;
630 vm_compressor_catchup_threshold_divisor = 60;
631 #else /* !XNU_TARGET_OS_OSX */
632 if (max_mem <= (3ULL * 1024ULL * 1024ULL * 1024ULL)) {
633 vm_compressor_minorcompact_threshold_divisor = 11;
634 vm_compressor_majorcompact_threshold_divisor = 13;
635 vm_compressor_unthrottle_threshold_divisor = 20;
636 vm_compressor_catchup_threshold_divisor = 35;
637 } else {
638 vm_compressor_minorcompact_threshold_divisor = 20;
639 vm_compressor_majorcompact_threshold_divisor = 25;
640 vm_compressor_unthrottle_threshold_divisor = 35;
641 vm_compressor_catchup_threshold_divisor = 50;
642 }
643 #endif /* !XNU_TARGET_OS_OSX */
644
645 queue_init(&c_bad_list_head);
646 queue_init(&c_age_list_head);
647 queue_init(&c_minor_list_head);
648 queue_init(&c_major_list_head);
649 queue_init(&c_filling_list_head);
650 queue_init(&c_swapout_list_head);
651 queue_init(&c_swapio_list_head);
652 queue_init(&c_swappedin_list_head);
653 queue_init(&c_swappedout_list_head);
654 queue_init(&c_swappedout_sparse_list_head);
655
656 c_free_segno_head = -1;
657 c_segments_available = 0;
658
659 if (vm_compression_limit) {
660 compressor_pool_size = ptoa_64(vm_compression_limit);
661 }
662
663 compressor_pool_max_size = C_SEG_MAX_LIMIT;
664 compressor_pool_max_size *= C_SEG_BUFSIZE;
665
666 #if XNU_TARGET_OS_OSX
667
668 if (vm_compression_limit == 0) {
669 if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) {
670 compressor_pool_size = 16ULL * max_mem;
671 } else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
672 compressor_pool_size = 8ULL * max_mem;
673 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
674 compressor_pool_size = 4ULL * max_mem;
675 } else {
676 compressor_pool_size = 2ULL * max_mem;
677 }
678 }
679 if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) {
680 compressor_pool_multiplier = 1;
681 } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) {
682 compressor_pool_multiplier = 2;
683 } else {
684 compressor_pool_multiplier = 4;
685 }
686
687 #elif defined(__arm__)
688
689 #define VM_RESERVE_SIZE (1024 * 1024 * 256)
690 #define MAX_COMPRESSOR_POOL_SIZE (1024 * 1024 * 450)
691
692 if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE) {
693 compressor_pool_max_size = MAX_COMPRESSOR_POOL_SIZE;
694 }
695
696 if (vm_compression_limit == 0) {
697 compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE;
698 }
699 compressor_pool_multiplier = 1;
700
701 #elif defined(__arm64__) && defined(XNU_TARGET_OS_WATCH)
702
703 /*
704 * On M9 watches the compressor can become big and can lead to
705 * churn in the working set, resulting in audio drops. Setting a cap
706 * on the compressor size favors reclaiming unused memory
707 * sitting in the idle band via jetsams.
708 */
709
710 #define COMPRESSOR_CAP_PERCENTAGE 37ULL
711
712 if (compressor_pool_max_size > max_mem) {
713 compressor_pool_max_size = max_mem;
714 }
715
716 if (vm_compression_limit == 0) {
717 compressor_pool_size = (max_mem * COMPRESSOR_CAP_PERCENTAGE) / 100ULL;
718 }
719 compressor_pool_multiplier = 1;
720
721 #else
722
723 if (compressor_pool_max_size > max_mem) {
724 compressor_pool_max_size = max_mem;
725 }
726
727 if (vm_compression_limit == 0) {
728 compressor_pool_size = max_mem;
729 }
730 compressor_pool_multiplier = 1;
731 #endif
732 if (compressor_pool_size > compressor_pool_max_size) {
733 compressor_pool_size = compressor_pool_max_size;
734 }
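/*
 * Worked example of the sizing above (macOS path, hypothetical 16GB machine):
 * 16GB lands in the "<= 32GB" bucket, so compressor_pool_size = 4 * max_mem
 * = 64GB of compressor address space and compressor_pool_multiplier = 2;
 * the pool is then clamped here if it exceeds compressor_pool_max_size
 * (C_SEG_MAX_LIMIT * C_SEG_BUFSIZE).
 */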
735
736 try_again:
737 c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(C_SEG_ALLOCSIZE));
738 c_segments_nearing_limit = (uint32_t)(((uint64_t)c_segments_limit * 98ULL) / 100ULL);
739
740 c_segment_pages_compressed_limit = (c_segments_limit * (C_SEG_BUFSIZE / PAGE_SIZE) * compressor_pool_multiplier);
741
742 if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) {
743 if (!vm_compression_limit) {
744 c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE);
745 }
746 }
747
748 c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL);
749
750 #if CONFIG_FREEZE
751 /*
752 * Our in-core limits are based on the size of the compressor pool.
753 * The c_segments_nearing_limit is also based on the compressor pool
754 * size and calculated above.
755 */
756 c_segments_incore_limit = c_segments_limit;
757
758 if (freezer_incore_cseg_acct) {
759 /*
760 * Add enough segments to track all frozen c_segs that can be stored in swap.
761 */
762 c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(C_SEG_ALLOCSIZE));
763 }
764 #endif
765 /*
766 * Submap needs space for:
767 * - c_segments
768 * - c_buffers
769 * - swap reclamations -- C_SEG_BUFSIZE
770 */
771 c_segments_arr_size = vm_map_round_page((sizeof(union c_segu) * c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
772 c_buffers_size = vm_map_round_page(((vm_size_t)C_SEG_ALLOCSIZE * (vm_size_t)c_segments_limit), VM_MAP_PAGE_MASK(kernel_map));
773
774 compressor_submap_size = c_segments_arr_size + c_buffers_size + C_SEG_BUFSIZE;
775
776 #if RECORD_THE_COMPRESSED_DATA
777 c_compressed_record_sbuf_size = (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2);
778 compressor_submap_size += c_compressed_record_sbuf_size;
779 #endif /* RECORD_THE_COMPRESSED_DATA */
780
781 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
782 vmk_flags.vmkf_permanent = TRUE;
783 retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size,
784 FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR,
785 &compressor_map);
786
787 if (retval != KERN_SUCCESS) {
788 if (++attempts > 3) {
789 panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size);
790 }
791
792 compressor_pool_size = compressor_pool_size / 2;
793
794 kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size);
795 goto try_again;
796 }
797 if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments),
798 (sizeof(union c_segu) * c_segments_limit), 0,
799 KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
800 panic("vm_compressor_init: kernel_memory_allocate failed - c_segments\n");
801 }
802 if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0,
803 KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) {
804 panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers\n");
805 }
806
807
808 /*
809 * Pick a good size that will minimize fragmentation in zalloc
810 * by minimizing the fragmentation in a 16k run.
811 *
812 * C_SEG_SLOT_VAR_ARRAY_MIN_LEN is larger on 4k systems than 16k ones,
813 * making the fragmentation in a 4k page terrible. Using 16k for all
814 * systems matches zalloc() and will minimize fragmentation.
815 */
816 uint32_t c_segment_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot));
817 uint32_t cnt = (16 << 10) / c_segment_size;
818 uint32_t frag = (16 << 10) % c_segment_size;
819
820 c_seg_fixed_array_len = C_SEG_SLOT_VAR_ARRAY_MIN_LEN;
821
822 while (cnt * sizeof(struct c_slot) < frag) {
823 c_segment_size += sizeof(struct c_slot);
824 c_seg_fixed_array_len++;
825 frag -= cnt * sizeof(struct c_slot);
826 }
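/*
 * Hypothetical example of the sizing loop above: if c_segment_size starts at
 * 1000 bytes and sizeof(struct c_slot) is 8, a 16k run holds cnt = 16
 * segments with frag = 384 bytes left over. The loop then grows the fixed
 * slot array twice (16 * 8 = 128 fits under 384, then under 256), ending with
 * c_segment_size = 1016 and only 128 bytes of the run wasted, i.e. the
 * otherwise-unused tail is converted into extra fixed slots per segment.
 */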
827
828 compressor_segment_zone = zone_create("compressor_segment",
829 c_segment_size, ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
830
831 c_segments_busy = FALSE;
832
833 c_segments_next_page = (caddr_t)c_segments;
834 vm_compressor_algorithm_init();
835
836 {
837 host_basic_info_data_t hinfo;
838 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
839 size_t bufsize;
840 char *buf;
841
842 #define BSD_HOST 1
843 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
844
845 compressor_cpus = hinfo.max_cpus;
846
847 bufsize = PAGE_SIZE;
848 bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size();
849 bufsize += vm_compressor_get_decode_scratch_size();
850 #if CONFIG_FREEZE
851 bufsize += vm_compressor_get_encode_scratch_size();
852 #endif
853 #if RECORD_THE_COMPRESSED_DATA
854 bufsize += c_compressed_record_sbuf_size;
855 #endif
856
857 if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buf, bufsize,
858 PAGE_MASK, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
859 panic("vm_compressor_init: Unable to allocate %zd bytes", bufsize);
860 }
861
862 /*
863 * kdp_compressor_decompressed_page must be page aligned because we access
864 * it through the physical aperture by page number.
865 */
866 kdp_compressor_decompressed_page = buf;
867 kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page);
868 kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr);
869 buf += PAGE_SIZE;
870 bufsize -= PAGE_SIZE;
871
872 compressor_scratch_bufs = buf;
873 buf += compressor_cpus * vm_compressor_get_decode_scratch_size();
874 bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size();
875
876 kdp_compressor_scratch_buf = buf;
877 buf += vm_compressor_get_decode_scratch_size();
878 bufsize -= vm_compressor_get_decode_scratch_size();
879
880 #if CONFIG_FREEZE
881 freezer_context_global.freezer_ctx_compressor_scratch_buf = buf;
882 buf += vm_compressor_get_encode_scratch_size();
883 bufsize -= vm_compressor_get_encode_scratch_size();
884 #endif
885
886 #if RECORD_THE_COMPRESSED_DATA
887 c_compressed_record_sbuf = buf;
888 c_compressed_record_cptr = buf;
889 c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size;
890 buf += c_compressed_record_sbuf_size;
891 bufsize -= c_compressed_record_sbuf_size;
892 #endif
893 assert(bufsize == 0);
894 }
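/*
 * Layout of the single allocation carved up above, in order:
 *
 *   1 page                                  kdp_compressor_decompressed_page
 *   compressor_cpus * decode_scratch_size   per-CPU decompression scratch bufs
 *   decode_scratch_size                     kdp_compressor_scratch_buf
 *   encode_scratch_size                     freezer scratch (CONFIG_FREEZE only)
 *   c_compressed_record_sbuf_size           record buffer (RECORD_THE_COMPRESSED_DATA only)
 *
 * which is why the final assert expects bufsize to have been consumed exactly.
 */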
895
896 if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL,
897 BASEPRI_VM, &thread) != KERN_SUCCESS) {
898 panic("vm_compressor_swap_trigger_thread: create failed");
899 }
900 thread_deallocate(thread);
901
902 if (vm_pageout_internal_start() != KERN_SUCCESS) {
903 panic("vm_compressor_init: Failed to start the internal pageout thread.\n");
904 }
905 if (VM_CONFIG_SWAP_IS_PRESENT) {
906 vm_compressor_swap_init();
907 }
908
909 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
910 vm_compressor_is_active = 1;
911 }
912
913 #if CONFIG_FREEZE
914 memorystatus_freeze_enabled = TRUE;
915 #endif /* CONFIG_FREEZE */
916
917 vm_compressor_available = 1;
918
919 vm_page_reactivate_all_throttled();
920 }
921
922
923 #if VALIDATE_C_SEGMENTS
924
925 static void
926 c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact)
927 {
928 uint16_t c_indx;
929 int32_t bytes_used;
930 uint32_t c_rounded_size;
931 uint32_t c_size;
932 c_slot_t cs;
933
934 if (__probable(validate_c_segs == FALSE)) {
935 return;
936 }
937 if (c_seg->c_firstemptyslot < c_seg->c_nextslot) {
938 c_indx = c_seg->c_firstemptyslot;
939 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
940
941 if (cs == NULL) {
942 panic("c_seg_validate: no slot backing c_firstemptyslot");
943 }
944
945 if (cs->c_size) {
946 panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs->c_size);
947 }
948 }
949 bytes_used = 0;
950
951 for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) {
952 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
953
954 c_size = UNPACK_C_SIZE(cs);
955
956 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
957
958 bytes_used += c_rounded_size;
959
960 #if CHECKSUM_THE_COMPRESSED_DATA
961 unsigned csvhash;
962 if (c_size && cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
963 addr64_t csvphys = kvtophys((vm_offset_t)&c_seg->c_store.c_buffer[cs->c_offset]);
964 panic("Compressed data doesn't match original %p phys: 0x%llx %d %p %d %d 0x%x 0x%x", c_seg, csvphys, cs->c_offset, cs, c_indx, c_size, cs->c_hash_compressed_data, csvhash);
965 }
966 #endif
967 #if POPCOUNT_THE_COMPRESSED_DATA
968 unsigned csvpop;
969 if (c_size) {
970 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
971 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
972 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%llx 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, (uint64_t)cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
973 }
974 }
975 #endif
976 }
977
978 if (bytes_used != c_seg->c_bytes_used) {
979 panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d\n", bytes_used, c_seg->c_bytes_used);
980 }
981
982 if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
983 panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
984 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
985 }
986
987 if (must_be_compact) {
988 if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) {
989 panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n",
990 (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used);
991 }
992 }
993 }
994
995 #endif
996
997
998 void
999 c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held)
1000 {
1001 boolean_t clear_busy = FALSE;
1002
1003 if (c_list_lock_held == FALSE) {
1004 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1005 C_SEG_BUSY(c_seg);
1006
1007 lck_mtx_unlock_always(&c_seg->c_lock);
1008 lck_mtx_lock_spin_always(c_list_lock);
1009 lck_mtx_lock_spin_always(&c_seg->c_lock);
1010
1011 clear_busy = TRUE;
1012 }
1013 }
1014 assert(c_seg->c_state != C_IS_FILLING);
1015
1016 if (!c_seg->c_on_minorcompact_q && !(C_SEG_IS_ON_DISK_OR_SOQ(c_seg))) {
1017 queue_enter(&c_minor_list_head, c_seg, c_segment_t, c_list);
1018 c_seg->c_on_minorcompact_q = 1;
1019 c_minor_count++;
1020 }
1021 if (c_list_lock_held == FALSE) {
1022 lck_mtx_unlock_always(c_list_lock);
1023 }
1024
1025 if (clear_busy == TRUE) {
1026 C_SEG_WAKEUP_DONE(c_seg);
1027 }
1028 }
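/*
 * Note on the locking pattern above (also used by c_seg_move_to_sparse_list
 * below): the established lock order is c_list_lock before a segment's
 * c_lock, so a caller that already holds c_lock must not block waiting for
 * c_list_lock. Instead it marks the segment busy, drops c_lock, takes
 * c_list_lock, and re-takes c_lock; the busy bit keeps the segment from being
 * freed or repurposed while it is briefly unlocked.
 */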
1029
1030
1031 unsigned int c_seg_moved_to_sparse_list = 0;
1032
1033 void
1034 c_seg_move_to_sparse_list(c_segment_t c_seg)
1035 {
1036 boolean_t clear_busy = FALSE;
1037
1038 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1039 C_SEG_BUSY(c_seg);
1040
1041 lck_mtx_unlock_always(&c_seg->c_lock);
1042 lck_mtx_lock_spin_always(c_list_lock);
1043 lck_mtx_lock_spin_always(&c_seg->c_lock);
1044
1045 clear_busy = TRUE;
1046 }
1047 c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE);
1048
1049 c_seg_moved_to_sparse_list++;
1050
1051 lck_mtx_unlock_always(c_list_lock);
1052
1053 if (clear_busy == TRUE) {
1054 C_SEG_WAKEUP_DONE(c_seg);
1055 }
1056 }
1057
1058
1059 void
1060 c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg)
1061 {
1062 c_segment_t c_seg_next;
1063
1064 if (queue_empty(qhead)) {
1065 queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1066 } else {
1067 c_seg_next = (c_segment_t)queue_first(qhead);
1068
1069 while (TRUE) {
1070 if (c_seg->c_generation_id < c_seg_next->c_generation_id) {
1071 queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list);
1072 break;
1073 }
1074 c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list);
1075
1076 if (queue_end(qhead, (queue_entry_t) c_seg_next)) {
1077 queue_enter(qhead, c_seg, c_segment_t, c_age_list);
1078 break;
1079 }
1080 }
1081 }
1082 }
1083
1084
1085 int try_minor_compaction_failed = 0;
1086 int try_minor_compaction_succeeded = 0;
1087
1088 void
1089 c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg)
1090 {
1091 assert(c_seg->c_on_minorcompact_q);
1092 /*
1093 * c_seg is currently on the delayed minor compaction
1094 * queue and we have c_seg locked... if we can get the
1095 * c_list_lock w/o blocking (if we blocked we could deadlock
1096 * because the lock order is c_list_lock then c_seg's lock)
1097 * we'll pull it from the delayed list and minor compact it directly
1098 */
1099 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
1100 /*
1101 * c_list_lock is held, we need to bail
1102 */
1103 try_minor_compaction_failed++;
1104
1105 lck_mtx_unlock_always(&c_seg->c_lock);
1106 } else {
1107 try_minor_compaction_succeeded++;
1108
1109 C_SEG_BUSY(c_seg);
1110 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, FALSE);
1111 }
1112 }
1113
1114
1115 int
1116 c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement)
1117 {
1118 int c_seg_freed;
1119
1120 assert(c_seg->c_busy);
1121 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
1122
1123 /*
1124 * check for the case that can occur when we are not swapping
1125 * and this segment has been major compacted in the past
1126 * and moved to the majorcompact q to remove it from further
1127 * consideration... if the occupancy falls too low we need
1128 * to put it back on the age_q so that it will be considered
1129 * in the next major compaction sweep... if we don't do this
1130 * we will eventually run into the c_segments_limit
1131 */
1132 if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) {
1133 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
1134 }
1135 if (!c_seg->c_on_minorcompact_q) {
1136 if (clear_busy == TRUE) {
1137 C_SEG_WAKEUP_DONE(c_seg);
1138 }
1139
1140 lck_mtx_unlock_always(&c_seg->c_lock);
1141
1142 return 0;
1143 }
1144 queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list);
1145 c_seg->c_on_minorcompact_q = 0;
1146 c_minor_count--;
1147
1148 lck_mtx_unlock_always(c_list_lock);
1149
1150 if (disallow_page_replacement == TRUE) {
1151 lck_mtx_unlock_always(&c_seg->c_lock);
1152
1153 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1154
1155 lck_mtx_lock_spin_always(&c_seg->c_lock);
1156 }
1157 c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy);
1158
1159 if (disallow_page_replacement == TRUE) {
1160 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1161 }
1162
1163 if (need_list_lock == TRUE) {
1164 lck_mtx_lock_spin_always(c_list_lock);
1165 }
1166
1167 return c_seg_freed;
1168 }
1169
1170 void
1171 kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
1172 {
1173 c_segment_t c_seg = (c_segment_t) wait_event;
1174
1175 waitinfo->owner = thread_tid(c_seg->c_busy_for_thread);
1176 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(c_seg);
1177 }
1178
1179 #if DEVELOPMENT || DEBUG
1180 int
1181 do_cseg_wedge_thread(void)
1182 {
1183 struct c_segment c_seg;
1184 c_seg.c_busy_for_thread = current_thread();
1185
1186 debug_cseg_wait_event = (event_t) &c_seg;
1187
1188 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1189 assert_wait((event_t) (&c_seg), THREAD_INTERRUPTIBLE);
1190
1191 thread_block(THREAD_CONTINUE_NULL);
1192
1193 return 0;
1194 }
1195
1196 int
1197 do_cseg_unwedge_thread(void)
1198 {
1199 thread_wakeup(debug_cseg_wait_event);
1200 debug_cseg_wait_event = NULL;
1201
1202 return 0;
1203 }
1204 #endif /* DEVELOPMENT || DEBUG */
1205
1206 void
1207 c_seg_wait_on_busy(c_segment_t c_seg)
1208 {
1209 c_seg->c_wanted = 1;
1210
1211 thread_set_pending_block_hint(current_thread(), kThreadWaitCompressor);
1212 assert_wait((event_t) (c_seg), THREAD_UNINT);
1213
1214 lck_mtx_unlock_always(&c_seg->c_lock);
1215 thread_block(THREAD_CONTINUE_NULL);
1216 }
1217
1218 #if CONFIG_FREEZE
1219 /*
1220 * We don't have the task lock held while updating the task's
1221 * c_seg queues. We can do that because of the following restrictions:
1222 *
1223 * - SINGLE FREEZER CONTEXT:
1224 * We 'insert' c_segs into the task list on the task_freeze path.
1225 * There can only be one such freeze in progress and the task
1226 * isn't disappearing because we have the VM map lock held throughout
1227 * and we have a reference on the proc too.
1228 *
1229 * - SINGLE TASK DISOWN CONTEXT:
1230 * We 'disown' c_segs of a task ONLY from the task_terminate context. So
1231 * we don't need the task lock but we need the c_list_lock and the
1232 * compressor master lock (shared). We also hold the individual
1233 * c_seg locks (exclusive).
1234 *
1235 * If we either:
1236 * - can't get the c_seg lock on a try, then we start again because maybe
1237 * the c_seg is part of a compaction and might get freed. So we can't trust
1238 * that linkage and need to restart our queue traversal.
1239 * - OR, we run into a busy c_seg (say being swapped in or freed), then we
1240 * drop all locks again and wait and restart our queue traversal.
1241 *
1242 * - The new_owner_task below is currently only the kernel or NULL.
1243 *
1244 */
1245 void
1246 c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task)
1247 {
1248 task_t owner_task = c_seg->c_task_owner;
1249 uint64_t uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64);
1250
1251 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1252 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1253
1254 if (owner_task) {
1255 task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP);
1256 queue_remove(&owner_task->task_frozen_cseg_q, c_seg,
1257 c_segment_t, c_task_list_next_cseg);
1258 }
1259
1260 if (new_owner_task) {
1261 queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg,
1262 c_segment_t, c_task_list_next_cseg);
1263 task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP);
1264 }
1265
1266 c_seg->c_task_owner = new_owner_task;
1267 }
1268
1269 void
1270 task_disown_frozen_csegs(task_t owner_task)
1271 {
1272 c_segment_t c_seg = NULL, next_cseg = NULL;
1273
1274 again:
1275 PAGE_REPLACEMENT_DISALLOWED(TRUE);
1276 lck_mtx_lock_spin_always(c_list_lock);
1277
1278 for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q);
1279 !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg);
1280 c_seg = next_cseg) {
1281 next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);
1282
1283 if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) {
1284 lck_mtx_unlock(c_list_lock);
1285 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1286 goto again;
1287 }
1288
1289 if (c_seg->c_busy) {
1290 lck_mtx_unlock(c_list_lock);
1291 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1292
1293 c_seg_wait_on_busy(c_seg);
1294
1295 goto again;
1296 }
1297 assert(c_seg->c_task_owner == owner_task);
1298 c_seg_update_task_owner(c_seg, kernel_task);
1299 lck_mtx_unlock_always(&c_seg->c_lock);
1300 }
1301
1302 lck_mtx_unlock(c_list_lock);
1303 PAGE_REPLACEMENT_DISALLOWED(FALSE);
1304 }
1305 #endif /* CONFIG_FREEZE */
1306
1307 void
1308 c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head)
1309 {
1310 int old_state = c_seg->c_state;
1311
1312 #if XNU_TARGET_OS_OSX
1313 #if DEVELOPMENT || DEBUG
1314 if (new_state != C_IS_FILLING) {
1315 LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED);
1316 }
1317 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
1318 #endif
1319 #endif /* XNU_TARGET_OS_OSX */
1320 switch (old_state) {
1321 case C_IS_EMPTY:
1322 assert(new_state == C_IS_FILLING || new_state == C_IS_FREE);
1323
1324 c_empty_count--;
1325 break;
1326
1327 case C_IS_FILLING:
1328 assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q);
1329
1330 queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1331 c_filling_count--;
1332 break;
1333
1334 case C_ON_AGE_Q:
1335 assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q ||
1336 new_state == C_IS_FREE);
1337
1338 queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1339 c_age_count--;
1340 break;
1341
1342 case C_ON_SWAPPEDIN_Q:
1343 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1344
1345 queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1346 c_swappedin_count--;
1347 break;
1348
1349 case C_ON_SWAPOUT_Q:
1350 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q);
1351
1352 #if CONFIG_FREEZE
1353 if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) {
1354 c_seg_update_task_owner(c_seg, NULL);
1355 }
1356 #endif /* CONFIG_FREEZE */
1357
1358 queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1359 thread_wakeup((event_t)&compaction_swapper_running);
1360 c_swapout_count--;
1361 break;
1362
1363 case C_ON_SWAPIO_Q:
1364 assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q);
1365
1366 queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1367 c_swapio_count--;
1368 break;
1369
1370 case C_ON_SWAPPEDOUT_Q:
1371 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1372 new_state == C_ON_SWAPPEDOUTSPARSE_Q ||
1373 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1374
1375 queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1376 c_swappedout_count--;
1377 break;
1378
1379 case C_ON_SWAPPEDOUTSPARSE_Q:
1380 assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q ||
1381 new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE);
1382
1383 queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1384 c_swappedout_sparse_count--;
1385 break;
1386
1387 case C_ON_MAJORCOMPACT_Q:
1388 assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE);
1389
1390 queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1391 c_major_count--;
1392 break;
1393
1394 case C_ON_BAD_Q:
1395 assert(new_state == C_IS_FREE);
1396
1397 queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1398 c_bad_count--;
1399 break;
1400
1401 default:
1402 panic("c_seg %p has bad c_state = %d\n", c_seg, old_state);
1403 }
1404
1405 switch (new_state) {
1406 case C_IS_FREE:
1407 assert(old_state != C_IS_FILLING);
1408
1409 break;
1410
1411 case C_IS_EMPTY:
1412 assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1413
1414 c_empty_count++;
1415 break;
1416
1417 case C_IS_FILLING:
1418 assert(old_state == C_IS_EMPTY);
1419
1420 queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list);
1421 c_filling_count++;
1422 break;
1423
1424 case C_ON_AGE_Q:
1425 assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q ||
1426 old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q ||
1427 old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1428
1429 if (old_state == C_IS_FILLING) {
1430 queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1431 } else {
1432 if (!queue_empty(&c_age_list_head)) {
1433 c_segment_t c_first;
1434
1435 c_first = (c_segment_t)queue_first(&c_age_list_head);
1436 c_seg->c_creation_ts = c_first->c_creation_ts;
1437 }
1438 queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list);
1439 }
1440 c_age_count++;
1441 break;
1442
1443 case C_ON_SWAPPEDIN_Q:
1444 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1445
1446 if (insert_head == TRUE) {
1447 queue_enter_first(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1448 } else {
1449 queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list);
1450 }
1451 c_swappedin_count++;
1452 break;
1453
1454 case C_ON_SWAPOUT_Q:
1455 assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING);
1456
1457 if (insert_head == TRUE) {
1458 queue_enter_first(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1459 } else {
1460 queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
1461 }
1462 c_swapout_count++;
1463 break;
1464
1465 case C_ON_SWAPIO_Q:
1466 assert(old_state == C_ON_SWAPOUT_Q);
1467
1468 if (insert_head == TRUE) {
1469 queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1470 } else {
1471 queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list);
1472 }
1473 c_swapio_count++;
1474 break;
1475
1476 case C_ON_SWAPPEDOUT_Q:
1477 assert(old_state == C_ON_SWAPIO_Q);
1478
1479 if (insert_head == TRUE) {
1480 queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1481 } else {
1482 queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
1483 }
1484 c_swappedout_count++;
1485 break;
1486
1487 case C_ON_SWAPPEDOUTSPARSE_Q:
1488 assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q);
1489
1490 if (insert_head == TRUE) {
1491 queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1492 } else {
1493 queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list);
1494 }
1495
1496 c_swappedout_sparse_count++;
1497 break;
1498
1499 case C_ON_MAJORCOMPACT_Q:
1500 assert(old_state == C_ON_AGE_Q);
1501
1502 if (insert_head == TRUE) {
1503 queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1504 } else {
1505 queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list);
1506 }
1507 c_major_count++;
1508 break;
1509
1510 case C_ON_BAD_Q:
1511 assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q);
1512
1513 if (insert_head == TRUE) {
1514 queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1515 } else {
1516 queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list);
1517 }
1518 c_bad_count++;
1519 break;
1520
1521 default:
1522 panic("c_seg %p requesting bad c_state = %d\n", c_seg, new_state);
1523 }
1524 c_seg->c_state = new_state;
1525 }
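/*
 * In short: each c_state pairs a counter with a queue (C_IS_EMPTY keeps only
 * a counter, C_IS_FREE keeps neither), and this function is the single place
 * that moves a segment between those pairs, asserting along the way that the
 * requested transition is a legal edge of the state machine.
 */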
1526
1527
1528
1529 void
1530 c_seg_free(c_segment_t c_seg)
1531 {
1532 assert(c_seg->c_busy);
1533
1534 lck_mtx_unlock_always(&c_seg->c_lock);
1535 lck_mtx_lock_spin_always(c_list_lock);
1536 lck_mtx_lock_spin_always(&c_seg->c_lock);
1537
1538 c_seg_free_locked(c_seg);
1539 }
1540
1541
1542 void
1543 c_seg_free_locked(c_segment_t c_seg)
1544 {
1545 int segno;
1546 int pages_populated = 0;
1547 int32_t *c_buffer = NULL;
1548 uint64_t c_swap_handle = 0;
1549
1550 assert(c_seg->c_busy);
1551 assert(c_seg->c_slots_used == 0);
1552 assert(!c_seg->c_on_minorcompact_q);
1553 assert(!c_seg->c_busy_swapping);
1554
1555 if (c_seg->c_overage_swap == TRUE) {
1556 c_overage_swapped_count--;
1557 c_seg->c_overage_swap = FALSE;
1558 }
1559 if (!(C_SEG_IS_ONDISK(c_seg))) {
1560 c_buffer = c_seg->c_store.c_buffer;
1561 } else {
1562 c_swap_handle = c_seg->c_store.c_swap_handle;
1563 }
1564
1565 c_seg_switch_state(c_seg, C_IS_FREE, FALSE);
1566
1567 if (c_buffer) {
1568 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
1569 c_seg->c_store.c_buffer = NULL;
1570 } else {
1571 #if CONFIG_FREEZE
1572 c_seg_update_task_owner(c_seg, NULL);
1573 #endif /* CONFIG_FREEZE */
1574
1575 c_seg->c_store.c_swap_handle = (uint64_t)-1;
1576 }
1577
1578 lck_mtx_unlock_always(&c_seg->c_lock);
1579
1580 lck_mtx_unlock_always(c_list_lock);
1581
1582 if (c_buffer) {
1583 if (pages_populated) {
1584 kernel_memory_depopulate(compressor_map, (vm_offset_t)c_buffer,
1585 pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
1586 }
1587 } else if (c_swap_handle) {
1588 /*
1589 * Free swap space on disk.
1590 */
1591 vm_swap_free(c_swap_handle);
1592 }
1593 lck_mtx_lock_spin_always(&c_seg->c_lock);
1594 /*
1595 * c_seg must remain busy until
1596 * after the call to vm_swap_free
1597 */
1598 C_SEG_WAKEUP_DONE(c_seg);
1599 lck_mtx_unlock_always(&c_seg->c_lock);
1600
1601 segno = c_seg->c_mysegno;
1602
1603 lck_mtx_lock_spin_always(c_list_lock);
1604 /*
1605 * because the c_buffer is now associated with the segno,
1606 * we can't put the segno back on the free list until
1607 * after we have depopulated the c_buffer range, or
1608 * we run the risk of depopulating a range that is
1609 * now being used in one of the compressor heads
1610 */
1611 c_segments[segno].c_segno = c_free_segno_head;
1612 c_free_segno_head = segno;
1613 c_segment_count--;
1614
1615 lck_mtx_unlock_always(c_list_lock);
1616
1617 lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp);
1618
1619 if (c_seg->c_slot_var_array_len) {
1620 kheap_free(KHEAP_DATA_BUFFERS, c_seg->c_slot_var_array,
1621 sizeof(struct c_slot) * c_seg->c_slot_var_array_len);
1622 }
1623
1624 zfree(compressor_segment_zone, c_seg);
1625 }
1626
1627 #if DEVELOPMENT || DEBUG
1628 int c_seg_trim_page_count = 0;
1629 #endif
1630
1631 void
1632 c_seg_trim_tail(c_segment_t c_seg)
1633 {
1634 c_slot_t cs;
1635 uint32_t c_size;
1636 uint32_t c_offset;
1637 uint32_t c_rounded_size;
1638 uint16_t current_nextslot;
1639 uint32_t current_populated_offset;
1640
1641 if (c_seg->c_bytes_used == 0) {
1642 return;
1643 }
1644 current_nextslot = c_seg->c_nextslot;
1645 current_populated_offset = c_seg->c_populated_offset;
1646
1647 while (c_seg->c_nextslot) {
1648 cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1));
1649
1650 c_size = UNPACK_C_SIZE(cs);
1651
1652 if (c_size) {
1653 if (current_nextslot != c_seg->c_nextslot) {
1654 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1655 c_offset = cs->c_offset + C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1656
1657 c_seg->c_nextoffset = c_offset;
1658 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) &
1659 ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
1660
1661 if (c_seg->c_firstemptyslot > c_seg->c_nextslot) {
1662 c_seg->c_firstemptyslot = c_seg->c_nextslot;
1663 }
1664 #if DEVELOPMENT || DEBUG
1665 c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) -
1666 round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) /
1667 PAGE_SIZE);
1668 #endif
1669 }
1670 break;
1671 }
1672 c_seg->c_nextslot--;
1673 }
1674 assert(c_seg->c_nextslot);
1675 }
1676
1677
1678 int
1679 c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy)
1680 {
1681 c_slot_mapping_t slot_ptr;
1682 uint32_t c_offset = 0;
1683 uint32_t old_populated_offset;
1684 uint32_t c_rounded_size;
1685 uint32_t c_size;
1686 uint16_t c_indx = 0;
1687 int i;
1688 c_slot_t c_dst;
1689 c_slot_t c_src;
1690
1691 assert(c_seg->c_busy);
1692
1693 #if VALIDATE_C_SEGMENTS
1694 c_seg_validate(c_seg, FALSE);
1695 #endif
1696 if (c_seg->c_bytes_used == 0) {
1697 c_seg_free(c_seg);
1698 return 1;
1699 }
1700 lck_mtx_unlock_always(&c_seg->c_lock);
1701
1702 if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) {
1703 goto done;
1704 }
1705
1706 /* TODO: assert first emptyslot's c_size is actually 0 */
1707
1708 #if DEVELOPMENT || DEBUG
1709 C_SEG_MAKE_WRITEABLE(c_seg);
1710 #endif
1711
1712 #if VALIDATE_C_SEGMENTS
1713 c_seg->c_was_minor_compacted++;
1714 #endif
1715 c_indx = c_seg->c_firstemptyslot;
1716 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1717
1718 old_populated_offset = c_seg->c_populated_offset;
1719 c_offset = c_dst->c_offset;
1720
1721 for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) {
1722 c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i);
1723
1724 c_size = UNPACK_C_SIZE(c_src);
1725
1726 if (c_size == 0) {
1727 continue;
1728 }
1729
1730 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1731 /* N.B.: This memcpy may be an overlapping copy */
1732 memcpy(&c_seg->c_store.c_buffer[c_offset], &c_seg->c_store.c_buffer[c_src->c_offset], c_rounded_size);
1733
1734 cslot_copy(c_dst, c_src);
1735 c_dst->c_offset = c_offset;
1736
1737 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
1738 slot_ptr->s_cindx = c_indx;
1739
1740 c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1741 PACK_C_SIZE(c_src, 0);
1742 c_indx++;
1743
1744 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
1745 }
1746 c_seg->c_firstemptyslot = c_indx;
1747 c_seg->c_nextslot = c_indx;
1748 c_seg->c_nextoffset = c_offset;
1749 c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1);
1750 c_seg->c_bytes_unused = 0;
1751
1752 #if VALIDATE_C_SEGMENTS
1753 c_seg_validate(c_seg, TRUE);
1754 #endif
1755 if (old_populated_offset > c_seg->c_populated_offset) {
1756 uint32_t gc_size;
1757 int32_t *gc_ptr;
1758
1759 gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset);
1760 gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset];
1761
1762 kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size,
1763 KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
1764 }
1765
1766 #if DEVELOPMENT || DEBUG
1767 C_SEG_WRITE_PROTECT(c_seg);
1768 #endif
1769
1770 done:
1771 if (clear_busy == TRUE) {
1772 lck_mtx_lock_spin_always(&c_seg->c_lock);
1773 C_SEG_WAKEUP_DONE(c_seg);
1774 lck_mtx_unlock_always(&c_seg->c_lock);
1775 }
1776 return 0;
1777 }
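/*
 * Sketch of what a minor compaction does (qualitative restatement of the loop
 * above): starting at c_firstemptyslot, live slots that sit above a hole are
 * copied down so the compressed data becomes contiguous again, each moved
 * slot's owner mapping gets its s_cindx rewritten to the slot's new index,
 * and any whole pages freed at the tail of the buffer are depopulated and
 * returned to the system.
 */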
1778
1779
1780 static void
1781 c_seg_alloc_nextslot(c_segment_t c_seg)
1782 {
1783 struct c_slot *old_slot_array = NULL;
1784 struct c_slot *new_slot_array = NULL;
1785 int newlen;
1786 int oldlen;
1787
1788 if (c_seg->c_nextslot < c_seg_fixed_array_len) {
1789 return;
1790 }
1791
1792 if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) {
1793 oldlen = c_seg->c_slot_var_array_len;
1794 old_slot_array = c_seg->c_slot_var_array;
1795
1796 if (oldlen == 0) {
1797 newlen = C_SEG_SLOT_VAR_ARRAY_MIN_LEN;
1798 } else {
1799 newlen = oldlen * 2;
1800 }
1801
1802 new_slot_array = kheap_alloc(KHEAP_DATA_BUFFERS,
1803 sizeof(struct c_slot) * newlen, Z_WAITOK);
1804
1805 lck_mtx_lock_spin_always(&c_seg->c_lock);
1806
1807 if (old_slot_array) {
1808 memcpy(new_slot_array, old_slot_array,
1809 sizeof(struct c_slot) * oldlen);
1810 }
1811
1812 c_seg->c_slot_var_array_len = newlen;
1813 c_seg->c_slot_var_array = new_slot_array;
1814
1815 lck_mtx_unlock_always(&c_seg->c_lock);
1816
1817 if (old_slot_array) {
1818 kheap_free(KHEAP_DATA_BUFFERS, old_slot_array,
1819 sizeof(struct c_slot) * oldlen);
1820 }
1821 }
1822 }
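/*
 * slot bookkeeping for a segment lives in a small fixed array for the first
 * c_seg_fixed_array_len slots; beyond that we fall back to a heap-allocated
 * variable array that grows geometrically (C_SEG_SLOT_VAR_ARRAY_MIN_LEN to
 * start, doubling thereafter).  the copy from the old array to the new one is
 * done while holding the segment lock, but the allocation of the new array and
 * the free of the old one both happen outside of it.
 */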
1823
1824
1825 #define C_SEG_MAJOR_COMPACT_STATS_MAX (30)
1826
1827 struct {
1828 uint64_t asked_permission;
1829 uint64_t compactions;
1830 uint64_t moved_slots;
1831 uint64_t moved_bytes;
1832 uint64_t wasted_space_in_swapouts;
1833 uint64_t count_of_swapouts;
1834 uint64_t count_of_freed_segs;
1835 uint64_t bailed_compactions;
1836 uint64_t bytes_freed_rate_us;
1837 } c_seg_major_compact_stats[C_SEG_MAJOR_COMPACT_STATS_MAX];
1838
1839 int c_seg_major_compact_stats_now = 0;
1840
1841
1842 #define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((C_SEG_BUFSIZE * 90) / 100)
1843
1844
1845 boolean_t
1846 c_seg_major_compact_ok(
1847 c_segment_t c_seg_dst,
1848 c_segment_t c_seg_src)
1849 {
1850 c_seg_major_compact_stats[c_seg_major_compact_stats_now].asked_permission++;
1851
1852 if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE &&
1853 c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) {
1854 return FALSE;
1855 }
1856
1857 if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
1858 /*
1859 * destination segment is full... can't compact
1860 */
1861 return FALSE;
1862 }
1863
1864 return TRUE;
1865 }
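/*
 * a source/destination pair is considered worth compacting only if at least
 * one of the two segments is below C_MAJOR_COMPACTION_SIZE_APPROPRIATE
 * (~90% of C_SEG_BUFSIZE) and the destination still has both offset and slot
 * headroom (c_nextoffset < C_SEG_OFF_LIMIT, c_nextslot < C_SLOT_MAX_INDEX).
 */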
1866
1867
1868 boolean_t
1869 c_seg_major_compact(
1870 c_segment_t c_seg_dst,
1871 c_segment_t c_seg_src)
1872 {
1873 c_slot_mapping_t slot_ptr;
1874 uint32_t c_rounded_size;
1875 uint32_t c_size;
1876 uint16_t dst_slot;
1877 int i;
1878 c_slot_t c_dst;
1879 c_slot_t c_src;
1880 boolean_t keep_compacting = TRUE;
1881
1882 /*
1883 * segments are not locked but they are both marked c_busy
1884 * which keeps c_decompress from working on them...
1885 * we can safely allocate new pages, move compressed data
1886 * from c_seg_src to c_seg_dst and update both c_segments'
1887 * state w/o holding the master lock
1888 */
1889 #if DEVELOPMENT || DEBUG
1890 C_SEG_MAKE_WRITEABLE(c_seg_dst);
1891 #endif
1892
1893 #if VALIDATE_C_SEGMENTS
1894 c_seg_dst->c_was_major_compacted++;
1895 c_seg_src->c_was_major_donor++;
1896 #endif
1897 c_seg_major_compact_stats[c_seg_major_compact_stats_now].compactions++;
1898
1899 dst_slot = c_seg_dst->c_nextslot;
1900
1901 for (i = 0; i < c_seg_src->c_nextslot; i++) {
1902 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i);
1903
1904 c_size = UNPACK_C_SIZE(c_src);
1905
1906 if (c_size == 0) {
1907 /* BATCH: move what we have so far; */
1908 continue;
1909 }
1910
1911 if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) {
1912 int size_to_populate;
1913
1914 /* doesn't fit */
1915 size_to_populate = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset);
1916
1917 if (size_to_populate == 0) {
1918 /* can't fit */
1919 keep_compacting = FALSE;
1920 break;
1921 }
1922 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
1923 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
1924 }
1925
1926 kernel_memory_populate(compressor_map,
1927 (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset],
1928 size_to_populate,
1929 KMA_COMPRESSOR,
1930 VM_KERN_MEMORY_COMPRESSOR);
1931
1932 c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
1933 assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= C_SEG_BUFSIZE);
1934 }
1935 c_seg_alloc_nextslot(c_seg_dst);
1936
1937 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
1938
1939 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
1940
1941 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
1942
1943 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_slots++;
1944 c_seg_major_compact_stats[c_seg_major_compact_stats_now].moved_bytes += c_size;
1945
1946 cslot_copy(c_dst, c_src);
1947 c_dst->c_offset = c_seg_dst->c_nextoffset;
1948
1949 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
1950 c_seg_dst->c_firstemptyslot++;
1951 }
1952 c_seg_dst->c_slots_used++;
1953 c_seg_dst->c_nextslot++;
1954 c_seg_dst->c_bytes_used += c_rounded_size;
1955 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
1956
1957 PACK_C_SIZE(c_src, 0);
1958
1959 c_seg_src->c_bytes_used -= c_rounded_size;
1960 c_seg_src->c_bytes_unused += c_rounded_size;
1961 c_seg_src->c_firstemptyslot = 0;
1962
1963 assert(c_seg_src->c_slots_used);
1964 c_seg_src->c_slots_used--;
1965
1966 if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
1967 /* dest segment is now full */
1968 keep_compacting = FALSE;
1969 break;
1970 }
1971 }
1972 #if DEVELOPMENT || DEBUG
1973 C_SEG_WRITE_PROTECT(c_seg_dst);
1974 #endif
1975 if (dst_slot < c_seg_dst->c_nextslot) {
1976 PAGE_REPLACEMENT_ALLOWED(TRUE);
1977 /*
1978 * we've now locked out c_decompress from
1979 * converting the slot passed into it into
1980 * a c_segment_t which allows us to use
1981 * the backptr to change which c_segment and
1982 * index the slot points to
1983 */
1984 while (dst_slot < c_seg_dst->c_nextslot) {
1985 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
1986
1987 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
1988 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
1989 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
1990 slot_ptr->s_cindx = dst_slot++;
1991 }
1992 PAGE_REPLACEMENT_ALLOWED(FALSE);
1993 }
1994 return keep_compacting;
1995 }
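/*
 * major compaction walks every slot of the donor (c_seg_src), copies each
 * non-empty slot's data into the next free offset of c_seg_dst (populating
 * additional buffer pages in chunks of up to C_SEG_MAX_POPULATE_SIZE as
 * needed), and marks the source slot empty.  once the copy loop is done, the
 * back pointers of all slots that landed in the destination are re-targeted
 * to <c_seg_dst, new index> under PAGE_REPLACEMENT_ALLOWED so that
 * c_decompress can't race with the re-mapping.  returns FALSE once the
 * destination fills up (offset or slot limit reached, or no more buffer pages
 * can be populated).
 */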
1996
1997
1998 uint64_t
1999 vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec)
2000 {
2001 uint64_t end_msecs;
2002 uint64_t start_msecs;
2003
2004 end_msecs = (end_sec * 1000) + end_nsec / 1000000;
2005 start_msecs = (start_sec * 1000) + start_nsec / 1000000;
2006
2007 return end_msecs - start_msecs;
2008 }
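/*
 * e.g. end = { 3 sec, 250000000 nsec } and start = { 1 sec, 750000000 nsec }
 * gives (3000 + 250) - (1000 + 750) = 1500 msecs.
 */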
2009
2010
2011
2012 uint32_t compressor_eval_period_in_msecs = 250;
2013 uint32_t compressor_sample_min_in_msecs = 500;
2014 uint32_t compressor_sample_max_in_msecs = 10000;
2015 uint32_t compressor_thrashing_threshold_per_10msecs = 50;
2016 uint32_t compressor_thrashing_min_per_10msecs = 20;
2017
2018 /* When true, reset sample data next chance we get. */
2019 static boolean_t compressor_need_sample_reset = FALSE;
2020
2021
2022 void
2023 compute_swapout_target_age(void)
2024 {
2025 clock_sec_t cur_ts_sec;
2026 clock_nsec_t cur_ts_nsec;
2027 uint32_t min_operations_needed_in_this_sample;
2028 uint64_t elapsed_msecs_in_eval;
2029 uint64_t elapsed_msecs_in_sample;
2030 boolean_t need_eval_reset = FALSE;
2031
2032 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
2033
2034 elapsed_msecs_in_sample = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_sample_period_sec, start_of_sample_period_nsec);
2035
2036 if (compressor_need_sample_reset ||
2037 elapsed_msecs_in_sample >= compressor_sample_max_in_msecs) {
2038 compressor_need_sample_reset = TRUE;
2039 need_eval_reset = TRUE;
2040 goto done;
2041 }
2042 elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec);
2043
2044 if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) {
2045 goto done;
2046 }
2047 need_eval_reset = TRUE;
2048
2049 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0);
2050
2051 min_operations_needed_in_this_sample = (compressor_thrashing_min_per_10msecs * (uint32_t)elapsed_msecs_in_eval) / 10;
2052
2053 if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample ||
2054 (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) {
2055 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count,
2056 sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0);
2057
2058 swapout_target_age = 0;
2059
2060 compressor_need_sample_reset = TRUE;
2061 need_eval_reset = TRUE;
2062 goto done;
2063 }
2064 last_eval_compression_count = sample_period_compression_count;
2065 last_eval_decompression_count = sample_period_decompression_count;
2066
2067 if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) {
2068 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0);
2069 goto done;
2070 }
2071 if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) {
2072 uint64_t running_total;
2073 uint64_t working_target;
2074 uint64_t aging_target;
2075 uint32_t oldest_age_of_csegs_sampled = 0;
2076 uint64_t working_set_approximation = 0;
2077
2078 swapout_target_age = 0;
2079
2080 working_target = (sample_period_decompression_count / 100) * 95; /* 95 percent */
2081 aging_target = (sample_period_decompression_count / 100) * 1; /* 1 percent */
2082 running_total = 0;
2083
2084 for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) {
2085 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2086
2087 working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2088
2089 if (running_total >= working_target) {
2090 break;
2091 }
2092 }
2093 if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) {
2094 working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample;
2095
2096 if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) {
2097 running_total = overage_decompressions_during_sample_period;
2098
2099 for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) {
2100 running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled];
2101
2102 if (running_total >= aging_target) {
2103 break;
2104 }
2105 }
2106 swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled;
2107
2108 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 2, 0);
2109 } else {
2110 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0);
2111 }
2112 } else {
2113 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0);
2114 }
2115
2116 compressor_need_sample_reset = TRUE;
2117 need_eval_reset = TRUE;
2118 } else {
2119 KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0);
2120 }
2121 done:
2122 if (compressor_need_sample_reset == TRUE) {
2123 bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period));
2124 overage_decompressions_during_sample_period = 0;
2125
2126 start_of_sample_period_sec = cur_ts_sec;
2127 start_of_sample_period_nsec = cur_ts_nsec;
2128 sample_period_decompression_count = 0;
2129 sample_period_compression_count = 0;
2130 last_eval_decompression_count = 0;
2131 last_eval_compression_count = 0;
2132 compressor_need_sample_reset = FALSE;
2133 }
2134 if (need_eval_reset == TRUE) {
2135 start_of_eval_period_sec = cur_ts_sec;
2136 start_of_eval_period_nsec = cur_ts_nsec;
2137 }
2138 }
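/*
 * roughly: once enough compression/decompression activity has accumulated in
 * the current eval/sample windows, walk the decompression-age histogram from
 * the youngest bucket up until ~95% of the sampled decompressions are covered,
 * building a working-set approximation along the way.  if that approximation
 * is smaller than VM_PAGE_COMPRESSOR_COUNT, walk back down from the oldest
 * bucket until ~1% of the decompressions are accounted for and set
 * swapout_target_age to (now - that bucket's age), which downstream checks
 * use as an age cutoff for swapout candidates.
 */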
2139
2140
2141 int compaction_swapper_init_now = 0;
2142 int compaction_swapper_running = 0;
2143 int compaction_swapper_awakened = 0;
2144 int compaction_swapper_abort = 0;
2145
2146
2147 #if CONFIG_JETSAM
2148 boolean_t memorystatus_kill_on_VM_compressor_thrashing(boolean_t);
2149 boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t);
2150 boolean_t memorystatus_kill_on_FC_thrashing(boolean_t);
2151 int compressor_thrashing_induced_jetsam = 0;
2152 int filecache_thrashing_induced_jetsam = 0;
2153 static boolean_t vm_compressor_thrashing_detected = FALSE;
2154 #endif /* CONFIG_JETSAM */
2155
2156 static boolean_t
2157 compressor_needs_to_swap(void)
2158 {
2159 boolean_t should_swap = FALSE;
2160
2161 if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) {
2162 c_segment_t c_seg;
2163 clock_sec_t now;
2164 clock_sec_t age;
2165 clock_nsec_t nsec;
2166
2167 clock_get_system_nanotime(&now, &nsec);
2168 age = 0;
2169
2170 lck_mtx_lock_spin_always(c_list_lock);
2171
2172 if (!queue_empty(&c_age_list_head)) {
2173 c_seg = (c_segment_t) queue_first(&c_age_list_head);
2174
2175 age = now - c_seg->c_creation_ts;
2176 }
2177 lck_mtx_unlock_always(c_list_lock);
2178
2179 if (age >= vm_ripe_target_age) {
2180 should_swap = TRUE;
2181 goto check_if_low_space;
2182 }
2183 }
2184 if (VM_CONFIG_SWAP_IS_ACTIVE) {
2185 if (COMPRESSOR_NEEDS_TO_SWAP()) {
2186 should_swap = TRUE;
2187 goto check_if_low_space;
2188 }
2189 if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) {
2190 should_swap = TRUE;
2191 goto check_if_low_space;
2192 }
2193 if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) {
2194 should_swap = TRUE;
2195 goto check_if_low_space;
2196 }
2197 }
2198
2199 #if (XNU_TARGET_OS_OSX && __arm64__)
2200 /*
2201 * Thrashing detection disabled.
2202 */
2203 #else /* (XNU_TARGET_OS_OSX && __arm64__) */
2204
2205 compute_swapout_target_age();
2206
2207 if (swapout_target_age) {
2208 c_segment_t c_seg;
2209
2210 lck_mtx_lock_spin_always(c_list_lock);
2211
2212 if (!queue_empty(&c_age_list_head)) {
2213 c_seg = (c_segment_t) queue_first(&c_age_list_head);
2214
2215 if (c_seg->c_creation_ts > swapout_target_age) {
2216 swapout_target_age = 0;
2217 }
2218 }
2219 lck_mtx_unlock_always(c_list_lock);
2220 }
2221 #if CONFIG_PHANTOM_CACHE
2222 if (vm_phantom_cache_check_pressure()) {
2223 should_swap = TRUE;
2224 }
2225 #endif
2226 if (swapout_target_age) {
2227 should_swap = TRUE;
2228 }
2229 #endif /* (XNU_TARGET_OS_OSX && __arm64__) */
2230
2231 check_if_low_space:
2232
2233 #if CONFIG_JETSAM
2234 if (should_swap || vm_compressor_low_on_space() == TRUE) {
2235 if (vm_compressor_thrashing_detected == FALSE) {
2236 vm_compressor_thrashing_detected = TRUE;
2237
2238 if (swapout_target_age) {
2239 /* The compressor is thrashing. */
2240 memorystatus_kill_on_VM_compressor_thrashing(TRUE /* async */);
2241 compressor_thrashing_induced_jetsam++;
2242 } else if (vm_compressor_low_on_space() == TRUE) {
2243 /* The compressor is running low on space. */
2244 memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */);
2245 compressor_thrashing_induced_jetsam++;
2246 } else {
2247 memorystatus_kill_on_FC_thrashing(TRUE /* async */);
2248 filecache_thrashing_induced_jetsam++;
2249 }
2250 }
2251 /*
2252 * let the jetsam take precedence over
2253 * any major compactions we might have
2254 * been able to do... otherwise we run
2255 * the risk of doing major compactions
2256 * on segments we're about to free up
2257 * due to the jetsam activity.
2258 */
2259 should_swap = FALSE;
2260 }
2261
2262 #else /* CONFIG_JETSAM */
2263 if (should_swap && vm_swap_low_on_space()) {
2264 vm_compressor_take_paging_space_action();
2265 }
2266 #endif /* CONFIG_JETSAM */
2267
2268 if (should_swap == FALSE) {
2269 /*
2270 * vm_compressor_needs_to_major_compact returns true only if we're
2271 * about to run out of available compressor segments... in this
2272 * case, we absolutely need to run a major compaction even if
2273 * we've just kicked off a jetsam or we don't otherwise need to
2274 * swap... terminating objects releases
2275 * pages back to the uncompressed cache, but does not guarantee
2276 * that we will free up even a single compression segment
2277 */
2278 should_swap = vm_compressor_needs_to_major_compact();
2279 }
2280
2281 /*
2282 * returning TRUE when swap_supported == FALSE
2283 * will cause the major compaction engine to
2284 * run, but will not trigger any swapping...
2285 * segments that have been major compacted
2286 * will be moved to the majorcompact queue
2287 */
2288 return should_swap;
2289 }
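/*
 * decision order above: ripe-segment swapping (segments older than
 * vm_ripe_target_age) first, then the space-based COMPRESSOR_NEEDS_TO_SWAP /
 * throttled-external-queue / free-page checks when swap is active, then (where
 * thrashing detection is compiled in) the swapout_target_age and phantom-cache
 * pressure checks.  under CONFIG_JETSAM a shortage kicks off a jetsam instead
 * (once per thrashing episode) and should_swap is cleared so the kill can free
 * segments before we spend time major-compacting them.
 */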
2290
2291 #if CONFIG_JETSAM
2292 /*
2293 * This function is called from the jetsam thread after killing something to
2294 * mitigate thrashing.
2295 *
2296 * We need to restart our thrashing detection heuristics since memory pressure
2297 * has potentially changed significantly, and we don't want to detect on old
2298 * data from before the jetsam.
2299 */
2300 void
2301 vm_thrashing_jetsam_done(void)
2302 {
2303 vm_compressor_thrashing_detected = FALSE;
2304
2305 /* Were we compressor-thrashing or filecache-thrashing? */
2306 if (swapout_target_age) {
2307 swapout_target_age = 0;
2308 compressor_need_sample_reset = TRUE;
2309 }
2310 #if CONFIG_PHANTOM_CACHE
2311 else {
2312 vm_phantom_cache_restart_sample();
2313 }
2314 #endif
2315 }
2316 #endif /* CONFIG_JETSAM */
2317
2318 uint32_t vm_wake_compactor_swapper_calls = 0;
2319 uint32_t vm_run_compactor_already_running = 0;
2320 uint32_t vm_run_compactor_empty_minor_q = 0;
2321 uint32_t vm_run_compactor_did_compact = 0;
2322 uint32_t vm_run_compactor_waited = 0;
2323
2324 void
2325 vm_run_compactor(void)
2326 {
2327 if (c_segment_count == 0) {
2328 return;
2329 }
2330
2331 lck_mtx_lock_spin_always(c_list_lock);
2332
2333 if (c_minor_count == 0) {
2334 vm_run_compactor_empty_minor_q++;
2335
2336 lck_mtx_unlock_always(c_list_lock);
2337 return;
2338 }
2339 if (compaction_swapper_running) {
2340 if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
2341 vm_run_compactor_already_running++;
2342
2343 lck_mtx_unlock_always(c_list_lock);
2344 return;
2345 }
2346 vm_run_compactor_waited++;
2347
2348 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2349
2350 lck_mtx_unlock_always(c_list_lock);
2351
2352 thread_block(THREAD_CONTINUE_NULL);
2353
2354 return;
2355 }
2356 vm_run_compactor_did_compact++;
2357
2358 fastwake_warmup = FALSE;
2359 compaction_swapper_running = 1;
2360
2361 vm_compressor_do_delayed_compactions(FALSE);
2362
2363 compaction_swapper_running = 0;
2364
2365 lck_mtx_unlock_always(c_list_lock);
2366
2367 thread_wakeup((event_t)&compaction_swapper_running);
2368 }
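/*
 * vm_run_compactor is the synchronous flavor: if the compactor/swapper is
 * already running it either returns immediately (multi-processor configs) or
 * blocks until that pass finishes (single-processor configs); otherwise it
 * drains the delayed minor-compaction queue itself.
 */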
2369
2370
2371 void
2372 vm_wake_compactor_swapper(void)
2373 {
2374 if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) {
2375 return;
2376 }
2377
2378 if (c_minor_count || vm_compressor_needs_to_major_compact()) {
2379 lck_mtx_lock_spin_always(c_list_lock);
2380
2381 fastwake_warmup = FALSE;
2382
2383 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2384 vm_wake_compactor_swapper_calls++;
2385
2386 compaction_swapper_awakened = 1;
2387 thread_wakeup((event_t)&c_compressor_swap_trigger);
2388 }
2389 lck_mtx_unlock_always(c_list_lock);
2390 }
2391 }
2392
2393
2394 void
2395 vm_consider_swapping()
2396 {
2397 c_segment_t c_seg, c_seg_next;
2398 clock_sec_t now;
2399 clock_nsec_t nsec;
2400
2401 assert(VM_CONFIG_SWAP_IS_PRESENT);
2402
2403 lck_mtx_lock_spin_always(c_list_lock);
2404
2405 compaction_swapper_abort = 1;
2406
2407 while (compaction_swapper_running) {
2408 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2409
2410 lck_mtx_unlock_always(c_list_lock);
2411
2412 thread_block(THREAD_CONTINUE_NULL);
2413
2414 lck_mtx_lock_spin_always(c_list_lock);
2415 }
2416 compaction_swapper_abort = 0;
2417 compaction_swapper_running = 1;
2418
2419 vm_swapout_ripe_segments = TRUE;
2420
2421 if (!queue_empty(&c_major_list_head)) {
2422 clock_get_system_nanotime(&now, &nsec);
2423
2424 c_seg = (c_segment_t)queue_first(&c_major_list_head);
2425
2426 while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) {
2427 if (c_overage_swapped_count >= c_overage_swapped_limit) {
2428 break;
2429 }
2430
2431 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
2432
2433 if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
2434 lck_mtx_lock_spin_always(&c_seg->c_lock);
2435
2436 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2437
2438 lck_mtx_unlock_always(&c_seg->c_lock);
2439 }
2440 c_seg = c_seg_next;
2441 }
2442 }
2443 vm_compressor_compact_and_swap(FALSE);
2444
2445 compaction_swapper_running = 0;
2446
2447 vm_swapout_ripe_segments = FALSE;
2448
2449 lck_mtx_unlock_always(c_list_lock);
2450
2451 thread_wakeup((event_t)&compaction_swapper_running);
2452 }
2453
2454
2455 void
2456 vm_consider_waking_compactor_swapper(void)
2457 {
2458 boolean_t need_wakeup = FALSE;
2459
2460 if (c_segment_count == 0) {
2461 return;
2462 }
2463
2464 if (compaction_swapper_running || compaction_swapper_awakened) {
2465 return;
2466 }
2467
2468 if (!compaction_swapper_inited && !compaction_swapper_init_now) {
2469 compaction_swapper_init_now = 1;
2470 need_wakeup = TRUE;
2471 }
2472
2473 if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) {
2474 need_wakeup = TRUE;
2475 } else if (compressor_needs_to_swap()) {
2476 need_wakeup = TRUE;
2477 } else if (c_minor_count) {
2478 uint64_t total_bytes;
2479
2480 total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64;
2481
2482 if ((total_bytes - compressor_bytes_used) > total_bytes / 10) {
2483 need_wakeup = TRUE;
2484 }
2485 }
2486 if (need_wakeup == TRUE) {
2487 lck_mtx_lock_spin_always(c_list_lock);
2488
2489 fastwake_warmup = FALSE;
2490
2491 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2492 memoryshot(VM_WAKEUP_COMPACTOR_SWAPPER, DBG_FUNC_NONE);
2493
2494 compaction_swapper_awakened = 1;
2495 thread_wakeup((event_t)&c_compressor_swap_trigger);
2496 }
2497 lck_mtx_unlock_always(c_list_lock);
2498 }
2499 }
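/*
 * wake the compactor/swapper thread only if there's work queued: pending minor
 * compactions, a detected need to swap, or roughly 10% of the compressor's
 * resident bytes sitting unused.  the first call also arranges for the
 * one-time compaction_swapper_init_now initialization.
 */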
2500
2501
2502 #define C_SWAPOUT_LIMIT 4
2503 #define DELAYED_COMPACTIONS_PER_PASS 30
2504
2505 void
2506 vm_compressor_do_delayed_compactions(boolean_t flush_all)
2507 {
2508 c_segment_t c_seg;
2509 int number_compacted = 0;
2510 boolean_t needs_to_swap = FALSE;
2511
2512
2513 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_START, c_minor_count, flush_all, 0, 0);
2514
2515 #if XNU_TARGET_OS_OSX
2516 LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED);
2517 #endif /* XNU_TARGET_OS_OSX */
2518
2519 while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) {
2520 c_seg = (c_segment_t)queue_first(&c_minor_list_head);
2521
2522 lck_mtx_lock_spin_always(&c_seg->c_lock);
2523
2524 if (c_seg->c_busy) {
2525 lck_mtx_unlock_always(c_list_lock);
2526 c_seg_wait_on_busy(c_seg);
2527 lck_mtx_lock_spin_always(c_list_lock);
2528
2529 continue;
2530 }
2531 C_SEG_BUSY(c_seg);
2532
2533 c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
2534
2535 if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) {
2536 if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT) {
2537 needs_to_swap = TRUE;
2538 }
2539
2540 number_compacted = 0;
2541 }
2542 lck_mtx_lock_spin_always(c_list_lock);
2543 }
2544
2545 VM_DEBUG_CONSTANT_EVENT(vm_compressor_do_delayed_compactions, VM_COMPRESSOR_DO_DELAYED_COMPACTIONS, DBG_FUNC_END, c_minor_count, number_compacted, needs_to_swap, 0);
2546 }
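/*
 * drains the minor-compaction queue: each pass pulls the head of
 * c_minor_list_head, waits out (and retries) busy segments, and minor-compacts
 * them.  every DELAYED_COMPACTIONS_PER_PASS compactions (when swap is active)
 * it re-checks whether a swapout is needed; if so and the swapout queue is
 * below C_SWAPOUT_LIMIT, the loop stops early with needs_to_swap == TRUE.
 */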
2547
2548
2549 #define C_SEGMENT_SWAPPEDIN_AGE_LIMIT 10
2550
2551 static void
2552 vm_compressor_age_swapped_in_segments(boolean_t flush_all)
2553 {
2554 c_segment_t c_seg;
2555 clock_sec_t now;
2556 clock_nsec_t nsec;
2557
2558 clock_get_system_nanotime(&now, &nsec);
2559
2560 while (!queue_empty(&c_swappedin_list_head)) {
2561 c_seg = (c_segment_t)queue_first(&c_swappedin_list_head);
2562
2563 if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) {
2564 break;
2565 }
2566
2567 lck_mtx_lock_spin_always(&c_seg->c_lock);
2568
2569 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
2570
2571 lck_mtx_unlock_always(&c_seg->c_lock);
2572 }
2573 }
2574
2575
2576 extern int vm_num_swap_files;
2577 extern int vm_num_pinned_swap_files;
2578 extern int vm_swappin_enabled;
2579
2580 extern unsigned int vm_swapfile_total_segs_used;
2581 extern unsigned int vm_swapfile_total_segs_alloced;
2582
2583
2584 void
2585 vm_compressor_flush(void)
2586 {
2587 uint64_t vm_swap_put_failures_at_start;
2588 wait_result_t wait_result = 0;
2589 AbsoluteTime startTime, endTime;
2590 clock_sec_t now_sec;
2591 clock_nsec_t now_nsec;
2592 uint64_t nsec;
2593
2594 HIBLOG("vm_compressor_flush - starting\n");
2595
2596 clock_get_uptime(&startTime);
2597
2598 lck_mtx_lock_spin_always(c_list_lock);
2599
2600 fastwake_warmup = FALSE;
2601 compaction_swapper_abort = 1;
2602
2603 while (compaction_swapper_running) {
2604 assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT);
2605
2606 lck_mtx_unlock_always(c_list_lock);
2607
2608 thread_block(THREAD_CONTINUE_NULL);
2609
2610 lck_mtx_lock_spin_always(c_list_lock);
2611 }
2612 compaction_swapper_abort = 0;
2613 compaction_swapper_running = 1;
2614
2615 hibernate_flushing = TRUE;
2616 hibernate_no_swapspace = FALSE;
2617 c_generation_id_flush_barrier = c_generation_id + 1000;
2618
2619 clock_get_system_nanotime(&now_sec, &now_nsec);
2620 hibernate_flushing_deadline = now_sec + HIBERNATE_FLUSHING_SECS_TO_COMPLETE;
2621
2622 vm_swap_put_failures_at_start = vm_swap_put_failures;
2623
2624 vm_compressor_compact_and_swap(TRUE);
2625
2626 while (!queue_empty(&c_swapout_list_head)) {
2627 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
2628
2629 lck_mtx_unlock_always(c_list_lock);
2630
2631 wait_result = thread_block(THREAD_CONTINUE_NULL);
2632
2633 lck_mtx_lock_spin_always(c_list_lock);
2634
2635 if (wait_result == THREAD_TIMED_OUT) {
2636 break;
2637 }
2638 }
2639 hibernate_flushing = FALSE;
2640 compaction_swapper_running = 0;
2641
2642 if (vm_swap_put_failures > vm_swap_put_failures_at_start) {
2643 HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n",
2644 vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT);
2645 }
2646
2647 lck_mtx_unlock_always(c_list_lock);
2648
2649 thread_wakeup((event_t)&compaction_swapper_running);
2650
2651 clock_get_uptime(&endTime);
2652 SUB_ABSOLUTETIME(&endTime, &startTime);
2653 absolutetime_to_nanoseconds(endTime, &nsec);
2654
2655 HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n",
2656 nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled);
2657 }
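/*
 * used on the hibernate path: stop the background compactor, set a generation
 * barrier and flushing deadline, run a full compact-and-swap, then wait (with
 * a timeout) for the swapout queue to drain before logging how long the flush
 * took and how many swap puts failed.
 */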
2658
2659
2660 int compaction_swap_trigger_thread_awakened = 0;
2661
2662 static void
2663 vm_compressor_swap_trigger_thread(void)
2664 {
2665 current_thread()->options |= TH_OPT_VMPRIV;
2666
2667 /*
2668 * compaction_swapper_init_now is set when the first call to
2669 * vm_consider_waking_compactor_swapper is made from
2670 * vm_pageout_scan... since this function is called upon
2671 * thread creation, we want to make sure to delay adjusting
2672 * the tuneables until we are awakened via vm_pageout_scan
2673 * so that we are at a point where the vm_swapfile_open will
2674 * be operating on the correct directory (in case the default
2675 * of using the VM volume is overridden by the dynamic_pager)
2676 */
2677 if (compaction_swapper_init_now) {
2678 vm_compaction_swapper_do_init();
2679
2680 if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
2681 thread_vm_bind_group_add();
2682 }
2683 #if CONFIG_THREAD_GROUPS
2684 thread_group_vm_add();
2685 #endif
2686 thread_set_thread_name(current_thread(), "VM_cswap_trigger");
2687 compaction_swapper_init_now = 0;
2688 }
2689 lck_mtx_lock_spin_always(c_list_lock);
2690
2691 compaction_swap_trigger_thread_awakened++;
2692 compaction_swapper_awakened = 0;
2693
2694 if (compaction_swapper_running == 0) {
2695 compaction_swapper_running = 1;
2696
2697 vm_compressor_compact_and_swap(FALSE);
2698
2699 compaction_swapper_running = 0;
2700 }
2701 assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT);
2702
2703 if (compaction_swapper_running == 0) {
2704 thread_wakeup((event_t)&compaction_swapper_running);
2705 }
2706
2707 lck_mtx_unlock_always(c_list_lock);
2708
2709 thread_block((thread_continue_t)vm_compressor_swap_trigger_thread);
2710
2711 /* NOTREACHED */
2712 }
2713
2714
2715 void
2716 vm_compressor_record_warmup_start(void)
2717 {
2718 c_segment_t c_seg;
2719
2720 lck_mtx_lock_spin_always(c_list_lock);
2721
2722 if (first_c_segment_to_warm_generation_id == 0) {
2723 if (!queue_empty(&c_age_list_head)) {
2724 c_seg = (c_segment_t)queue_last(&c_age_list_head);
2725
2726 first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
2727 } else {
2728 first_c_segment_to_warm_generation_id = 0;
2729 }
2730
2731 fastwake_recording_in_progress = TRUE;
2732 }
2733 lck_mtx_unlock_always(c_list_lock);
2734 }
2735
2736
2737 void
2738 vm_compressor_record_warmup_end(void)
2739 {
2740 c_segment_t c_seg;
2741
2742 lck_mtx_lock_spin_always(c_list_lock);
2743
2744 if (fastwake_recording_in_progress == TRUE) {
2745 if (!queue_empty(&c_age_list_head)) {
2746 c_seg = (c_segment_t)queue_last(&c_age_list_head);
2747
2748 last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
2749 } else {
2750 last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
2751 }
2752
2753 fastwake_recording_in_progress = FALSE;
2754
2755 HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
2756 }
2757 lck_mtx_unlock_always(c_list_lock);
2758 }
2759
2760
2761 #define DELAY_TRIM_ON_WAKE_SECS 25
2762
2763 void
2764 vm_compressor_delay_trim(void)
2765 {
2766 clock_sec_t sec;
2767 clock_nsec_t nsec;
2768
2769 clock_get_system_nanotime(&sec, &nsec);
2770 dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
2771 }
2772
2773
2774 void
2775 vm_compressor_do_warmup(void)
2776 {
2777 lck_mtx_lock_spin_always(c_list_lock);
2778
2779 if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
2780 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
2781
2782 lck_mtx_unlock_always(c_list_lock);
2783 return;
2784 }
2785
2786 if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) {
2787 fastwake_warmup = TRUE;
2788
2789 compaction_swapper_awakened = 1;
2790 thread_wakeup((event_t)&c_compressor_swap_trigger);
2791 }
2792 lck_mtx_unlock_always(c_list_lock);
2793 }
2794
2795 void
2796 do_fastwake_warmup_all(void)
2797 {
2798 lck_mtx_lock_spin_always(c_list_lock);
2799
2800 if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
2801 lck_mtx_unlock_always(c_list_lock);
2802 return;
2803 }
2804
2805 fastwake_warmup = TRUE;
2806
2807 do_fastwake_warmup(&c_swappedout_list_head, TRUE);
2808
2809 do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
2810
2811 fastwake_warmup = FALSE;
2812
2813 lck_mtx_unlock_always(c_list_lock);
2814 }
2815
2816 void
2817 do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
2818 {
2819 c_segment_t c_seg = NULL;
2820 AbsoluteTime startTime, endTime;
2821 uint64_t nsec;
2822
2823
2824 HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
2825
2826 clock_get_uptime(&startTime);
2827
2828 lck_mtx_unlock_always(c_list_lock);
2829
2830 proc_set_thread_policy(current_thread(),
2831 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
2832
2833 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2834
2835 lck_mtx_lock_spin_always(c_list_lock);
2836
2837 while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
2838 c_seg = (c_segment_t) queue_first(c_queue);
2839
2840 if (consider_all_cseg == FALSE) {
2841 if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
2842 c_seg->c_generation_id > last_c_segment_to_warm_generation_id) {
2843 break;
2844 }
2845
2846 if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) {
2847 break;
2848 }
2849 }
2850
2851 lck_mtx_lock_spin_always(&c_seg->c_lock);
2852 lck_mtx_unlock_always(c_list_lock);
2853
2854 if (c_seg->c_busy) {
2855 PAGE_REPLACEMENT_DISALLOWED(FALSE);
2856 c_seg_wait_on_busy(c_seg);
2857 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2858 } else {
2859 if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) {
2860 lck_mtx_unlock_always(&c_seg->c_lock);
2861 }
2862 c_segment_warmup_count++;
2863
2864 PAGE_REPLACEMENT_DISALLOWED(FALSE);
2865 vm_pageout_io_throttle();
2866 PAGE_REPLACEMENT_DISALLOWED(TRUE);
2867 }
2868 lck_mtx_lock_spin_always(c_list_lock);
2869 }
2870 lck_mtx_unlock_always(c_list_lock);
2871
2872 PAGE_REPLACEMENT_DISALLOWED(FALSE);
2873
2874 proc_set_thread_policy(current_thread(),
2875 TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
2876
2877 clock_get_uptime(&endTime);
2878 SUB_ABSOLUTETIME(&endTime, &startTime);
2879 absolutetime_to_nanoseconds(endTime, &nsec);
2880
2881 HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
2882
2883 lck_mtx_lock_spin_always(c_list_lock);
2884
2885 if (consider_all_cseg == FALSE) {
2886 first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
2887 }
2888 }
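/*
 * fastwake warmup walks the given swapped-out queue and swaps segments back in
 * at reduced I/O priority (compressor tier 2), stopping when it runs past the
 * generation-id window recorded at hibernation time, when free memory drops
 * below a quarter of AVAILABLE_MEMORY, or when fastwake_warmup is cleared.
 * consider_all_cseg == TRUE (the do_fastwake_warmup_all path) skips the
 * generation-id window and the free-memory check.
 */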
2889
2890 int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS;
2891 extern bool vm_swapout_thread_running;
2892 extern boolean_t compressor_store_stop_compaction;
2893
2894 void
2895 vm_compressor_compact_and_swap(boolean_t flush_all)
2896 {
2897 c_segment_t c_seg, c_seg_next;
2898 boolean_t keep_compacting, switch_state;
2899 clock_sec_t now;
2900 clock_nsec_t nsec;
2901 mach_timespec_t start_ts, end_ts;
2902 unsigned int number_considered, wanted_cseg_found, yield_after_considered_per_pass, number_yields;
2903 uint64_t bytes_to_free, bytes_freed, delta_usec;
2904
2905 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_START, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
2906
2907 if (fastwake_warmup == TRUE) {
2908 uint64_t starting_warmup_count;
2909
2910 starting_warmup_count = c_segment_warmup_count;
2911
2912 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
2913 first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
2914 do_fastwake_warmup(&c_swappedout_list_head, FALSE);
2915 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
2916
2917 fastwake_warmup = FALSE;
2918 }
2919
2920 /*
2921 * it's possible for the c_age_list_head to be empty if we
2922 * hit our limits for growing the compressor pool and we subsequently
2923 * hibernated... on the next hibernation we could see the queue as
2924 * empty and not proceed even though we have a bunch of segments on
2925 * the swapped-in queue that need to be dealt with.
2926 */
2927 vm_compressor_do_delayed_compactions(flush_all);
2928
2929 vm_compressor_age_swapped_in_segments(flush_all);
2930
2931 /*
2932 * we only need to grab the timestamp once per
2933 * invocation of this function since the
2934 * timescale we're interested in is measured
2935 * in days
2936 */
2937 clock_get_system_nanotime(&now, &nsec);
2938
2939 start_ts.tv_sec = (int) now;
2940 start_ts.tv_nsec = nsec;
2941 delta_usec = 0;
2942 number_considered = 0;
2943 wanted_cseg_found = 0;
2944 number_yields = 0;
2945 bytes_to_free = 0;
2946 bytes_freed = 0;
2947 yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS);
2948
2949 while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) {
2950 if (hibernate_flushing == TRUE) {
2951 clock_sec_t sec;
2952
2953 if (hibernate_should_abort()) {
2954 HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n");
2955 break;
2956 }
2957 if (hibernate_no_swapspace == TRUE) {
2958 HIBLOG("vm_compressor_flush - out of swap space\n");
2959 break;
2960 }
2961 if (vm_swap_files_pinned() == FALSE) {
2962 HIBLOG("vm_compressor_flush - unpinned swap files\n");
2963 break;
2964 }
2965 if (hibernate_in_progress_with_pinned_swap == TRUE &&
2966 (vm_swapfile_total_segs_alloced == vm_swapfile_total_segs_used)) {
2967 HIBLOG("vm_compressor_flush - out of pinned swap space\n");
2968 break;
2969 }
2970 clock_get_system_nanotime(&sec, &nsec);
2971
2972 if (sec > hibernate_flushing_deadline) {
2973 HIBLOG("vm_compressor_flush - failed to finish before deadline\n");
2974 break;
2975 }
2976 }
2977 if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
2978 assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC);
2979
2980 if (!vm_swapout_thread_running) {
2981 thread_wakeup((event_t)&c_swapout_list_head);
2982 }
2983
2984 lck_mtx_unlock_always(c_list_lock);
2985
2986 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0);
2987
2988 thread_block(THREAD_CONTINUE_NULL);
2989
2990 lck_mtx_lock_spin_always(c_list_lock);
2991 }
2992 /*
2993 * Minor compactions
2994 */
2995 vm_compressor_do_delayed_compactions(flush_all);
2996
2997 vm_compressor_age_swapped_in_segments(flush_all);
2998
2999 if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) {
3000 /*
3001 * we timed out on the above thread_block
3002 * let's loop around and try again
3003 * the timeout allows us to continue
3004 * to do minor compactions to make
3005 * more memory available
3006 */
3007 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 2, c_swapout_count, 0, 0);
3008
3009 continue;
3010 }
3011
3012 /*
3013 * Swap out segments?
3014 */
3015 if (flush_all == FALSE) {
3016 boolean_t needs_to_swap;
3017
3018 lck_mtx_unlock_always(c_list_lock);
3019
3020 needs_to_swap = compressor_needs_to_swap();
3021
3022 lck_mtx_lock_spin_always(c_list_lock);
3023
3024 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 3, needs_to_swap, 0, 0);
3025
3026 if (needs_to_swap == FALSE) {
3027 break;
3028 }
3029 }
3030 if (queue_empty(&c_age_list_head)) {
3031 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 4, c_age_count, 0, 0);
3032 break;
3033 }
3034 c_seg = (c_segment_t) queue_first(&c_age_list_head);
3035
3036 assert(c_seg->c_state == C_ON_AGE_Q);
3037
3038 if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) {
3039 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 5, 0, 0, 0);
3040 break;
3041 }
3042
3043 lck_mtx_lock_spin_always(&c_seg->c_lock);
3044
3045 if (c_seg->c_busy) {
3046 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 6, (void*) VM_KERNEL_ADDRPERM(c_seg), 0, 0);
3047
3048 lck_mtx_unlock_always(c_list_lock);
3049 c_seg_wait_on_busy(c_seg);
3050 lck_mtx_lock_spin_always(c_list_lock);
3051
3052 continue;
3053 }
3054 C_SEG_BUSY(c_seg);
3055
3056 if (c_seg_do_minor_compaction_and_unlock(c_seg, FALSE, TRUE, TRUE)) {
3057 /*
3058 * found an empty c_segment and freed it
3059 * so go grab the next guy in the queue
3060 */
3061 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 7, 0, 0, 0);
3062 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3063 continue;
3064 }
3065 /*
3066 * Major compaction
3067 */
3068 keep_compacting = TRUE;
3069 switch_state = TRUE;
3070
3071 while (keep_compacting == TRUE) {
3072 assert(c_seg->c_busy);
3073
3074 /* look for another segment to consolidate */
3075
3076 c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list);
3077
3078 if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next)) {
3079 break;
3080 }
3081
3082 assert(c_seg_next->c_state == C_ON_AGE_Q);
3083
3084 number_considered++;
3085
3086 if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) {
3087 break;
3088 }
3089
3090 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3091
3092 if (c_seg_next->c_busy) {
3093 /*
3094 * We are going to block for our neighbor.
3095 * If our c_seg is wanted, we should unbusy
3096 * it because we don't know how long we might
3097 * have to block here.
3098 */
3099 if (c_seg->c_wanted) {
3100 lck_mtx_unlock_always(&c_seg_next->c_lock);
3101 switch_state = FALSE;
3102 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3103 wanted_cseg_found++;
3104 break;
3105 }
3106
3107 lck_mtx_unlock_always(c_list_lock);
3108
3109 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 8, (void*) VM_KERNEL_ADDRPERM(c_seg_next), 0, 0);
3110
3111 c_seg_wait_on_busy(c_seg_next);
3112 lck_mtx_lock_spin_always(c_list_lock);
3113
3114 continue;
3115 }
3116 /* grab that segment */
3117 C_SEG_BUSY(c_seg_next);
3118
3119 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3120 if (c_seg_do_minor_compaction_and_unlock(c_seg_next, FALSE, TRUE, TRUE)) {
3121 /*
3122 * found an empty c_segment and freed it
3123 * so we can't continue to use c_seg_next
3124 */
3125 bytes_freed += bytes_to_free;
3126 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3127 continue;
3128 }
3129
3130 /* unlock the list ... */
3131 lck_mtx_unlock_always(c_list_lock);
3132
3133 /* do the major compaction */
3134
3135 keep_compacting = c_seg_major_compact(c_seg, c_seg_next);
3136
3137 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 9, keep_compacting, 0, 0);
3138
3139 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3140
3141 lck_mtx_lock_spin_always(&c_seg_next->c_lock);
3142 /*
3143 * run a minor compaction on the donor segment
3144 * since we pulled at least some of its
3145 * data into our target... if we've emptied
3146 * it, now is a good time to free it which
3147 * c_seg_minor_compaction_and_unlock also takes care of
3148 *
3149 * by passing TRUE, we ask for c_busy to be cleared
3150 * and c_wanted to be taken care of
3151 */
3152 bytes_to_free = C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3153 if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) {
3154 bytes_freed += bytes_to_free;
3155 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_freed_segs++;
3156 } else {
3157 bytes_to_free -= C_SEG_OFFSET_TO_BYTES(c_seg_next->c_populated_offset);
3158 bytes_freed += bytes_to_free;
3159 }
3160
3161 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3162
3163 /* relock the list */
3164 lck_mtx_lock_spin_always(c_list_lock);
3165
3166 if (c_seg->c_wanted) {
3167 /*
3168 * Our c_seg is in demand. Let's
3169 * unbusy it and wakeup the waiters
3170 * instead of continuing the compaction
3171 * because we could be in this loop
3172 * for a while.
3173 */
3174 switch_state = FALSE;
3175 wanted_cseg_found++;
3176 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bailed_compactions++;
3177 break;
3178 }
3179 } /* major compaction */
3180
3181 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 10, number_considered, wanted_cseg_found, 0);
3182
3183 lck_mtx_lock_spin_always(&c_seg->c_lock);
3184
3185 assert(c_seg->c_busy);
3186 assert(!c_seg->c_on_minorcompact_q);
3187
3188 if (switch_state) {
3189 if (VM_CONFIG_SWAP_IS_ACTIVE) {
3190 /*
3191 * This mode of putting a generic c_seg on the swapout list is
3192 * only supported when we have general swapping enabled
3193 */
3194 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3195 } else {
3196 if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) {
3197 assert(VM_CONFIG_SWAP_IS_PRESENT);
3198 /*
3199 * we are running compressor sweeps with swap-behind
3200 * make sure the c_seg has aged enough before swapping it
3201 * out...
3202 */
3203 if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) {
3204 c_seg->c_overage_swap = TRUE;
3205 c_overage_swapped_count++;
3206 c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE);
3207 }
3208 }
3209 }
3210 if (c_seg->c_state == C_ON_AGE_Q) {
3211 /*
3212 * this c_seg didn't get moved to the swapout queue
3213 * so we need to move it out of the way...
3214 * we just did a major compaction on it so put it
3215 * on that queue
3216 */
3217 c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE);
3218 } else {
3219 c_seg_major_compact_stats[c_seg_major_compact_stats_now].wasted_space_in_swapouts += C_SEG_BUFSIZE - c_seg->c_bytes_used;
3220 c_seg_major_compact_stats[c_seg_major_compact_stats_now].count_of_swapouts++;
3221 }
3222 }
3223
3224 C_SEG_WAKEUP_DONE(c_seg);
3225
3226 lck_mtx_unlock_always(&c_seg->c_lock);
3227
3228 if (c_swapout_count) {
3229 /*
3230 * We don't pause/yield here because we will either
3231 * yield below or at the top of the loop with the
3232 * assert_wait_timeout.
3233 */
3234 if (!vm_swapout_thread_running) {
3235 thread_wakeup((event_t)&c_swapout_list_head);
3236 }
3237 }
3238
3239 if (number_considered >= yield_after_considered_per_pass) {
3240 if (wanted_cseg_found) {
3241 /*
3242 * We stopped major compactions on a c_seg
3243 * that is wanted. We don't know the priority
3244 * of the waiter unfortunately but we are at
3245 * a very high priority and so, just in case
3246 * the waiter is a critical system daemon or
3247 * UI thread, let's give up the CPU in case
3248 * the system is running a few CPU intensive
3249 * tasks.
3250 */
3251 lck_mtx_unlock_always(c_list_lock);
3252
3253 mutex_pause(2); /* 100us yield */
3254
3255 number_yields++;
3256
3257 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 11, number_considered, number_yields, 0);
3258
3259 lck_mtx_lock_spin_always(c_list_lock);
3260 }
3261
3262 number_considered = 0;
3263 wanted_cseg_found = 0;
3264 }
3265 }
3266 clock_get_system_nanotime(&now, &nsec);
3267 end_ts.tv_sec = (int) now;
3268 end_ts.tv_nsec = nsec;
3269
3270 SUB_MACH_TIMESPEC(&end_ts, &start_ts);
3271
3272 delta_usec = (end_ts.tv_sec * USEC_PER_SEC) + (end_ts.tv_nsec / NSEC_PER_USEC) - (number_yields * 100);
3273
3274 delta_usec = MAX(1, delta_usec); /* we could have 0 usec run if conditions weren't right */
3275
3276 c_seg_major_compact_stats[c_seg_major_compact_stats_now].bytes_freed_rate_us = (bytes_freed / delta_usec);
3277
3278 if ((c_seg_major_compact_stats_now + 1) == C_SEG_MAJOR_COMPACT_STATS_MAX) {
3279 c_seg_major_compact_stats_now = 0;
3280 } else {
3281 c_seg_major_compact_stats_now++;
3282 }
3283
3284 assert(c_seg_major_compact_stats_now < C_SEG_MAJOR_COMPACT_STATS_MAX);
3285
3286 VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_END, c_age_count, c_minor_count, c_major_count, vm_page_free_count);
3287 }
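/*
 * one pass of the compactor/swapper: finish any fastwake warmup, drain the
 * delayed minor-compaction queue, age swapped-in segments, then repeatedly
 * take the oldest segment on the age queue, minor-compact it, and
 * major-compact its younger neighbors into it until it fills or the donors run
 * out.  the result either goes to the swapout queue (when swap is active, or
 * for ripe overage segments) or to the major-compact queue.  the loop yields
 * periodically when a segment it holds busy is wanted by a waiter, and
 * per-pass statistics (bytes freed per usec, swapouts, bailed compactions) are
 * rolled into c_seg_major_compact_stats.
 */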
3288
3289
3290 static c_segment_t
3291 c_seg_allocate(c_segment_t *current_chead)
3292 {
3293 c_segment_t c_seg;
3294 int min_needed;
3295 int size_to_populate;
3296
3297 #if XNU_TARGET_OS_OSX
3298 if (vm_compressor_low_on_space()) {
3299 vm_compressor_take_paging_space_action();
3300 }
3301 #endif /* XNU_TARGET_OS_OSX */
3302
3303 if ((c_seg = *current_chead) == NULL) {
3304 uint32_t c_segno;
3305
3306 lck_mtx_lock_spin_always(c_list_lock);
3307
3308 while (c_segments_busy == TRUE) {
3309 assert_wait((event_t) (&c_segments_busy), THREAD_UNINT);
3310
3311 lck_mtx_unlock_always(c_list_lock);
3312
3313 thread_block(THREAD_CONTINUE_NULL);
3314
3315 lck_mtx_lock_spin_always(c_list_lock);
3316 }
3317 if (c_free_segno_head == (uint32_t)-1) {
3318 uint32_t c_segments_available_new;
3319 uint32_t compressed_pages;
3320
3321 #if CONFIG_FREEZE
3322 if (freezer_incore_cseg_acct) {
3323 compressed_pages = c_segment_pages_compressed_incore;
3324 } else {
3325 compressed_pages = c_segment_pages_compressed;
3326 }
3327 #else
3328 compressed_pages = c_segment_pages_compressed;
3329 #endif /* CONFIG_FREEZE */
3330
3331 if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) {
3332 lck_mtx_unlock_always(c_list_lock);
3333
3334 return NULL;
3335 }
3336 c_segments_busy = TRUE;
3337 lck_mtx_unlock_always(c_list_lock);
3338
3339 kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page,
3340 PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR);
3341 c_segments_next_page += PAGE_SIZE;
3342
3343 c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE;
3344
3345 if (c_segments_available_new > c_segments_limit) {
3346 c_segments_available_new = c_segments_limit;
3347 }
3348
3349 for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) {
3350 c_segments[c_segno - 1].c_segno = c_segno;
3351 }
3352
3353 lck_mtx_lock_spin_always(c_list_lock);
3354
3355 c_segments[c_segno - 1].c_segno = c_free_segno_head;
3356 c_free_segno_head = c_segments_available;
3357 c_segments_available = c_segments_available_new;
3358
3359 c_segments_busy = FALSE;
3360 thread_wakeup((event_t) (&c_segments_busy));
3361 }
3362 c_segno = c_free_segno_head;
3363 assert(c_segno >= 0 && c_segno < c_segments_limit);
3364
3365 c_free_segno_head = (uint32_t)c_segments[c_segno].c_segno;
3366
3367 /*
3368 * do the rest of the bookkeeping now while we're still behind
3369 * the list lock and grab our generation id now into a local
3370 * so that we can install it once we have the c_seg allocated
3371 */
3372 c_segment_count++;
3373 if (c_segment_count > c_segment_count_max) {
3374 c_segment_count_max = c_segment_count;
3375 }
3376
3377 lck_mtx_unlock_always(c_list_lock);
3378
3379 c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO);
3380
3381 c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno);
3382
3383 lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL);
3384
3385 c_seg->c_state = C_IS_EMPTY;
3386 c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX;
3387 c_seg->c_mysegno = c_segno;
3388
3389 lck_mtx_lock_spin_always(c_list_lock);
3390 c_empty_count++;
3391 c_seg_switch_state(c_seg, C_IS_FILLING, FALSE);
3392 c_segments[c_segno].c_seg = c_seg;
3393 assert(c_segments[c_segno].c_segno > c_segments_available);
3394 lck_mtx_unlock_always(c_list_lock);
3395
3396 *current_chead = c_seg;
3397
3398 #if DEVELOPMENT || DEBUG
3399 C_SEG_MAKE_WRITEABLE(c_seg);
3400 #endif
3401 }
3402 c_seg_alloc_nextslot(c_seg);
3403
3404 size_to_populate = C_SEG_ALLOCSIZE - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset);
3405
3406 if (size_to_populate) {
3407 min_needed = PAGE_SIZE + (C_SEG_ALLOCSIZE - C_SEG_BUFSIZE);
3408
3409 if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) {
3410 if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) {
3411 size_to_populate = C_SEG_MAX_POPULATE_SIZE;
3412 }
3413
3414 OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed);
3415
3416 kernel_memory_populate(compressor_map,
3417 (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset],
3418 size_to_populate,
3419 KMA_COMPRESSOR,
3420 VM_KERN_MEMORY_COMPRESSOR);
3421 } else {
3422 size_to_populate = 0;
3423 }
3424 }
3425 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3426
3427 lck_mtx_lock_spin_always(&c_seg->c_lock);
3428
3429 if (size_to_populate) {
3430 c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate);
3431 }
3432
3433 return c_seg;
3434 }
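/*
 * c_seg_allocate either returns the caller's current filling segment or carves
 * out a new one: grab a free segment number (populating another page of the
 * c_segments array when the freelist is exhausted), zalloc and initialize the
 * c_segment, switch it to C_IS_FILLING, and then make sure enough of the
 * segment buffer is populated (in chunks of up to C_SEG_MAX_POPULATE_SIZE) to
 * accept the next compressed page.  returns NULL when the configured segment
 * or compressed-page limits have been reached, otherwise returns with the
 * segment lock held and page replacement disallowed.
 */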
3435
3436 #if DEVELOPMENT || DEBUG
3437 #if CONFIG_FREEZE
3438 extern boolean_t memorystatus_freeze_to_memory;
3439 #endif /* CONFIG_FREEZE */
3440 #endif /* DEVELOPMENT || DEBUG */
3441
3442 static void
3443 c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead)
3444 {
3445 uint32_t unused_bytes;
3446 uint32_t offset_to_depopulate;
3447 int new_state = C_ON_AGE_Q;
3448 clock_sec_t sec;
3449 clock_nsec_t nsec;
3450 boolean_t head_insert = FALSE;
3451
3452 unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset));
3453
3454 if (unused_bytes) {
3455 offset_to_depopulate = C_SEG_BYTES_TO_OFFSET(round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset)));
3456
3457 /*
3458 * release the extra physical page(s) at the end of the segment
3459 */
3460 lck_mtx_unlock_always(&c_seg->c_lock);
3461
3462 kernel_memory_depopulate(
3463 compressor_map,
3464 (vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate],
3465 unused_bytes,
3466 KMA_COMPRESSOR,
3467 VM_KERN_MEMORY_COMPRESSOR);
3468
3469 lck_mtx_lock_spin_always(&c_seg->c_lock);
3470
3471 c_seg->c_populated_offset = offset_to_depopulate;
3472 }
3473 assert(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset) <= C_SEG_BUFSIZE);
3474
3475 #if DEVELOPMENT || DEBUG
3476 {
3477 boolean_t c_seg_was_busy = FALSE;
3478
3479 if (!c_seg->c_busy) {
3480 C_SEG_BUSY(c_seg);
3481 } else {
3482 c_seg_was_busy = TRUE;
3483 }
3484
3485 lck_mtx_unlock_always(&c_seg->c_lock);
3486
3487 C_SEG_WRITE_PROTECT(c_seg);
3488
3489 lck_mtx_lock_spin_always(&c_seg->c_lock);
3490
3491 if (c_seg_was_busy == FALSE) {
3492 C_SEG_WAKEUP_DONE(c_seg);
3493 }
3494 }
3495 #endif
3496
3497 #if CONFIG_FREEZE
3498 if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) &&
3499 VM_CONFIG_SWAP_IS_PRESENT &&
3500 VM_CONFIG_FREEZER_SWAP_IS_ACTIVE
3501 #if DEVELOPMENT || DEBUG
3502 && !memorystatus_freeze_to_memory
3503 #endif /* DEVELOPMENT || DEBUG */
3504 ) {
3505 new_state = C_ON_SWAPOUT_Q;
3506 }
3507 #endif /* CONFIG_FREEZE */
3508
3509 if (vm_darkwake_mode == TRUE) {
3510 new_state = C_ON_SWAPOUT_Q;
3511 head_insert = TRUE;
3512 }
3513
3514 clock_get_system_nanotime(&sec, &nsec);
3515 c_seg->c_creation_ts = (uint32_t)sec;
3516
3517 lck_mtx_lock_spin_always(c_list_lock);
3518
3519 c_seg->c_generation_id = c_generation_id++;
3520 c_seg_switch_state(c_seg, new_state, head_insert);
3521
3522 #if CONFIG_FREEZE
3523 if (c_seg->c_state == C_ON_SWAPOUT_Q) {
3524 /*
3525 * darkwake and freezer can't coexist
3526 * We'll need to fix this accounting as a start.
3527 */
3528 assert(vm_darkwake_mode == FALSE);
3529 c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task);
3530 freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used;
3531 }
3532 #endif /* CONFIG_FREEZE */
3533
3534 if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
3535 #if CONFIG_FREEZE
3536 assert(c_seg->c_task_owner == NULL);
3537 #endif /* CONFIG_FREEZE */
3538 c_seg_need_delayed_compaction(c_seg, TRUE);
3539 }
3540
3541 lck_mtx_unlock_always(c_list_lock);
3542
3543 if (c_seg->c_state == C_ON_SWAPOUT_Q) {
3544 /*
3545 * Darkwake and Freeze configs always
3546 * wake up the swapout thread because
3547 * the compactor thread that normally handles
3548 * it may not be running as much in these
3549 * configs.
3550 */
3551 thread_wakeup((event_t)&c_swapout_list_head);
3552 }
3553
3554 *current_chead = NULL;
3555 }
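/*
 * when a filling segment is retired, any whole unused pages at the tail are
 * depopulated, the segment gets a creation timestamp and generation id, and it
 * moves to C_ON_AGE_Q, or straight to C_ON_SWAPOUT_Q in darkwake mode and on
 * the freezer path (with the frozen task charged for the swapped bytes).
 * segments landing on the age queue with at least a page of slack are also
 * queued for delayed minor compaction.
 */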
3556
3557
3558 /*
3559 * returns with c_seg locked
3560 */
3561 void
3562 c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q)
3563 {
3564 clock_sec_t sec;
3565 clock_nsec_t nsec;
3566
3567 clock_get_system_nanotime(&sec, &nsec);
3568
3569 lck_mtx_lock_spin_always(c_list_lock);
3570 lck_mtx_lock_spin_always(&c_seg->c_lock);
3571
3572 assert(c_seg->c_busy_swapping);
3573 assert(c_seg->c_busy);
3574
3575 c_seg->c_busy_swapping = 0;
3576
3577 if (c_seg->c_overage_swap == TRUE) {
3578 c_overage_swapped_count--;
3579 c_seg->c_overage_swap = FALSE;
3580 }
3581 if (has_data == TRUE) {
3582 if (age_on_swapin_q == TRUE) {
3583 c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE);
3584 } else {
3585 c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
3586 }
3587
3588 if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
3589 c_seg_need_delayed_compaction(c_seg, TRUE);
3590 }
3591 } else {
3592 c_seg->c_store.c_buffer = (int32_t*) NULL;
3593 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
3594
3595 c_seg_switch_state(c_seg, C_ON_BAD_Q, FALSE);
3596 }
3597 c_seg->c_swappedin_ts = (uint32_t)sec;
3598
3599 lck_mtx_unlock_always(c_list_lock);
3600 }
3601
3602
3603
3604 /*
3605 * c_seg has to be locked and is returned locked if the c_seg isn't freed
3606 * PAGE_REPLACEMENT_DISALLOWED has to be TRUE on entry and is returned TRUE
3607 * c_seg_swapin returns 1 if the c_seg was freed, 0 otherwise
3608 */
3609
3610 int
3611 c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q)
3612 {
3613 vm_offset_t addr = 0;
3614 uint32_t io_size = 0;
3615 uint64_t f_offset;
3616
3617 assert(C_SEG_IS_ONDISK(c_seg));
3618
3619 #if !CHECKSUM_THE_SWAP
3620 c_seg_trim_tail(c_seg);
3621 #endif
3622 io_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
3623 f_offset = c_seg->c_store.c_swap_handle;
3624
3625 C_SEG_BUSY(c_seg);
3626 c_seg->c_busy_swapping = 1;
3627
3628 /*
3629 * This thread is likely going to block for I/O.
3630 * Make sure it is ready to run when the I/O completes because
3631 * it needs to clear the busy bit on the c_seg so that other
3632 * waiting threads can make progress too. To do that, bump
3633 * the rwlock_count so that this thread's priority is boosted.
3634 */
3635 set_thread_rwlock_boost();
3636 lck_mtx_unlock_always(&c_seg->c_lock);
3637
3638 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3639
3640 addr = (vm_offset_t)C_SEG_BUFFER_ADDRESS(c_seg->c_mysegno);
3641 c_seg->c_store.c_buffer = (int32_t*) addr;
3642
3643 kernel_memory_populate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
3644
3645 if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) {
3646 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3647
3648 kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
3649
3650 c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q);
3651 } else {
3652 #if ENCRYPTED_SWAP
3653 vm_swap_decrypt(c_seg);
3654 #endif /* ENCRYPTED_SWAP */
3655
3656 #if CHECKSUM_THE_SWAP
3657 if (c_seg->cseg_swap_size != io_size) {
3658 panic("swapin size doesn't match swapout size");
3659 }
3660
3661 if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) {
3662 panic("c_seg_swapin - Swap hash mismatch\n");
3663 }
3664 #endif /* CHECKSUM_THE_SWAP */
3665
3666 PAGE_REPLACEMENT_DISALLOWED(TRUE);
3667
3668 c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q);
3669
3670 #if CONFIG_FREEZE
3671 /*
3672 * c_seg_swapin_requeue() returns with the c_seg lock held.
3673 */
3674 if (!lck_mtx_try_lock_spin_always(c_list_lock)) {
3675 assert(c_seg->c_busy);
3676
3677 lck_mtx_unlock_always(&c_seg->c_lock);
3678 lck_mtx_lock_spin_always(c_list_lock);
3679 lck_mtx_lock_spin_always(&c_seg->c_lock);
3680 }
3681
3682 if (c_seg->c_task_owner) {
3683 c_seg_update_task_owner(c_seg, NULL);
3684 }
3685
3686 lck_mtx_unlock_always(c_list_lock);
3687
3688 OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore);
3689 #endif /* CONFIG_FREEZE */
3690
3691 OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);
3692
3693 if (force_minor_compaction == TRUE) {
3694 if (c_seg_minor_compaction_and_unlock(c_seg, FALSE)) {
3695 /*
3696 * c_seg was completely empty, so it was freed;
3697 * be careful not to reference it again.
3698 *
3699 * Drop the rwlock_count so that the thread priority
3700 * returns to where it is supposed to be.
3701 */
3702 clear_thread_rwlock_boost();
3703 return 1;
3704 }
3705
3706 lck_mtx_lock_spin_always(&c_seg->c_lock);
3707 }
3708 }
3709 C_SEG_WAKEUP_DONE(c_seg);
3710
3711 /*
3712 * Drop the rwlock_count so that the thread priority
3713 * returns to where it is supposed to be.
3714 */
3715 clear_thread_rwlock_boost();
3716
3717 return 0;
3718 }
3719
3720
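/*
 * Informal note on the single-value (SV) hash: each c_sv_hash_entry packs a
 * reference count and the 32-bit page value into one 64-bit he_record, so
 * lookups and updates can be done lock-free with a 64-bit compare-and-swap.
 * A sketch of the idea (not the build definition; see the c_sv_hash_entry
 * declaration for the authoritative layout):
 *
 *	union {
 *		struct { uint32_t he_ref; uint32_t he_data; };
 *		uint64_t he_record;
 *	};
 *
 * c_segment_sv_hash_drop_ref() below decrements he_ref with such a CAS loop,
 * and c_segment_sv_hash_insert() increments it (or claims an empty entry).
 */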
3721 static void
3722 c_segment_sv_hash_drop_ref(int hash_indx)
3723 {
3724 struct c_sv_hash_entry o_sv_he, n_sv_he;
3725
3726 while (1) {
3727 o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record;
3728
3729 n_sv_he.he_ref = o_sv_he.he_ref - 1;
3730 n_sv_he.he_data = o_sv_he.he_data;
3731
3732 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) {
3733 if (n_sv_he.he_ref == 0) {
3734 OSAddAtomic(-1, &c_segment_svp_in_hash);
3735 }
3736 break;
3737 }
3738 }
3739 }
3740
3741
3742 static int
3743 c_segment_sv_hash_insert(uint32_t data)
3744 {
3745 int hash_sindx;
3746 int misses;
3747 struct c_sv_hash_entry o_sv_he, n_sv_he;
3748 boolean_t got_ref = FALSE;
3749
3750 if (data == 0) {
3751 OSAddAtomic(1, &c_segment_svp_zero_compressions);
3752 } else {
3753 OSAddAtomic(1, &c_segment_svp_nonzero_compressions);
3754 }
3755
3756 hash_sindx = data & C_SV_HASH_MASK;
3757
3758 for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) {
3759 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
3760
3761 while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) {
3762 n_sv_he.he_ref = o_sv_he.he_ref + 1;
3763 n_sv_he.he_data = data;
3764
3765 if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) {
3766 if (n_sv_he.he_ref == 1) {
3767 OSAddAtomic(1, &c_segment_svp_in_hash);
3768 }
3769 got_ref = TRUE;
3770 break;
3771 }
3772 o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record;
3773 }
3774 if (got_ref == TRUE) {
3775 break;
3776 }
3777 hash_sindx++;
3778
3779 if (hash_sindx == C_SV_HASH_SIZE) {
3780 hash_sindx = 0;
3781 }
3782 }
3783 if (got_ref == FALSE) {
3784 return -1;
3785 }
3786
3787 return hash_sindx;
3788 }
3789
3790
3791 #if RECORD_THE_COMPRESSED_DATA
3792
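/*
 * Record format (as written by this helper): a 4-byte length followed by the
 * compressed payload, appended sequentially to the record buffer and flushed
 * by c_compressed_record_write() once roughly C_SEG_ALLOCSIZE bytes have
 * accumulated (see the flush check in c_compress_page()).
 */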
3793 static void
3794 c_compressed_record_data(char *src, int c_size)
3795 {
3796 if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) {
3797 panic("c_compressed_record_cptr >= c_compressed_record_ebuf");
3798 }
3799
3800 *(int *)((void *)c_compressed_record_cptr) = c_size;
3801
3802 c_compressed_record_cptr += 4;
3803
3804 memcpy(c_compressed_record_cptr, src, c_size);
3805 c_compressed_record_cptr += c_size;
3806 }
3807 #endif
3808
3809
3810 static int
3811 c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf)
3812 {
3813 int c_size;
3814 int c_rounded_size = 0;
3815 int max_csize;
3816 c_slot_t cs;
3817 c_segment_t c_seg;
3818
3819 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0);
3820 retry:
3821 if ((c_seg = c_seg_allocate(current_chead)) == NULL) {
3822 return 1;
3823 }
3824 /*
3825 * returns with c_seg lock held
3826 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
3827 * c_nextslot has been allocated and
3828 * c_store.c_buffer populated
3829 */
3830 assert(c_seg->c_state == C_IS_FILLING);
3831
3832 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot);
3833
3834 C_SLOT_ASSERT_PACKABLE(slot_ptr);
3835 cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
3836
3837 cs->c_offset = c_seg->c_nextoffset;
3838
3839 max_csize = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset);
3840
3841 if (max_csize > PAGE_SIZE) {
3842 max_csize = PAGE_SIZE;
3843 }
3844
3845 #if CHECKSUM_THE_DATA
3846 cs->c_hash_data = vmc_hash(src, PAGE_SIZE);
3847 #endif
3848 boolean_t incomp_copy = FALSE;
3849 int max_csize_adj = (max_csize - 4);
3850
3851 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
3852 #if defined(__arm__) || defined(__arm64__)
3853 uint16_t ccodec = CINVALID;
3854 uint32_t inline_popcount;
3855 if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) {
3856 c_size = metacompressor((const uint8_t *) src,
3857 (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
3858 max_csize_adj, &ccodec,
3859 scratch_buf, &incomp_copy, &inline_popcount);
3860 #if __ARM_WKDM_POPCNT__
3861 cs->c_inline_popcount = inline_popcount;
3862 #else
3863 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
3864 #endif
3865
3866 #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4
3867 if (c_size > max_csize_adj) {
3868 c_size = -1;
3869 }
3870 #endif
3871 } else {
3872 c_size = -1;
3873 }
3874 assert(ccodec == CCWK || ccodec == CCLZ4);
3875 cs->c_codec = ccodec;
3876 #endif
3877 } else {
3878 #if defined(__arm__) || defined(__arm64__)
3879 cs->c_codec = CCWK;
3880 #endif
3881 #if defined(__arm64__)
3882 __unreachable_ok_push
3883 if (PAGE_SIZE == 4096) {
3884 c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
3885 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
3886 } else {
3887 c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
3888 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
3889 }
3890 __unreachable_ok_pop
3891 #else
3892 c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
3893 (WK_word *)(uintptr_t)scratch_buf, max_csize_adj);
3894 #endif
3895 }
3896 assertf(((c_size <= max_csize_adj) && (c_size >= -1)),
3897 "c_size invalid (%d, %d), cur compressions: %d", c_size, max_csize_adj, c_segment_pages_compressed);
3898
3899 if (c_size == -1) {
3900 if (max_csize < PAGE_SIZE) {
3901 c_current_seg_filled(c_seg, current_chead);
3902 assert(*current_chead == NULL);
3903
3904 lck_mtx_unlock_always(&c_seg->c_lock);
3905 /* TODO: it may be worth requiring codecs to distinguish
3906 * between incompressible inputs and failures due to
3907 * budget exhaustion.
3908 */
3909 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3910 goto retry;
3911 }
3912 c_size = PAGE_SIZE;
3913
3914 if (incomp_copy == FALSE) {
3915 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
3916 }
3917
3918 OSAddAtomic(1, &c_segment_noncompressible_pages);
3919 } else if (c_size == 0) {
3920 int hash_index;
3921
3922 /*
3923 * special case - this is a page completely full of a single 32 bit value
3924 */
3925 hash_index = c_segment_sv_hash_insert(*(uint32_t *)(uintptr_t)src);
3926
3927 if (hash_index != -1) {
3928 slot_ptr->s_cindx = hash_index;
3929 slot_ptr->s_cseg = C_SV_CSEG_ID;
3930
3931 OSAddAtomic(1, &c_segment_svp_hash_succeeded);
3932 #if RECORD_THE_COMPRESSED_DATA
3933 c_compressed_record_data(src, 4);
3934 #endif
3935 goto sv_compression;
3936 }
3937 c_size = 4;
3938
3939 memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size);
3940
3941 OSAddAtomic(1, &c_segment_svp_hash_failed);
3942 }
3943
3944 #if RECORD_THE_COMPRESSED_DATA
3945 c_compressed_record_data((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
3946 #endif
3947 #if CHECKSUM_THE_COMPRESSED_DATA
3948 cs->c_hash_compressed_data = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size);
3949 #endif
3950 #if POPCOUNT_THE_COMPRESSED_DATA
3951 cs->c_pop_cdata = vmc_pop((uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset], c_size);
3952 #endif
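/*
 * Round the compressed size up to the segment's offset alignment before
 * accounting; offsets are stored in aligned units. For example, assuming a
 * 4-byte alignment (C_SEG_OFFSET_ALIGNMENT_MASK == 0x3), a c_size of 613
 * bytes would be rounded to 616.
 */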
3953 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
3954
3955 PACK_C_SIZE(cs, c_size);
3956 c_seg->c_bytes_used += c_rounded_size;
3957 c_seg->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
3958 c_seg->c_slots_used++;
3959
3960 slot_ptr->s_cindx = c_seg->c_nextslot++;
3961 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
3962 slot_ptr->s_cseg = c_seg->c_mysegno + 1;
3963
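/*
 * Informal example of the slot mapping just written: a page stored at slot 5
 * of segment number 12 ends up with s_cseg == 13 (segno + 1, so that an
 * all-zero mapping still means "empty slot") and s_cindx == 5, while pages
 * kept in the single-value hash use the reserved s_cseg value C_SV_CSEG_ID
 * instead.
 */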
3964 sv_compression:
3965 if (c_seg->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) {
3966 c_current_seg_filled(c_seg, current_chead);
3967 assert(*current_chead == NULL);
3968 }
3969 lck_mtx_unlock_always(&c_seg->c_lock);
3970
3971 PAGE_REPLACEMENT_DISALLOWED(FALSE);
3972
3973 #if RECORD_THE_COMPRESSED_DATA
3974 if ((c_compressed_record_cptr - c_compressed_record_sbuf) >= C_SEG_ALLOCSIZE) {
3975 c_compressed_record_write(c_compressed_record_sbuf, (int)(c_compressed_record_cptr - c_compressed_record_sbuf));
3976 c_compressed_record_cptr = c_compressed_record_sbuf;
3977 }
3978 #endif
3979 if (c_size) {
3980 OSAddAtomic64(c_size, &c_segment_compressed_bytes);
3981 OSAddAtomic64(c_rounded_size, &compressor_bytes_used);
3982 }
3983 OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes);
3984
3985 OSAddAtomic(1, &c_segment_pages_compressed);
3986 #if CONFIG_FREEZE
3987 OSAddAtomic(1, &c_segment_pages_compressed_incore);
3988 #endif /* CONFIG_FREEZE */
3989 OSAddAtomic(1, &sample_period_compression_count);
3990
3991 KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0);
3992
3993 return 0;
3994 }
3995
3996 static inline void
3997 sv_decompress(int32_t *ddst, int32_t pattern)
3998 {
3999 // assert(__builtin_constant_p(PAGE_SIZE) != 0);
4000 #if defined(__x86_64__)
4001 memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t));
4002 #elif defined(__arm64__)
4003 assert((PAGE_SIZE % 128) == 0);
4004 if (pattern == 0) {
4005 fill32_dczva((addr64_t)ddst, PAGE_SIZE);
4006 } else {
4007 fill32_nt((addr64_t)ddst, PAGE_SIZE, pattern);
4008 }
4009 #else
4010 size_t i;
4011
4012 /* Unroll the pattern fill loop 4x to encourage the
4013 * compiler to emit NEON stores, cf.
4014 * <rdar://problem/25839866> Loop autovectorization
4015 * anomalies.
4016 */
4017 /* We use separate loops for each PAGE_SIZE
4018 * to allow the autovectorizer to engage, as PAGE_SIZE
4019 * may not be a constant.
4020 */
4021
4022 __unreachable_ok_push
4023 if (PAGE_SIZE == 4096) {
4024 for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) {
4025 *ddst++ = pattern;
4026 *ddst++ = pattern;
4027 *ddst++ = pattern;
4028 *ddst++ = pattern;
4029 }
4030 } else {
4031 assert(PAGE_SIZE == 16384);
4032 for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) {
4033 *ddst++ = pattern;
4034 *ddst++ = pattern;
4035 *ddst++ = pattern;
4036 *ddst++ = pattern;
4037 }
4038 }
4039 __unreachable_ok_pop
4040 #endif
4041 }
4042
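/*
 * Informal summary of c_decompress_page(): when 'dst' is non-NULL the slot's
 * data is decompressed (or copied, for non-compressible and single-value
 * slots) into 'dst'; when 'dst' is NULL the slot is simply freed. C_KEEP
 * preserves the slot, C_DONT_BLOCK fails with -2 instead of waiting on a busy
 * or on-disk segment, and C_KDP restricts the path to debugger-safe behavior.
 * The caller-visible return codes are spelled out at the vm_compressor_get()
 * and vm_compressor_free() call sites below.
 */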
4043 static int
4044 c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot)
4045 {
4046 c_slot_t cs;
4047 c_segment_t c_seg;
4048 uint32_t c_segno;
4049 uint16_t c_indx;
4050 int c_rounded_size;
4051 uint32_t c_size;
4052 int retval = 0;
4053 boolean_t need_unlock = TRUE;
4054 boolean_t consider_defragmenting = FALSE;
4055 boolean_t kdp_mode = FALSE;
4056
4057 if (__improbable(flags & C_KDP)) {
4058 if (not_in_kdp) {
4059 panic("C_KDP passed to decompress page from outside of debugger context");
4060 }
4061
4062 assert((flags & C_KEEP) == C_KEEP);
4063 assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK);
4064
4065 if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) {
4066 return -2;
4067 }
4068
4069 kdp_mode = TRUE;
4070 *zeroslot = 0;
4071 }
4072
4073 ReTry:
4074 if (__probable(!kdp_mode)) {
4075 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4076 } else {
4077 if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) {
4078 return -2;
4079 }
4080 }
4081
4082 #if HIBERNATION
4083 /*
4084 * if hibernation is enabled, it indicates (via a call
4085 * to 'vm_decompressor_lock') that no further
4086 * decompressions are allowed once it reaches
4087 * the point of flushing all of the currently dirty
4088 * anonymous memory through the compressor and out
4089 * to disk... in this state we allow freeing of compressed
4090 * pages and must honor the C_DONT_BLOCK case
4091 */
4092 if (__improbable(dst && decompressions_blocked == TRUE)) {
4093 if (flags & C_DONT_BLOCK) {
4094 if (__probable(!kdp_mode)) {
4095 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4096 }
4097
4098 *zeroslot = 0;
4099 return -2;
4100 }
4101 /*
4102 * it's safe to atomically assert and block behind the
4103 * lock held in shared mode because "decompressions_blocked" is
4104 * only set and cleared, and the thread_wakeup issued, while the lock
4105 * is held exclusively
4106 */
4107 assert_wait((event_t)&decompressions_blocked, THREAD_UNINT);
4108
4109 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4110
4111 thread_block(THREAD_CONTINUE_NULL);
4112
4113 goto ReTry;
4114 }
4115 #endif
4116 /* s_cseg is actually "segno+1" */
4117 c_segno = slot_ptr->s_cseg - 1;
4118
4119 if (__improbable(c_segno >= c_segments_available)) {
4120 panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)",
4121 c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr));
4122 }
4123
4124 if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) {
4125 panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)",
4126 c_segno, slot_ptr, *(int *)((void *)slot_ptr));
4127 }
4128
4129 c_seg = c_segments[c_segno].c_seg;
4130
4131 if (__probable(!kdp_mode)) {
4132 lck_mtx_lock_spin_always(&c_seg->c_lock);
4133 } else {
4134 if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) {
4135 return -2;
4136 }
4137 }
4138
4139 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4140
4141 if (dst == NULL && c_seg->c_busy_swapping) {
4142 assert(c_seg->c_busy);
4143
4144 goto bypass_busy_check;
4145 }
4146 if (flags & C_DONT_BLOCK) {
4147 if (c_seg->c_busy || (C_SEG_IS_ONDISK(c_seg) && dst)) {
4148 *zeroslot = 0;
4149
4150 retval = -2;
4151 goto done;
4152 }
4153 }
4154 if (c_seg->c_busy) {
4155 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4156
4157 c_seg_wait_on_busy(c_seg);
4158
4159 goto ReTry;
4160 }
4161 bypass_busy_check:
4162
4163 c_indx = slot_ptr->s_cindx;
4164
4165 if (__improbable(c_indx >= c_seg->c_nextslot)) {
4166 panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)",
4167 c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr));
4168 }
4169
4170 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4171
4172 c_size = UNPACK_C_SIZE(cs);
4173
4174 if (__improbable(c_size == 0)) {
4175 panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)",
4176 c_seg, slot_ptr, *(int *)((void *)slot_ptr));
4177 }
4178
4179 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4180
4181 if (dst) {
4182 uint32_t age_of_cseg;
4183 clock_sec_t cur_ts_sec;
4184 clock_nsec_t cur_ts_nsec;
4185
4186 if (C_SEG_IS_ONDISK(c_seg)) {
4187 #if CONFIG_FREEZE
4188 if (freezer_incore_cseg_acct) {
4189 if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) {
4190 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4191 lck_mtx_unlock_always(&c_seg->c_lock);
4192
4193 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
4194
4195 goto ReTry;
4196 }
4197
4198 uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
4199 if ((incore_seg_count + 1) >= c_segments_nearing_limit) {
4200 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4201 lck_mtx_unlock_always(&c_seg->c_lock);
4202
4203 memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */);
4204
4205 goto ReTry;
4206 }
4207 }
4208 #endif /* CONFIG_FREEZE */
4209 assert(kdp_mode == FALSE);
4210 retval = c_seg_swapin(c_seg, FALSE, TRUE);
4211 assert(retval == 0);
4212
4213 retval = 1;
4214 }
4215 if (c_seg->c_state == C_ON_BAD_Q) {
4216 assert(c_seg->c_store.c_buffer == NULL);
4217 *zeroslot = 0;
4218
4219 retval = -1;
4220 goto done;
4221 }
4222
4223 #if POPCOUNT_THE_COMPRESSED_DATA
4224 unsigned csvpop;
4225 uintptr_t csvaddr = (uintptr_t) &c_seg->c_store.c_buffer[cs->c_offset];
4226 if (cs->c_pop_cdata != (csvpop = vmc_pop(csvaddr, c_size))) {
4227 panic("Compressed data popcount doesn't match original, bit distance: %d %p (phys: %p) %p %p 0x%x 0x%x 0x%x 0x%x", (csvpop - cs->c_pop_cdata), (void *)csvaddr, (void *) kvtophys(csvaddr), c_seg, cs, cs->c_offset, c_size, csvpop, cs->c_pop_cdata);
4228 }
4229 #endif
4230
4231 #if CHECKSUM_THE_COMPRESSED_DATA
4232 unsigned csvhash;
4233 if (cs->c_hash_compressed_data != (csvhash = vmc_hash((char *)&c_seg->c_store.c_buffer[cs->c_offset], c_size))) {
4234 panic("Compressed data doesn't match original %p %p %u %u %u", c_seg, cs, c_size, cs->c_hash_compressed_data, csvhash);
4235 }
4236 #endif
4237 if (c_rounded_size == PAGE_SIZE) {
4238 /*
4239 * page wasn't compressible... just copy it out
4240 */
4241 memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE);
4242 } else if (c_size == 4) {
4243 int32_t data;
4244 int32_t *dptr;
4245
4246 /*
4247 * page was populated with a single value
4248 * that didn't fit into our fast hash,
4249 * so it was stored as a single non-compressed value;
4250 * fill the page with that value
4251 */
4252 dptr = (int32_t *)(uintptr_t)dst;
4253 data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]);
4254 sv_decompress(dptr, data);
4255 } else {
4256 uint32_t my_cpu_no;
4257 char *scratch_buf;
4258
4259 if (__probable(!kdp_mode)) {
4260 /*
4261 * we're behind the c_seg lock held in spin mode
4262 * which means pre-emption is disabled... therefore
4263 * the following sequence is atomic and safe
4264 */
4265 my_cpu_no = cpu_number();
4266
4267 assert(my_cpu_no < compressor_cpus);
4268
4269 scratch_buf = &compressor_scratch_bufs[my_cpu_no * vm_compressor_get_decode_scratch_size()];
4270 } else {
4271 scratch_buf = kdp_compressor_scratch_buf;
4272 }
4273
4274 if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) {
4275 #if defined(__arm__) || defined(__arm64__)
4276 uint16_t c_codec = cs->c_codec;
4277 uint32_t inline_popcount;
4278 if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset],
4279 (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) {
4280 retval = -1;
4281 } else {
4282 #if __ARM_WKDM_POPCNT__
4283 if (inline_popcount != cs->c_inline_popcount) {
4284 /*
4285 * The codec choice in compression and
4286 * decompression must agree, so there
4287 * should never be a disagreement in
4288 * whether an inline population count
4289 * was performed.
4290 */
4291 assert(inline_popcount != C_SLOT_NO_POPCOUNT);
4292 assert(cs->c_inline_popcount != C_SLOT_NO_POPCOUNT);
4293 printf("decompression failure from physical region %llx+%05x: popcount mismatch (%d != %d)\n",
4294 (unsigned long long)kvtophys((uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset]), c_size,
4295 inline_popcount,
4296 cs->c_inline_popcount);
4297 retval = -1;
4298 }
4299 #else
4300 assert(inline_popcount == C_SLOT_NO_POPCOUNT);
4301 #endif /* __ARM_WKDM_POPCNT__ */
4302 }
4303 #endif
4304 } else {
4305 #if defined(__arm64__)
4306 __unreachable_ok_push
4307 if (PAGE_SIZE == 4096) {
4308 WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4309 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4310 } else {
4311 WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4312 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4313 }
4314 __unreachable_ok_pop
4315 #else
4316 WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset],
4317 (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size);
4318 #endif
4319 }
4320 }
4321
4322 #if CHECKSUM_THE_DATA
4323 if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) {
4324 #if defined(__arm__) || defined(__arm64__)
4325 int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset];
4326 panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2));
4327 #else
4328 panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size);
4329 #endif
4330 }
4331 #endif
4332 if (c_seg->c_swappedin_ts == 0 && !kdp_mode) {
4333 clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec);
4334
4335 age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts;
4336 if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) {
4337 OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]);
4338 } else {
4339 OSAddAtomic(1, &overage_decompressions_during_sample_period);
4340 }
4341
4342 OSAddAtomic(1, &sample_period_decompression_count);
4343 }
4344 }
4345 #if CONFIG_FREEZE
4346 else {
4347 /*
4348 * We are freeing a compressed page from this c_seg without decompressing it, so balance the ledgers.
4349 */
4350 if (C_SEG_IS_ONDISK(c_seg)) {
4351 /*
4352 * The compression sweep feature will push out anonymous pages to disk
4353 * without going through the freezer path and so those c_segs, while
4354 * swapped out, won't have an owner.
4355 */
4356 if (c_seg->c_task_owner) {
4357 task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP);
4358 }
4359
4360 /*
4361 * We are freeing a page in swap without swapping it in. We bump the in-core
4362 * count here to simulate a swapin of a page so that we can accurately
4363 * decrement it below.
4364 */
4365 OSAddAtomic(1, &c_segment_pages_compressed_incore);
4366 }
4367 }
4368 #endif /* CONFIG_FREEZE */
4369
4370 if (flags & C_KEEP) {
4371 *zeroslot = 0;
4372 goto done;
4373 }
4374 assert(kdp_mode == FALSE);
4375
4376 c_seg->c_bytes_unused += c_rounded_size;
4377 c_seg->c_bytes_used -= c_rounded_size;
4378
4379 assert(c_seg->c_slots_used);
4380 c_seg->c_slots_used--;
4381
4382 PACK_C_SIZE(cs, 0);
4383
4384 if (c_indx < c_seg->c_firstemptyslot) {
4385 c_seg->c_firstemptyslot = c_indx;
4386 }
4387
4388 OSAddAtomic(-1, &c_segment_pages_compressed);
4389 #if CONFIG_FREEZE
4390 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4391 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
4392 #endif /* CONFIG_FREEZE */
4393
4394 if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) {
4395 /*
4396 * C_SEG_IS_ONDISK == TRUE can occur when we're doing a
4397 * free of a compressed page (i.e. dst == NULL)
4398 */
4399 OSAddAtomic64(-c_rounded_size, &compressor_bytes_used);
4400 }
4401 if (c_seg->c_busy_swapping) {
4402 /*
4403 * bypass case for c_busy_swapping...
4404 * let the swapin/swapout paths deal with putting
4405 * the c_seg on the minor compaction queue if needed
4406 */
4407 assert(c_seg->c_busy);
4408 goto done;
4409 }
4410 assert(!c_seg->c_busy);
4411
4412 if (c_seg->c_state != C_IS_FILLING) {
4413 if (c_seg->c_bytes_used == 0) {
4414 if (!(C_SEG_IS_ONDISK(c_seg))) {
4415 int pages_populated;
4416
4417 pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
4418 c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
4419
4420 if (pages_populated) {
4421 assert(c_seg->c_state != C_ON_BAD_Q);
4422 assert(c_seg->c_store.c_buffer != NULL);
4423
4424 C_SEG_BUSY(c_seg);
4425 lck_mtx_unlock_always(&c_seg->c_lock);
4426
4427 kernel_memory_depopulate(compressor_map,
4428 (vm_offset_t) c_seg->c_store.c_buffer,
4429 pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
4430
4431 lck_mtx_lock_spin_always(&c_seg->c_lock);
4432 C_SEG_WAKEUP_DONE(c_seg);
4433 }
4434 if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q) {
4435 c_seg_need_delayed_compaction(c_seg, FALSE);
4436 }
4437 } else {
4438 if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) {
4439 c_seg_move_to_sparse_list(c_seg);
4440 consider_defragmenting = TRUE;
4441 }
4442 }
4443 } else if (c_seg->c_on_minorcompact_q) {
4444 assert(c_seg->c_state != C_ON_BAD_Q);
4445 assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg));
4446
4447 if (C_SEG_SHOULD_MINORCOMPACT_NOW(c_seg)) {
4448 c_seg_try_minor_compaction_and_unlock(c_seg);
4449 need_unlock = FALSE;
4450 }
4451 } else if (!(C_SEG_IS_ONDISK(c_seg))) {
4452 if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q &&
4453 C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
4454 c_seg_need_delayed_compaction(c_seg, FALSE);
4455 }
4456 } else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) {
4457 c_seg_move_to_sparse_list(c_seg);
4458 consider_defragmenting = TRUE;
4459 }
4460 }
4461 done:
4462 if (__improbable(kdp_mode)) {
4463 return retval;
4464 }
4465
4466 if (need_unlock == TRUE) {
4467 lck_mtx_unlock_always(&c_seg->c_lock);
4468 }
4469
4470 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4471
4472 if (consider_defragmenting == TRUE) {
4473 vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE);
4474 }
4475
4476 #if !XNU_TARGET_OS_OSX
4477 if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) {
4478 vm_wake_compactor_swapper();
4479 }
4480 #endif /* !XNU_TARGET_OS_OSX */
4481
4482 return retval;
4483 }
4484
4485
4486 int
4487 vm_compressor_get(ppnum_t pn, int *slot, int flags)
4488 {
4489 c_slot_mapping_t slot_ptr;
4490 char *dst;
4491 int zeroslot = 1;
4492 int retval;
4493
4494 dst = pmap_map_compressor_page(pn);
4495 slot_ptr = (c_slot_mapping_t)slot;
4496
4497 assert(dst != NULL);
4498
4499 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4500 int32_t data;
4501 int32_t *dptr;
4502
4503 /*
4504 * page was populated with a single value
4505 * that found a home in our hash table;
4506 * grab that value from the hash and
4507 * populate the page with it
4508 */
4509 dptr = (int32_t *)(uintptr_t)dst;
4510 data = c_segment_sv_hash_table[slot_ptr->s_cindx].he_data;
4511 sv_decompress(dptr, data);
4512 if (!(flags & C_KEEP)) {
4513 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
4514
4515 OSAddAtomic(-1, &c_segment_pages_compressed);
4516 #if CONFIG_FREEZE
4517 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4518 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
4519 #endif /* CONFIG_FREEZE */
4520 *slot = 0;
4521 }
4522 if (data) {
4523 OSAddAtomic(1, &c_segment_svp_nonzero_decompressions);
4524 } else {
4525 OSAddAtomic(1, &c_segment_svp_zero_decompressions);
4526 }
4527
4528 pmap_unmap_compressor_page(pn, dst);
4529 return 0;
4530 }
4531
4532 retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot);
4533
4534 /*
4535 * zeroslot will be set to 0 by c_decompress_page if (flags & C_KEEP)
4536 * or (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be TRUE
4537 */
4538 if (zeroslot) {
4539 *slot = 0;
4540 }
4541
4542 pmap_unmap_compressor_page(pn, dst);
4543
4544 /*
4545 * returns 0 if we successfully decompressed a page from a segment already in memory
4546 * returns 1 if we had to first swap in the segment, before successfully decompressing the page
4547 * returns -1 if we encountered an error swapping in the segment - decompression failed
4548 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true
4549 */
4550 return retval;
4551 }
4552
4553 #if DEVELOPMENT || DEBUG
4554
4555 void
4556 vm_compressor_inject_error(int *slot)
4557 {
4558 c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot;
4559
4560 /* No error detection for single-value compression. */
4561 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4562 printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ );
4563 return;
4564 }
4565
4566 /* s_cseg is actually "segno+1" */
4567 const uint32_t c_segno = slot_ptr->s_cseg - 1;
4568
4569 assert(c_segno < c_segments_available);
4570 assert(c_segments[c_segno].c_segno >= c_segments_available);
4571
4572 const c_segment_t c_seg = c_segments[c_segno].c_seg;
4573
4574 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4575
4576 lck_mtx_lock_spin_always(&c_seg->c_lock);
4577 assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE);
4578
4579 const uint16_t c_indx = slot_ptr->s_cindx;
4580 assert(c_indx < c_seg->c_nextslot);
4581
4582 /*
4583 * To safely make this segment temporarily writable, we need to mark
4584 * the segment busy, which allows us to release the segment lock.
4585 */
4586 while (c_seg->c_busy) {
4587 c_seg_wait_on_busy(c_seg);
4588 lck_mtx_lock_spin_always(&c_seg->c_lock);
4589 }
4590 C_SEG_BUSY(c_seg);
4591
4592 bool already_writable = (c_seg->c_state == C_IS_FILLING);
4593 if (!already_writable) {
4594 /*
4595 * Protection update must be performed preemptibly, so temporarily drop
4596 * the lock. Having set c_busy will prevent most other concurrent
4597 * operations.
4598 */
4599 lck_mtx_unlock_always(&c_seg->c_lock);
4600 C_SEG_MAKE_WRITEABLE(c_seg);
4601 lck_mtx_lock_spin_always(&c_seg->c_lock);
4602 }
4603
4604 /*
4605 * Once we've released the lock following our c_state == C_IS_FILLING check,
4606 * c_current_seg_filled() can (re-)write-protect the segment. However, it
4607 * will transition from C_IS_FILLING before releasing the c_seg lock, so we
4608 * can detect this by re-checking after we've reobtained the lock.
4609 */
4610 if (already_writable && c_seg->c_state != C_IS_FILLING) {
4611 lck_mtx_unlock_always(&c_seg->c_lock);
4612 C_SEG_MAKE_WRITEABLE(c_seg);
4613 lck_mtx_lock_spin_always(&c_seg->c_lock);
4614 already_writable = false;
4615 /* Segment can't be freed while c_busy is set. */
4616 assert(c_seg->c_state != C_IS_FILLING);
4617 }
4618
4619 c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4620 int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset];
4621 /* assume that the compressed data holds at least one int32_t */
4622 assert(UNPACK_C_SIZE(cs) > sizeof(*data));
4623 /*
4624 * This bit is known to be in the payload of a MISS packet resulting from
4625 * the pattern used in the test pattern from decompression_failure.c.
4626 * Flipping it should result in many corrupted bits in the test page.
4627 */
4628 data[0] ^= 0x00000100;
4629 if (!already_writable) {
4630 lck_mtx_unlock_always(&c_seg->c_lock);
4631 C_SEG_WRITE_PROTECT(c_seg);
4632 lck_mtx_lock_spin_always(&c_seg->c_lock);
4633 }
4634
4635 C_SEG_WAKEUP_DONE(c_seg);
4636 lck_mtx_unlock_always(&c_seg->c_lock);
4637
4638 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4639 }
4640
4641 #endif /* DEVELOPMENT || DEBUG */
4642
4643 int
4644 vm_compressor_free(int *slot, int flags)
4645 {
4646 c_slot_mapping_t slot_ptr;
4647 int zeroslot = 1;
4648 int retval;
4649
4650 assert(flags == 0 || flags == C_DONT_BLOCK);
4651
4652 slot_ptr = (c_slot_mapping_t)slot;
4653
4654 if (slot_ptr->s_cseg == C_SV_CSEG_ID) {
4655 c_segment_sv_hash_drop_ref(slot_ptr->s_cindx);
4656 OSAddAtomic(-1, &c_segment_pages_compressed);
4657 #if CONFIG_FREEZE
4658 OSAddAtomic(-1, &c_segment_pages_compressed_incore);
4659 assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore);
4660 #endif /* CONFIG_FREEZE */
4661
4662 *slot = 0;
4663 return 0;
4664 }
4665 retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot);
4666 /*
4667 * returns 0 if we successfully freed the specified compressed page
4668 * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set
4669 */
4670
4671 if (retval == 0) {
4672 *slot = 0;
4673 } else {
4674 assert(retval == -2);
4675 }
4676
4677 return retval;
4678 }
4679
4680
4681 int
4682 vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf)
4683 {
4684 char *src;
4685 int retval;
4686
4687 src = pmap_map_compressor_page(pn);
4688 assert(src != NULL);
4689
4690 retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf);
4691 pmap_unmap_compressor_page(pn, src);
4692
4693 return retval;
4694 }
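
/*
 * Illustrative round trip (a sketch only; 'pn_in', 'pn_out' and 'scratch_buf'
 * are stand-in names, and the bare calls to vm_compressor_put() and
 * vm_compressor_get() stand in for what the VM pageout and fault paths
 * actually do with considerably more bookkeeping):
 *
 *	int slot = 0;
 *	void *chead = NULL;
 *
 *	if (vm_compressor_put(pn_in, &slot, &chead, scratch_buf) == 0) {
 *		// later, to bring the page back and release the slot:
 *		int r = vm_compressor_get(pn_out, &slot, 0);
 *		// r == 0: decompressed from an in-memory segment
 *		// r == 1: the segment had to be swapped in first
 *	}
 */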
4695
4696 void
4697 vm_compressor_transfer(
4698 int *dst_slot_p,
4699 int *src_slot_p)
4700 {
4701 c_slot_mapping_t dst_slot, src_slot;
4702 c_segment_t c_seg;
4703 uint16_t c_indx;
4704 c_slot_t cs;
4705
4706 src_slot = (c_slot_mapping_t) src_slot_p;
4707
4708 if (src_slot->s_cseg == C_SV_CSEG_ID) {
4709 *dst_slot_p = *src_slot_p;
4710 *src_slot_p = 0;
4711 return;
4712 }
4713 dst_slot = (c_slot_mapping_t) dst_slot_p;
4714 Retry:
4715 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4716 /* get segment for src_slot */
4717 c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
4718 /* lock segment */
4719 lck_mtx_lock_spin_always(&c_seg->c_lock);
4720 /* wait if it's busy */
4721 if (c_seg->c_busy && !c_seg->c_busy_swapping) {
4722 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4723 c_seg_wait_on_busy(c_seg);
4724 goto Retry;
4725 }
4726 /* find the c_slot */
4727 c_indx = src_slot->s_cindx;
4728 cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
4729 /* point the c_slot back to dst_slot instead of src_slot */
4730 C_SLOT_ASSERT_PACKABLE(dst_slot);
4731 cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
4732 /* transfer */
4733 *dst_slot_p = *src_slot_p;
4734 *src_slot_p = 0;
4735 lck_mtx_unlock_always(&c_seg->c_lock);
4736 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4737 }
4738
4739 #if CONFIG_FREEZE
4740
4741 int freezer_finished_filling = 0;
4742
4743 void
4744 vm_compressor_finished_filling(
4745 void **current_chead)
4746 {
4747 c_segment_t c_seg;
4748
4749 if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
4750 return;
4751 }
4752
4753 assert(c_seg->c_state == C_IS_FILLING);
4754
4755 lck_mtx_lock_spin_always(&c_seg->c_lock);
4756
4757 c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
4758
4759 lck_mtx_unlock_always(&c_seg->c_lock);
4760
4761 freezer_finished_filling++;
4762 }
4763
4764
4765 /*
4766 * This routine is used to transfer the compressed chunks from
4767 * the c_seg/cindx pointed to by slot_p into a new c_seg headed
4768 * by the current_chead and a new cindx within that c_seg.
4769 *
4770 * Currently, this routine is only used by the "freezer backed by
4771 * compressor with swap" mode to create a series of c_segs that
4772 * only contain compressed data belonging to one task. So, we
4773 * move a task's previously compressed data into a set of new
4774 * c_segs which will also hold the task's yet to be compressed data.
4775 */
4776
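/*
 * Rough shape of the relocation below (informal summary; skip and error paths
 * omitted): allocate or reuse the destination c_seg via current_chead, mark
 * both source and destination busy, memcpy the compressed bytes from the
 * source slot into the destination buffer, copy the slot metadata with
 * cslot_copy(), and finally repoint the external slot mapping
 * (s_cseg/s_cindx) at the new <segment, index> location.
 */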
4777 kern_return_t
4778 vm_compressor_relocate(
4779 void **current_chead,
4780 int *slot_p)
4781 {
4782 c_slot_mapping_t slot_ptr;
4783 c_slot_mapping_t src_slot;
4784 uint32_t c_rounded_size;
4785 uint32_t c_size;
4786 uint16_t dst_slot;
4787 c_slot_t c_dst;
4788 c_slot_t c_src;
4789 uint16_t c_indx;
4790 c_segment_t c_seg_dst = NULL;
4791 c_segment_t c_seg_src = NULL;
4792 kern_return_t kr = KERN_SUCCESS;
4793
4794
4795 src_slot = (c_slot_mapping_t) slot_p;
4796
4797 if (src_slot->s_cseg == C_SV_CSEG_ID) {
4798 /*
4799 * no need to relocate... this is a page full of a single
4800 * value which is hashed to a single entry not contained
4801 * in a c_segment_t
4802 */
4803 return kr;
4804 }
4805
4806 Relookup_dst:
4807 c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
4808 /*
4809 * returns with c_seg lock held
4810 * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
4811 * c_nextslot has been allocated and
4812 * c_store.c_buffer populated
4813 */
4814 if (c_seg_dst == NULL) {
4815 /*
4816 * Out of compression segments?
4817 */
4818 kr = KERN_RESOURCE_SHORTAGE;
4819 goto out;
4820 }
4821
4822 assert(c_seg_dst->c_busy == 0);
4823
4824 C_SEG_BUSY(c_seg_dst);
4825
4826 dst_slot = c_seg_dst->c_nextslot;
4827
4828 lck_mtx_unlock_always(&c_seg_dst->c_lock);
4829
4830 Relookup_src:
4831 c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
4832
4833 assert(c_seg_dst != c_seg_src);
4834
4835 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
4836
4837 if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
4838 c_seg_src->c_state == C_IS_FILLING) {
4839 /*
4840 * Skip this page if:
4841 * a) the src c_seg is already on-disk (or on its way there)
4842 * A "thaw" can mark a process as eligible for
4843 * another freeze cycle without bringing any of
4844 * its swapped out c_segs back from disk (because
4845 * that is done on-demand).
4846 * Or, this page may be mapped elsewhere in the task's map,
4847 * and we may have marked it for swap already.
4848 *
4849 * b) Or, the src c_seg is being filled by the compressor
4850 * thread. We don't want the added latency of waiting for
4851 * this c_seg in the freeze path and so we skip it.
4852 */
4853
4854 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4855
4856 lck_mtx_unlock_always(&c_seg_src->c_lock);
4857
4858 c_seg_src = NULL;
4859
4860 goto out;
4861 }
4862
4863 if (c_seg_src->c_busy) {
4864 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4865 c_seg_wait_on_busy(c_seg_src);
4866
4867 c_seg_src = NULL;
4868
4869 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4870
4871 goto Relookup_src;
4872 }
4873
4874 C_SEG_BUSY(c_seg_src);
4875
4876 lck_mtx_unlock_always(&c_seg_src->c_lock);
4877
4878 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4879
4880 /* find the c_slot */
4881 c_indx = src_slot->s_cindx;
4882
4883 c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
4884
4885 c_size = UNPACK_C_SIZE(c_src);
4886
4887 assert(c_size);
4888
4889 if (c_size > (uint32_t)(C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
4890 /*
4891 * This segment is full. We need a new one.
4892 */
4893
4894 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4895
4896 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
4897 C_SEG_WAKEUP_DONE(c_seg_src);
4898 lck_mtx_unlock_always(&c_seg_src->c_lock);
4899
4900 c_seg_src = NULL;
4901
4902 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
4903
4904 assert(c_seg_dst->c_busy);
4905 assert(c_seg_dst->c_state == C_IS_FILLING);
4906 assert(!c_seg_dst->c_on_minorcompact_q);
4907
4908 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
4909 assert(*current_chead == NULL);
4910
4911 C_SEG_WAKEUP_DONE(c_seg_dst);
4912
4913 lck_mtx_unlock_always(&c_seg_dst->c_lock);
4914
4915 c_seg_dst = NULL;
4916
4917 PAGE_REPLACEMENT_DISALLOWED(FALSE);
4918
4919 goto Relookup_dst;
4920 }
4921
4922 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
4923
4924 memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
4925 /*
4926 * Is platform alignment actually necessary since wkdm aligns its output?
4927 */
4928 c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
4929
4930 cslot_copy(c_dst, c_src);
4931 c_dst->c_offset = c_seg_dst->c_nextoffset;
4932
4933 if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
4934 c_seg_dst->c_firstemptyslot++;
4935 }
4936
4937 c_seg_dst->c_slots_used++;
4938 c_seg_dst->c_nextslot++;
4939 c_seg_dst->c_bytes_used += c_rounded_size;
4940 c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
4941
4942
4943 PACK_C_SIZE(c_src, 0);
4944
4945 c_seg_src->c_bytes_used -= c_rounded_size;
4946 c_seg_src->c_bytes_unused += c_rounded_size;
4947
4948 assert(c_seg_src->c_slots_used);
4949 c_seg_src->c_slots_used--;
4950
4951 if (c_indx < c_seg_src->c_firstemptyslot) {
4952 c_seg_src->c_firstemptyslot = c_indx;
4953 }
4954
4955 c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
4956
4957 PAGE_REPLACEMENT_ALLOWED(TRUE);
4958 slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
4959 /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
4960 slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
4961 slot_ptr->s_cindx = dst_slot;
4962
4963 PAGE_REPLACEMENT_ALLOWED(FALSE);
4964
4965 out:
4966 if (c_seg_src) {
4967 lck_mtx_lock_spin_always(&c_seg_src->c_lock);
4968
4969 C_SEG_WAKEUP_DONE(c_seg_src);
4970
4971 if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
4972 if (!c_seg_src->c_on_minorcompact_q) {
4973 c_seg_need_delayed_compaction(c_seg_src, FALSE);
4974 }
4975 }
4976
4977 lck_mtx_unlock_always(&c_seg_src->c_lock);
4978 }
4979
4980 if (c_seg_dst) {
4981 PAGE_REPLACEMENT_DISALLOWED(TRUE);
4982
4983 lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
4984
4985 if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
4986 /*
4987 * Nearing or exceeded maximum slot and offset capacity.
4988 */
4989 assert(c_seg_dst->c_busy);
4990 assert(c_seg_dst->c_state == C_IS_FILLING);
4991 assert(!c_seg_dst->c_on_minorcompact_q);
4992
4993 c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
4994 assert(*current_chead == NULL);
4995 }
4996
4997 C_SEG_WAKEUP_DONE(c_seg_dst);
4998
4999 lck_mtx_unlock_always(&c_seg_dst->c_lock);
5000
5001 c_seg_dst = NULL;
5002
5003 PAGE_REPLACEMENT_DISALLOWED(FALSE);
5004 }
5005
5006 return kr;
5007 }
5008 #endif /* CONFIG_FREEZE */