/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include "vm_compressor_backing_store.h"
#include <vm/vm_protos.h>

#include <IOKit/IOHibernatePrivate.h>

boolean_t	compressor_store_stop_compaction = FALSE;
boolean_t	vm_swap_up = FALSE;
boolean_t	vm_swapfile_create_needed = FALSE;
boolean_t	vm_swapfile_gc_needed = FALSE;

int		swapper_throttle = -1;
boolean_t	swapper_throttle_inited = FALSE;
uint64_t	vm_swapout_thread_id;

uint64_t	vm_swap_put_failures = 0;
uint64_t	vm_swap_get_failures = 0;
int		vm_num_swap_files = 0;
int		vm_swapout_thread_processed_segments = 0;
int		vm_swapout_thread_awakened = 0;
int		vm_swapfile_create_thread_awakened = 0;
int		vm_swapfile_create_thread_running = 0;
int		vm_swapfile_gc_thread_awakened = 0;
int		vm_swapfile_gc_thread_running = 0;

unsigned int	vm_swapfile_total_segs_alloced = 0;
unsigned int	vm_swapfile_total_segs_used = 0;

#define SWAP_READY	0x1	/* Swap file is ready to be used */
#define SWAP_RECLAIM	0x2	/* Swap file is marked to be reclaimed */
#define SWAP_WANTED	0x4	/* Swap file has waiters */
#define SWAP_REUSE	0x8	/* Swap file is on the Q and has a name. Reuse after init-ing.*/

struct swapfile {
	queue_head_t		swp_queue;	/* list of swap files */
	char			*swp_path;	/* saved pathname of swap file */
	struct vnode		*swp_vp;	/* backing vnode */
	uint64_t		swp_size;	/* size of this swap file */
	uint8_t			*swp_bitmap;	/* bitmap showing the alloced/freed slots in the swap file */
	unsigned int		swp_pathlen;	/* length of pathname */
	unsigned int		swp_nsegs;	/* #segments we can use */
	unsigned int		swp_nseginuse;	/* #segments in use */
	unsigned int		swp_index;	/* index of this swap file */
	unsigned int		swp_flags;	/* state of swap file */
	unsigned int		swp_free_hint;	/* offset of 1st free chunk */
	unsigned int		swp_io_count;	/* count of outstanding I/Os */
	c_segment_t		*swp_csegs;	/* back pointers to the c_segments. Used during swap reclaim. */

	struct trim_list	*swp_delayed_trim_list_head;
	unsigned int		swp_delayed_trim_count;
};

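/*
 * A swap "handle" (the f_offset passed around below) identifies both the
 * swapfile and the location within it: vm_swap_put packs the file's
 * swp_index into the bits above SWAP_DEVICE_SHIFT and the byte offset of
 * the segment within the file into the low bits, i.e.
 *
 *	handle = (swp_index << SWAP_DEVICE_SHIFT) | (segidx * COMPRESSED_SWAP_CHUNK_SIZE)
 *
 * vm_swapfile_for_handle reverses this using SWAP_SLOT_MASK and
 * SWAP_DEVICE_SHIFT.
 */
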
queue_head_t	swf_global_queue;
boolean_t	swp_trim_supported = FALSE;

#define VM_SWAPFILE_DELAYED_TRIM_MAX	128

extern clock_sec_t	dont_trim_until_ts;
clock_sec_t		vm_swapfile_last_failed_to_create_ts = 0;
clock_sec_t		vm_swapfile_last_successful_create_ts = 0;
int			vm_swapfile_can_be_created = FALSE;
boolean_t		delayed_trim_handling_in_progress = FALSE;

static void vm_swapout_thread_throttle_adjust(void);
static void vm_swap_free_now(struct swapfile *swf, uint64_t f_offset);
static void vm_swapout_thread(void);
static void vm_swapfile_create_thread(void);
static void vm_swapfile_gc_thread(void);
static void vm_swap_defragment(void);
static void vm_swap_handle_delayed_trims(boolean_t);
static void vm_swap_do_delayed_trim(struct swapfile *);
static void vm_swap_wait_on_trim_handling_in_progress(void);

#define VM_SWAP_SHOULD_DEFRAGMENT()	(c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 4) ? 1 : 0)
#define VM_SWAP_SHOULD_RECLAIM()	(((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) >= SWAPFILE_RECLAIM_THRESHOLD_SEGS) ? 1 : 0)
#define VM_SWAP_SHOULD_ABORT_RECLAIM()	(((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) <= SWAPFILE_RECLAIM_MINIMUM_SEGS) ? 1 : 0)
#define VM_SWAP_SHOULD_CREATE(cur_ts)	(((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < (unsigned int)VM_SWAPFILE_HIWATER_SEGS) && \
					 ((cur_ts - vm_swapfile_last_failed_to_create_ts) > VM_SWAPFILE_DELAYED_CREATE) ? 1 : 0)
#define VM_SWAP_SHOULD_TRIM(swf)	((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0)

#define VM_SWAPFILE_DELAYED_CREATE	15

#define VM_SWAP_BUSY()	((c_swapout_count && (swapper_throttle == THROTTLE_LEVEL_COMPRESSOR_TIER1 || swapper_throttle == THROTTLE_LEVEL_COMPRESSOR_TIER0)) ? 1 : 0)

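/*
 * The policy macros above are driven by simple segment accounting:
 * (vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) is the
 * number of free segments across all swapfiles.  A new swapfile is wanted
 * when that count drops below VM_SWAPFILE_HIWATER_SEGS (but we hold off for
 * VM_SWAPFILE_DELAYED_CREATE (15) seconds after a failed create), and a
 * reclaim becomes worthwhile once the free count reaches
 * SWAPFILE_RECLAIM_THRESHOLD_SEGS.
 */
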
#if CHECKSUM_THE_SWAP
extern unsigned int hash_string(char *cp, int len);
#endif /* CHECKSUM_THE_SWAP */

#if ENCRYPTED_SWAP
extern boolean_t		swap_crypt_ctx_initialized;
extern void			swap_crypt_ctx_initialize(void);
extern const unsigned char	swap_crypt_null_iv[AES_BLOCK_SIZE];
extern aes_ctx			swap_crypt_ctx;
extern unsigned long		vm_page_encrypt_counter;
extern unsigned long		vm_page_decrypt_counter;
#endif /* ENCRYPTED_SWAP */

extern void vm_pageout_io_throttle(void);
extern void vm_pageout_reinit_tuneables(void);
extern void vm_swap_file_set_tuneables(void);

struct swapfile *vm_swapfile_for_handle(uint64_t);

/*
 * Called with the vm_swap_data_lock held.
 */

struct swapfile *
vm_swapfile_for_handle(uint64_t f_offset)
{
	uint64_t	file_offset = 0;
	unsigned int	swapfile_index = 0;
	struct swapfile	*swf = NULL;

	file_offset = (f_offset & SWAP_SLOT_MASK);
	swapfile_index = (f_offset >> SWAP_DEVICE_SHIFT);

	swf = (struct swapfile *) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {

		if (swapfile_index == swf->swp_index) {
			break;
		}
		swf = (struct swapfile *) queue_next(&swf->swp_queue);
	}
	if (queue_end(&swf_global_queue, (queue_entry_t) swf)) {
		swf = NULL;
	}
	return swf;
}

void
vm_compressor_swap_init()
{
	thread_t	thread = NULL;

	lck_grp_attr_setdefault(&vm_swap_data_lock_grp_attr);
	lck_grp_init(&vm_swap_data_lock_grp,
		     "vm_swap_data",
		     &vm_swap_data_lock_grp_attr);
	lck_attr_setdefault(&vm_swap_data_lock_attr);
	lck_mtx_init_ext(&vm_swap_data_lock,
			 &vm_swap_data_lock_ext,
			 &vm_swap_data_lock_grp,
			 &vm_swap_data_lock_attr);

	queue_init(&swf_global_queue);

	if (kernel_thread_start_priority((thread_continue_t)vm_swapout_thread, NULL,
					 BASEPRI_PREEMPT - 1, &thread) != KERN_SUCCESS) {
		panic("vm_swapout_thread: create failed");
	}
	thread->options |= TH_OPT_VMPRIV;
	vm_swapout_thread_id = thread->thread_id;

	thread_deallocate(thread);

	if (kernel_thread_start_priority((thread_continue_t)vm_swapfile_create_thread, NULL,
					 BASEPRI_PREEMPT - 1, &thread) != KERN_SUCCESS) {
		panic("vm_swapfile_create_thread: create failed");
	}
	thread->options |= TH_OPT_VMPRIV;

	thread_deallocate(thread);

	if (kernel_thread_start_priority((thread_continue_t)vm_swapfile_gc_thread, NULL,
					 BASEPRI_PREEMPT - 1, &thread) != KERN_SUCCESS) {
		panic("vm_swapfile_gc_thread: create failed");
	}
	thread_deallocate(thread);

	proc_set_task_policy_thread(kernel_task, thread->thread_id,
				    TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2);
	proc_set_task_policy_thread(kernel_task, thread->thread_id,
				    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);

#if ENCRYPTED_SWAP
	if (swap_crypt_ctx_initialized == FALSE) {
		swap_crypt_ctx_initialize();
	}
#endif /* ENCRYPTED_SWAP */

	memset(swapfilename, 0, MAX_SWAPFILENAME_LEN + 1);

	vm_swap_up = TRUE;

	printf("VM Swap Subsystem is %s\n", (vm_swap_up == TRUE) ? "ON" : "OFF");
}

void
vm_swap_file_set_tuneables()
{
	struct vnode	*vp;
	char		*pathname;
	int		namelen;

	if (strlen(swapfilename) == 0) {
		/*
		 * If no swapfile name has been set, we'll
		 * use the default name.
		 *
		 * Also, this function is only called from the vm_pageout_scan thread
		 * via vm_consider_waking_compactor_swapper,
		 * so we don't need to worry about a race in checking/setting the name here.
		 */
		strlcpy(swapfilename, SWAP_FILE_NAME, MAX_SWAPFILENAME_LEN);
	}
	namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1;
	pathname = (char*)kalloc(namelen);
	memset(pathname, 0, namelen);
	snprintf(pathname, namelen, "%s%d", swapfilename, 0);

	vm_swapfile_open(pathname, &vp);

	if (vp == NULL)
		goto done;

	if (vnode_pager_isSSD(vp) == FALSE)
		vm_pageout_reinit_tuneables();
	vnode_setswapmount(vp);
	vm_swapfile_close((uint64_t)pathname, vp);
done:
	kfree(pathname, namelen);
}

#if ENCRYPTED_SWAP
void
vm_swap_encrypt(c_segment_t c_seg)
{
	vm_offset_t	kernel_vaddr = 0;
	uint64_t	size = 0;

	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		void		*c_seg;
	} encrypt_iv;

	assert(swap_crypt_ctx_initialized);

	bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));

	encrypt_iv.c_seg = (void*)c_seg;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&encrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	kernel_vaddr = (vm_offset_t) c_seg->c_store.c_buffer;
	size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

	/*
	 * Encrypt the c_segment.
	 */
	aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
			&encrypt_iv.aes_iv[0],
			(unsigned int)(size / AES_BLOCK_SIZE),
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.encrypt);

	vm_page_encrypt_counter += (size/PAGE_SIZE_64);
}

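/*
 * Note that the per-segment IV is derived rather than stored: the c_seg
 * pointer is placed in an AES block and run through AES-CBC with
 * swap_crypt_null_iv.  vm_swap_decrypt below rebuilds the IV from the same
 * c_seg pointer, so a segment can only be decrypted back into the c_segment
 * it was encrypted from.
 */
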
void
vm_swap_decrypt(c_segment_t c_seg)
{
	vm_offset_t	kernel_vaddr = 0;
	uint64_t	size = 0;

	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		void		*c_seg;
	} decrypt_iv;

	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the decryption.
	 * It has to be the same as the "initial vector" we
	 * used to encrypt that page.
	 */
	bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));

	decrypt_iv.c_seg = (void*)c_seg;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&decrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	kernel_vaddr = (vm_offset_t) c_seg->c_store.c_buffer;
	size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

	/*
	 * Decrypt the c_segment.
	 */
	aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
			&decrypt_iv.aes_iv[0],
			(unsigned int) (size / AES_BLOCK_SIZE),
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.decrypt);

	vm_page_decrypt_counter += (size/PAGE_SIZE_64);
}
#endif /* ENCRYPTED_SWAP */

void
vm_swap_consider_defragmenting()
{
	if (compressor_store_stop_compaction == FALSE && !VM_SWAP_BUSY() &&
	    (VM_SWAP_SHOULD_DEFRAGMENT() || VM_SWAP_SHOULD_RECLAIM())) {

		if (!vm_swapfile_gc_thread_running) {
			lck_mtx_lock(&vm_swap_data_lock);

			if (!vm_swapfile_gc_thread_running)
				thread_wakeup((event_t) &vm_swapfile_gc_needed);

			lck_mtx_unlock(&vm_swap_data_lock);
		}
	}
}

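/*
 * The unlocked check of vm_swapfile_gc_thread_running above only avoids
 * taking vm_swap_data_lock on every call; the check is repeated under the
 * lock, and since the gc thread asserts its wait while holding the same
 * lock, the wakeup can't slip in between its check and its sleep.
 */
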
int	vm_swap_defragment_yielded = 0;
int	vm_swap_defragment_swapin = 0;
int	vm_swap_defragment_free = 0;
int	vm_swap_defragment_busy = 0;

static void
vm_swap_defragment()
{
	c_segment_t	c_seg;

	/*
	 * have to grab the master lock w/o holding
	 * any locks in spin mode
	 */
	PAGE_REPLACEMENT_DISALLOWED(TRUE);

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(&c_swappedout_sparse_list_head)) {

		if (compressor_store_stop_compaction == TRUE || VM_SWAP_BUSY()) {
			vm_swap_defragment_yielded++;
			break;
		}
		c_seg = (c_segment_t)queue_first(&c_swappedout_sparse_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_on_swappedout_sparse_q);

		if (c_seg->c_busy) {
			lck_mtx_unlock_always(c_list_lock);

			PAGE_REPLACEMENT_DISALLOWED(FALSE);
			/*
			 * c_seg_wait_on_busy consumes c_seg->c_lock
			 */
			c_seg_wait_on_busy(c_seg);

			PAGE_REPLACEMENT_DISALLOWED(TRUE);

			lck_mtx_lock_spin_always(c_list_lock);

			vm_swap_defragment_busy++;

			continue;
		}
		if (c_seg->c_bytes_used == 0) {
			/*
			 * c_seg_free_locked consumes the c_list_lock
			 * and c_seg->c_lock
			 */
			c_seg_free_locked(c_seg);

			vm_swap_defragment_free++;
		} else {
			lck_mtx_unlock_always(c_list_lock);

			c_seg_swapin(c_seg, TRUE);

			lck_mtx_unlock_always(&c_seg->c_lock);

			vm_swap_defragment_swapin++;
		}
		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		vm_pageout_io_throttle();

		/*
		 * because write waiters have privilege over readers,
		 * dropping and immediately retaking the master lock will
		 * still allow any thread waiting to acquire the
		 * master lock exclusively an opportunity to take it
		 */
		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(c_list_lock);
	}
	lck_mtx_unlock_always(c_list_lock);

	PAGE_REPLACEMENT_DISALLOWED(FALSE);
}

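/*
 * "Defragmentation" here is really consolidation: a sparse swapped-out
 * segment is one whose compressed data has been largely freed, so the
 * swapfile slot it occupies is poorly utilized.  Swapping such segments
 * back in (or freeing them outright when c_bytes_used is 0) releases their
 * slots, which in turn allows vm_swap_reclaim to retire entire swapfiles.
 */
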
static void
vm_swapfile_create_thread(void)
{
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	vm_swapfile_create_thread_awakened++;
	vm_swapfile_create_thread_running = 1;

	while (TRUE) {
		/*
		 * walk through the list of swap files
		 * and do the delayed frees/trims for
		 * any swap file whose count of delayed
		 * frees is above the batch limit
		 */
		vm_swap_handle_delayed_trims(FALSE);

		lck_mtx_lock(&vm_swap_data_lock);

		clock_get_system_nanotime(&sec, &nsec);

		if (VM_SWAP_SHOULD_CREATE(sec) == 0)
			break;

		lck_mtx_unlock(&vm_swap_data_lock);

		if (vm_swap_create_file() == FALSE) {
			vm_swapfile_last_failed_to_create_ts = sec;
			HIBLOG("vm_swap_create_file failed @ %lu secs\n", (unsigned long)sec);

		} else
			vm_swapfile_last_successful_create_ts = sec;
	}
	vm_swapfile_create_thread_running = 0;

	assert_wait((event_t)&vm_swapfile_create_needed, THREAD_UNINT);

	lck_mtx_unlock(&vm_swap_data_lock);

	thread_block((thread_continue_t)vm_swapfile_create_thread);

	/* NOTREACHED */
}

static void
vm_swapfile_gc_thread(void)
{
	boolean_t	need_defragment;
	boolean_t	need_reclaim;

	vm_swapfile_gc_thread_awakened++;
	vm_swapfile_gc_thread_running = 1;

	while (TRUE) {

		lck_mtx_lock(&vm_swap_data_lock);

		if (VM_SWAP_BUSY() || compressor_store_stop_compaction == TRUE)
			break;

		need_defragment = FALSE;
		need_reclaim = FALSE;

		if (VM_SWAP_SHOULD_DEFRAGMENT())
			need_defragment = TRUE;

		if (VM_SWAP_SHOULD_RECLAIM()) {
			need_defragment = TRUE;
			need_reclaim = TRUE;
		}
		if (need_defragment == FALSE && need_reclaim == FALSE)
			break;

		lck_mtx_unlock(&vm_swap_data_lock);

		if (need_defragment == TRUE)
			vm_swap_defragment();
		if (need_reclaim == TRUE)
			vm_swap_reclaim();
	}
	vm_swapfile_gc_thread_running = 0;

	assert_wait((event_t)&vm_swapfile_gc_needed, THREAD_UNINT);

	lck_mtx_unlock(&vm_swap_data_lock);

	thread_block((thread_continue_t)vm_swapfile_gc_thread);

	/* NOTREACHED */
}

int	swapper_entered_T0 = 0;
int	swapper_entered_T1 = 0;
int	swapper_entered_T2 = 0;

static void
vm_swapout_thread_throttle_adjust(void)
{
	int swapper_throttle_new;

	if (swapper_throttle_inited == FALSE) {
		/*
		 * force this thread to be set to the correct
		 * throttling tier
		 */
		swapper_throttle_new = THROTTLE_LEVEL_COMPRESSOR_TIER2;
		swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER1;
		swapper_throttle_inited = TRUE;
		swapper_entered_T2++;
		goto done;
	}
	swapper_throttle_new = swapper_throttle;

	switch(swapper_throttle) {

	case THROTTLE_LEVEL_COMPRESSOR_TIER2:

		if (SWAPPER_NEEDS_TO_UNTHROTTLE() || swapout_target_age || hibernate_flushing == TRUE) {
			swapper_throttle_new = THROTTLE_LEVEL_COMPRESSOR_TIER1;
			swapper_entered_T1++;
		}
		break;

	case THROTTLE_LEVEL_COMPRESSOR_TIER1:

		if (VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
			swapper_throttle_new = THROTTLE_LEVEL_COMPRESSOR_TIER0;
			swapper_entered_T0++;
			break;
		}
		if (COMPRESSOR_NEEDS_TO_SWAP() == 0 && swapout_target_age == 0 && hibernate_flushing == FALSE) {
			swapper_throttle_new = THROTTLE_LEVEL_COMPRESSOR_TIER2;
			swapper_entered_T2++;
		}
		break;

	case THROTTLE_LEVEL_COMPRESSOR_TIER0:

		if (COMPRESSOR_NEEDS_TO_SWAP() == 0) {
			swapper_throttle_new = THROTTLE_LEVEL_COMPRESSOR_TIER2;
			swapper_entered_T2++;
			break;
		}
		if (SWAPPER_NEEDS_TO_UNTHROTTLE() == 0) {
			swapper_throttle_new = THROTTLE_LEVEL_COMPRESSOR_TIER1;
			swapper_entered_T1++;
		}
		break;
	}
done:
	if (swapper_throttle != swapper_throttle_new) {
		proc_set_task_policy_thread(kernel_task, vm_swapout_thread_id,
					    TASK_POLICY_INTERNAL, TASK_POLICY_IO, swapper_throttle_new);
		proc_set_task_policy_thread(kernel_task, vm_swapout_thread_id,
					    TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE);

		swapper_throttle = swapper_throttle_new;
	}
}

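/*
 * Tier ladder used above (a lower tier means higher I/O priority):
 *
 *	TIER2 -> TIER1	the swapper needs to unthrottle, a swapout target
 *			age is set, or a hibernate flush is in progress
 *	TIER1 -> TIER0	vm_pageout_scan is being throttled by the swapper
 *	TIER1 -> TIER2	no swap pressure remains
 *	TIER0 -> TIER1 or TIER2	pressure has subsided
 *
 * The new tier is applied to the swapout thread via
 * proc_set_task_policy_thread only when it actually changes.
 */
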
static void
vm_swapout_thread(void)
{
	uint64_t	f_offset = 0;
	uint32_t	size = 0;
	c_segment_t	c_seg = NULL;
	kern_return_t	kr = KERN_SUCCESS;
	vm_offset_t	addr = 0;

	vm_swapout_thread_awakened++;

	lck_mtx_lock_spin_always(c_list_lock);

	while (!queue_empty(&c_swapout_list_head)) {

		c_seg = (c_segment_t)queue_first(&c_swapout_list_head);

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_on_swapout_q);

		if (c_seg->c_busy) {
			lck_mtx_unlock_always(c_list_lock);

			c_seg_wait_on_busy(c_seg);

			lck_mtx_lock_spin_always(c_list_lock);

			continue;
		}
		queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list);
		c_seg->c_on_swapout_q = 0;
		c_swapout_count--;

		vm_swapout_thread_processed_segments++;

		thread_wakeup((event_t)&compaction_swapper_running);

		size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

		if (size == 0) {
			c_seg_free_locked(c_seg);
			goto c_seg_was_freed;
		}
		C_SEG_BUSY(c_seg);
		c_seg->c_busy_swapping = 1;

		lck_mtx_unlock_always(c_list_lock);

		addr = (vm_offset_t) c_seg->c_store.c_buffer;

		lck_mtx_unlock_always(&c_seg->c_lock);

#if CHECKSUM_THE_SWAP
		c_seg->cseg_hash = hash_string((char*)addr, (int)size);
		c_seg->cseg_swap_size = size;
#endif /* CHECKSUM_THE_SWAP */

#if ENCRYPTED_SWAP
		vm_swap_encrypt(c_seg);
#endif /* ENCRYPTED_SWAP */

		vm_swapout_thread_throttle_adjust();

		kr = vm_swap_put((vm_offset_t) addr, &f_offset, size, c_seg);

		PAGE_REPLACEMENT_DISALLOWED(TRUE);

		lck_mtx_lock_spin_always(c_list_lock);
		lck_mtx_lock_spin_always(&c_seg->c_lock);

		if (kr == KERN_SUCCESS) {

			if (C_SEG_ONDISK_IS_SPARSE(c_seg) && hibernate_flushing == FALSE) {

				c_seg_insert_into_q(&c_swappedout_sparse_list_head, c_seg);
				c_seg->c_on_swappedout_sparse_q = 1;
				c_swappedout_sparse_count++;

			} else {
				if (hibernate_flushing == TRUE && (c_seg->c_generation_id >= first_c_segment_to_warm_generation_id &&
								   c_seg->c_generation_id <= last_c_segment_to_warm_generation_id))
					queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
				else
					queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list);
				c_seg->c_on_swappedout_q = 1;
				c_swappedout_count++;
			}
			c_seg->c_store.c_swap_handle = f_offset;
			c_seg->c_ondisk = 1;

			VM_STAT_INCR_BY(swapouts, size >> PAGE_SHIFT);

			if (c_seg->c_bytes_used)
				OSAddAtomic64(-c_seg->c_bytes_used, &compressor_bytes_used);
		} else {
#if ENCRYPTED_SWAP
			vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */
			c_seg_insert_into_q(&c_age_list_head, c_seg);
			c_seg->c_on_age_q = 1;
			c_age_count++;

			vm_swap_put_failures++;
		}
		lck_mtx_unlock_always(c_list_lock);

		if (c_seg->c_must_free)
			c_seg_free(c_seg);
		else {
			c_seg->c_busy_swapping = 0;
			C_SEG_WAKEUP_DONE(c_seg);
			lck_mtx_unlock_always(&c_seg->c_lock);
		}

		if (kr == KERN_SUCCESS)
			kernel_memory_depopulate(kernel_map, (vm_offset_t) addr, size, KMA_COMPRESSOR);

		PAGE_REPLACEMENT_DISALLOWED(FALSE);

		if (kr == KERN_SUCCESS) {
			kmem_free(kernel_map, (vm_offset_t) addr, C_SEG_ALLOCSIZE);
			OSAddAtomic64(-C_SEG_ALLOCSIZE, &compressor_kvspace_used);
		}
		vm_pageout_io_throttle();
c_seg_was_freed:
		if (c_swapout_count == 0)
			vm_swap_consider_defragmenting();

		lck_mtx_lock_spin_always(c_list_lock);
	}
	assert_wait((event_t)&c_swapout_list_head, THREAD_UNINT);

	lck_mtx_unlock_always(c_list_lock);

	thread_block((thread_continue_t)vm_swapout_thread);

	/* NOTREACHED */
}

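/*
 * Bookkeeping on the two exits from vm_swap_put above: on success the
 * segment's backing pages are depopulated and its kernel virtual space is
 * freed, since the data now lives only in the swapfile; on failure the
 * segment is decrypted in place and returned to the age queue, so the
 * compressor still owns it in memory.
 */
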
boolean_t
vm_swap_create_file()
{
	uint64_t	size = 0;
	int		namelen = 0;
	boolean_t	swap_file_created = FALSE;
	boolean_t	swap_file_reuse = FALSE;
	struct swapfile	*swf = NULL;

	/*
	 * Any swapfile structure ready for re-use?
	 */
	lck_mtx_lock(&vm_swap_data_lock);

	swf = (struct swapfile *) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {
		if (swf->swp_flags == SWAP_REUSE) {
			swap_file_reuse = TRUE;
			break;
		}
		swf = (struct swapfile *) queue_next(&swf->swp_queue);
	}
	lck_mtx_unlock(&vm_swap_data_lock);

	if (swap_file_reuse == FALSE) {

		if (strlen(swapfilename) == 0) {
			/*
			 * If no swapfile name has been set, we'll
			 * use the default name.
			 *
			 * Also, this function is only called from the swapfile management thread.
			 * So we don't need to worry about a race in checking/setting the name here.
			 */
			strlcpy(swapfilename, SWAP_FILE_NAME, MAX_SWAPFILENAME_LEN);
		}
		namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1;

		swf = (struct swapfile *) kalloc(sizeof *swf);
		memset(swf, 0, sizeof(*swf));

		swf->swp_index = vm_num_swap_files + 1;
		swf->swp_pathlen = namelen;
		swf->swp_path = (char*)kalloc(swf->swp_pathlen);

		memset(swf->swp_path, 0, namelen);

		snprintf(swf->swp_path, namelen, "%s%d", swapfilename, vm_num_swap_files);
	}
	vm_swapfile_open(swf->swp_path, &swf->swp_vp);

	if (swf->swp_vp == NULL) {
		if (swap_file_reuse == FALSE) {
			kfree(swf->swp_path, swf->swp_pathlen);
			kfree(swf, sizeof *swf);
		}
		return FALSE;
	}
	vm_swapfile_can_be_created = TRUE;

	size = MAX_SWAP_FILE_SIZE;

	while (size >= MIN_SWAP_FILE_SIZE) {

		if (vm_swapfile_preallocate(swf->swp_vp, &size) == 0) {

			int num_bytes_for_bitmap = 0;

			swap_file_created = TRUE;

			swf->swp_size = size;
			swf->swp_nsegs = (unsigned int) (size / COMPRESSED_SWAP_CHUNK_SIZE);
			swf->swp_nseginuse = 0;
			swf->swp_free_hint = 0;

			num_bytes_for_bitmap = MAX((swf->swp_nsegs >> 3), 1);
			/*
			 * Allocate a bitmap that describes the
			 * number of segments held by this swapfile.
			 */
			swf->swp_bitmap = (uint8_t*)kalloc(num_bytes_for_bitmap);
			memset(swf->swp_bitmap, 0, num_bytes_for_bitmap);

			swf->swp_csegs = (c_segment_t *) kalloc(swf->swp_nsegs * sizeof(c_segment_t));
			memset(swf->swp_csegs, 0, (swf->swp_nsegs * sizeof(c_segment_t)));

			/*
			 * passing a NULL trim_list into vnode_trim_list
			 * will return ENOTSUP if trim isn't supported
			 * and 0 if it is
			 */
			if (vnode_trim_list(swf->swp_vp, NULL, FALSE) == 0)
				swp_trim_supported = TRUE;

			lck_mtx_lock(&vm_swap_data_lock);

			swf->swp_flags = SWAP_READY;

			if (swap_file_reuse == FALSE) {
				queue_enter(&swf_global_queue, swf, struct swapfile *, swp_queue);
			}
			vm_num_swap_files++;

			vm_swapfile_total_segs_alloced += swf->swp_nsegs;

			lck_mtx_unlock(&vm_swap_data_lock);

			thread_wakeup((event_t) &vm_num_swap_files);

			break;
		} else {
			size = size / 2;
		}
	}
	if (swap_file_created == FALSE) {

		vm_swapfile_close((uint64_t)(swf->swp_path), swf->swp_vp);

		swf->swp_vp = NULL;

		if (swap_file_reuse == FALSE) {
			kfree(swf->swp_path, swf->swp_pathlen);
			kfree(swf, sizeof *swf);
		}
	}
	return swap_file_created;
}

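/*
 * Swapfile sizing works by retreat: vm_swap_create_file first asks
 * vm_swapfile_preallocate for MAX_SWAP_FILE_SIZE and halves the request on
 * each failure until it falls below MIN_SWAP_FILE_SIZE, so on a nearly full
 * volume we still end up with the largest swapfile that fits.
 */
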
kern_return_t
vm_swap_get(vm_offset_t addr, uint64_t f_offset, uint64_t size)
{
	struct swapfile	*swf = NULL;
	uint64_t	file_offset = 0;
	int		retval = 0;

	if (addr == 0) {
		return KERN_FAILURE;
	}
	lck_mtx_lock(&vm_swap_data_lock);

	swf = vm_swapfile_for_handle(f_offset);

	if (swf == NULL || ( !(swf->swp_flags & SWAP_READY) && !(swf->swp_flags & SWAP_RECLAIM))) {
		retval = 1;
		goto done;
	}
	swf->swp_io_count++;

	lck_mtx_unlock(&vm_swap_data_lock);

	file_offset = (f_offset & SWAP_SLOT_MASK);
	retval = vm_swapfile_io(swf->swp_vp, file_offset, addr, (int)(size / PAGE_SIZE_64), SWAP_READ);

	if (retval == 0)
		VM_STAT_INCR_BY(swapins, size >> PAGE_SHIFT);
	else
		vm_swap_get_failures++;

	/*
	 * Free this slot in the swap structure.
	 */
	vm_swap_free(f_offset);

	lck_mtx_lock(&vm_swap_data_lock);
	swf->swp_io_count--;

	if ((swf->swp_flags & SWAP_WANTED) && swf->swp_io_count == 0) {

		swf->swp_flags &= ~SWAP_WANTED;
		thread_wakeup((event_t) &swf->swp_flags);
	}
done:
	lck_mtx_unlock(&vm_swap_data_lock);

	if (retval == 0)
		return KERN_SUCCESS;
	else
		return KERN_FAILURE;
}

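/*
 * swp_io_count and the SWAP_WANTED flag form a small rendezvous with
 * vm_swap_reclaim: reclaim sets SWAP_WANTED and sleeps on swp_flags until
 * in-flight I/Os drain, so vm_swap_get and vm_swap_put must wake that
 * channel when they drop the last outstanding I/O.
 */
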
kern_return_t
vm_swap_put(vm_offset_t addr, uint64_t *f_offset, uint64_t size, c_segment_t c_seg)
{
	unsigned int	segidx = 0;
	struct swapfile	*swf = NULL;
	uint64_t	file_offset = 0;
	uint64_t	swapfile_index = 0;
	unsigned int	byte_for_segidx = 0;
	unsigned int	offset_within_byte = 0;
	boolean_t	swf_eligible = FALSE;
	boolean_t	waiting = FALSE;
	boolean_t	retried = FALSE;
	int		error = 0;
	clock_sec_t	sec;
	clock_nsec_t	nsec;

	if (addr == 0 || f_offset == NULL) {
		return KERN_FAILURE;
	}
retry:
	lck_mtx_lock(&vm_swap_data_lock);

	swf = (struct swapfile *) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {

		segidx = swf->swp_free_hint;

		swf_eligible = (swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse < swf->swp_nsegs);

		if (swf_eligible) {

			while (segidx < swf->swp_nsegs) {

				byte_for_segidx = segidx >> 3;
				offset_within_byte = segidx % 8;

				if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) {
					segidx++;
					continue;
				}
				(swf->swp_bitmap)[byte_for_segidx] |= (1 << offset_within_byte);

				file_offset = segidx * COMPRESSED_SWAP_CHUNK_SIZE;
				swf->swp_nseginuse++;
				swf->swp_io_count++;
				swapfile_index = swf->swp_index;

				vm_swapfile_total_segs_used++;

				clock_get_system_nanotime(&sec, &nsec);

				if (VM_SWAP_SHOULD_CREATE(sec) && !vm_swapfile_create_thread_running)
					thread_wakeup((event_t) &vm_swapfile_create_needed);

				lck_mtx_unlock(&vm_swap_data_lock);

				goto done;
			}
		}
		swf = (struct swapfile *) queue_next(&swf->swp_queue);
	}
	assert(queue_end(&swf_global_queue, (queue_entry_t) swf));

	/*
	 * we've run out of swap segments, but may not
	 * be in a position to immediately create a new swap
	 * file if we've recently failed to create due to a lack
	 * of free space in the root filesystem... we'll try
	 * to kick that create off, but in any event we're going
	 * to take a breather (up to 1 second) so that we're not caught in a tight
	 * loop back in "vm_compressor_compact_and_swap" trying to stuff
	 * segments into swap files only to have them immediately put back
	 * on the c_age queue due to vm_swap_put failing.
	 *
	 * if we're doing these puts due to a hibernation flush,
	 * no need to block... setting hibernate_no_swapspace to TRUE,
	 * will cause "vm_compressor_compact_and_swap" to immediately abort
	 */
	clock_get_system_nanotime(&sec, &nsec);

	if (VM_SWAP_SHOULD_CREATE(sec) && !vm_swapfile_create_thread_running)
		thread_wakeup((event_t) &vm_swapfile_create_needed);

	if (hibernate_flushing == FALSE || VM_SWAP_SHOULD_CREATE(sec)) {
		waiting = TRUE;
		assert_wait_timeout((event_t) &vm_num_swap_files, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
	} else
		hibernate_no_swapspace = TRUE;

	lck_mtx_unlock(&vm_swap_data_lock);

	if (waiting == TRUE) {
		thread_block(THREAD_CONTINUE_NULL);

		if (retried == FALSE && hibernate_flushing == TRUE) {
			retried = TRUE;
			goto retry;
		}
	}
	return KERN_FAILURE;

done:
	error = vm_swapfile_io(swf->swp_vp, file_offset, addr, (int) (size / PAGE_SIZE_64), SWAP_WRITE);

	lck_mtx_lock(&vm_swap_data_lock);

	swf->swp_csegs[segidx] = c_seg;

	swf->swp_io_count--;

	*f_offset = (swapfile_index << SWAP_DEVICE_SHIFT) | file_offset;

	if ((swf->swp_flags & SWAP_WANTED) && swf->swp_io_count == 0) {

		swf->swp_flags &= ~SWAP_WANTED;
		thread_wakeup((event_t) &swf->swp_flags);
	}
	lck_mtx_unlock(&vm_swap_data_lock);

	if (error) {
		vm_swap_free(*f_offset);

		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

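/*
 * Free-slot tracking is a plain bitmap with one bit per
 * COMPRESSED_SWAP_CHUNK_SIZE segment: for a segment index, the byte is
 * segidx >> 3 and the bit is segidx % 8 (e.g. segidx 37 is bit 5 of
 * byte 4).  swp_free_hint remembers the lowest freed index so the scan in
 * vm_swap_put doesn't have to restart from 0 on every allocation.
 */
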
static void
vm_swap_free_now(struct swapfile *swf, uint64_t f_offset)
{
	uint64_t	file_offset = 0;
	unsigned int	segidx = 0;

	if ((swf->swp_flags & SWAP_READY) || (swf->swp_flags & SWAP_RECLAIM)) {

		unsigned int byte_for_segidx = 0;
		unsigned int offset_within_byte = 0;

		file_offset = (f_offset & SWAP_SLOT_MASK);
		segidx = (unsigned int) (file_offset / COMPRESSED_SWAP_CHUNK_SIZE);

		byte_for_segidx = segidx >> 3;
		offset_within_byte = segidx % 8;

		if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) {

			(swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte);

			swf->swp_csegs[segidx] = NULL;

			swf->swp_nseginuse--;
			vm_swapfile_total_segs_used--;

			if (segidx < swf->swp_free_hint) {
				swf->swp_free_hint = segidx;
			}
		}
		if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running)
			thread_wakeup((event_t) &vm_swapfile_gc_needed);
	}
}

uint32_t vm_swap_free_now_count = 0;
uint32_t vm_swap_free_delayed_count = 0;

void
vm_swap_free(uint64_t f_offset)
{
	struct swapfile		*swf = NULL;
	struct trim_list	*tl = NULL;
	clock_sec_t		sec;
	clock_nsec_t		nsec;

	if (swp_trim_supported == TRUE)
		tl = kalloc(sizeof(struct trim_list));

	lck_mtx_lock(&vm_swap_data_lock);

	swf = vm_swapfile_for_handle(f_offset);

	if (swf && (swf->swp_flags & (SWAP_READY | SWAP_RECLAIM))) {

		if (swp_trim_supported == FALSE || (swf->swp_flags & SWAP_RECLAIM)) {
			/*
			 * don't delay the free if the underlying disk doesn't support
			 * trim, or we're in the midst of reclaiming this swap file since
			 * we don't want to move segments that are technically free
			 * but not yet handled by the delayed free mechanism
			 */
			vm_swap_free_now(swf, f_offset);

			vm_swap_free_now_count++;
		} else {
			tl->tl_offset = f_offset & SWAP_SLOT_MASK;
			tl->tl_length = COMPRESSED_SWAP_CHUNK_SIZE;

			tl->tl_next = swf->swp_delayed_trim_list_head;
			swf->swp_delayed_trim_list_head = tl;
			swf->swp_delayed_trim_count++;
			tl = NULL;

			if (VM_SWAP_SHOULD_TRIM(swf) && !vm_swapfile_create_thread_running) {
				clock_get_system_nanotime(&sec, &nsec);

				if (sec > dont_trim_until_ts)
					thread_wakeup((event_t) &vm_swapfile_create_needed);
			}
			vm_swap_free_delayed_count++;
		}
	}
	lck_mtx_unlock(&vm_swap_data_lock);

	if (tl != NULL)
		kfree(tl, sizeof(struct trim_list));
}

static void
vm_swap_wait_on_trim_handling_in_progress()
{
	while (delayed_trim_handling_in_progress == TRUE) {

		assert_wait((event_t) &delayed_trim_handling_in_progress, THREAD_UNINT);
		lck_mtx_unlock(&vm_swap_data_lock);

		thread_block(THREAD_CONTINUE_NULL);

		lck_mtx_lock(&vm_swap_data_lock);
	}
}

static void
vm_swap_handle_delayed_trims(boolean_t force_now)
{
	struct swapfile *swf = NULL;

	/*
	 * serialize the race between us and vm_swap_reclaim...
	 * if vm_swap_reclaim wins it will turn off SWAP_READY
	 * on the victim it has chosen... we can just skip over
	 * that file since vm_swap_reclaim will first process
	 * all of the delayed trims associated with it
	 */
	lck_mtx_lock(&vm_swap_data_lock);

	delayed_trim_handling_in_progress = TRUE;

	lck_mtx_unlock(&vm_swap_data_lock);

	/*
	 * no need to hold the lock to walk the swf list since
	 * vm_swap_create (the only place where we add to this list)
	 * is run on the same thread as this function
	 * and vm_swap_reclaim doesn't remove items from this list
	 * instead marking them with SWAP_REUSE for future re-use
	 */
	swf = (struct swapfile *) queue_first(&swf_global_queue);

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {

		if ((swf->swp_flags & SWAP_READY) && (force_now == TRUE || VM_SWAP_SHOULD_TRIM(swf))) {

			assert(!(swf->swp_flags & SWAP_RECLAIM));
			vm_swap_do_delayed_trim(swf);
		}
		swf = (struct swapfile *) queue_next(&swf->swp_queue);
	}
	lck_mtx_lock(&vm_swap_data_lock);

	delayed_trim_handling_in_progress = FALSE;
	thread_wakeup((event_t) &delayed_trim_handling_in_progress);

	if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running)
		thread_wakeup((event_t) &vm_swapfile_gc_needed);

	lck_mtx_unlock(&vm_swap_data_lock);
}

static void
vm_swap_do_delayed_trim(struct swapfile *swf)
{
	struct trim_list *tl, *tl_head;

	lck_mtx_lock(&vm_swap_data_lock);

	tl_head = swf->swp_delayed_trim_list_head;
	swf->swp_delayed_trim_list_head = NULL;
	swf->swp_delayed_trim_count = 0;

	lck_mtx_unlock(&vm_swap_data_lock);

	vnode_trim_list(swf->swp_vp, tl_head, TRUE);

	while ((tl = tl_head) != NULL) {
		unsigned int	segidx = 0;
		unsigned int	byte_for_segidx = 0;
		unsigned int	offset_within_byte = 0;

		lck_mtx_lock(&vm_swap_data_lock);

		segidx = (unsigned int) (tl->tl_offset / COMPRESSED_SWAP_CHUNK_SIZE);

		byte_for_segidx = segidx >> 3;
		offset_within_byte = segidx % 8;

		if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) {

			(swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte);

			swf->swp_csegs[segidx] = NULL;

			swf->swp_nseginuse--;
			vm_swapfile_total_segs_used--;

			if (segidx < swf->swp_free_hint) {
				swf->swp_free_hint = segidx;
			}
		}
		lck_mtx_unlock(&vm_swap_data_lock);

		tl_head = tl->tl_next;

		kfree(tl, sizeof(struct trim_list));
	}
}

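/*
 * The delayed trim path batches frees: vm_swap_free queues a trim_list
 * entry instead of clearing the bitmap immediately, and once a swapfile
 * accumulates VM_SWAPFILE_DELAYED_TRIM_MAX entries the create thread issues
 * a single vnode_trim_list call for the whole batch here, after which the
 * slots are finally marked free.
 */
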
int vm_swap_reclaim_yielded = 0;

void
vm_swap_reclaim(void)
{
	vm_offset_t	addr = 0;
	unsigned int	segidx = 0;
	uint64_t	f_offset = 0;
	struct swapfile	*swf = NULL;
	struct swapfile	*smallest_swf = NULL;
	unsigned int	min_nsegs = 0;
	unsigned int	byte_for_segidx = 0;
	unsigned int	offset_within_byte = 0;
	uint32_t	c_size = 0;

	c_segment_t	c_seg = NULL;

	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&addr), C_SEG_BUFSIZE, 0, KMA_KOBJECT) != KERN_SUCCESS) {
		panic("vm_swap_reclaim: kernel_memory_allocate failed\n");
	}

	lck_mtx_lock(&vm_swap_data_lock);

	/*
	 * if we're running the swapfile list looking for
	 * candidates with delayed trims, we need to
	 * wait before making our decision concerning
	 * the swapfile we want to reclaim
	 */
	vm_swap_wait_on_trim_handling_in_progress();

	/*
	 * from here until we knock down the SWAP_READY bit,
	 * we need to remain behind the vm_swap_data_lock...
	 * once that bit has been turned off, "vm_swap_handle_delayed_trims"
	 * will not consider this swapfile for processing
	 */
	swf = (struct swapfile *) queue_first(&swf_global_queue);
	min_nsegs = MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE;
	smallest_swf = NULL;

	while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) {

		if ((swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse <= min_nsegs)) {

			smallest_swf = swf;
			min_nsegs = swf->swp_nseginuse;
		}
		swf = (struct swapfile *) queue_next(&swf->swp_queue);
	}

	if (smallest_swf == NULL)
		goto done;

	swf = smallest_swf;

	swf->swp_flags &= ~SWAP_READY;
	swf->swp_flags |= SWAP_RECLAIM;

	if (swf->swp_delayed_trim_count) {

		lck_mtx_unlock(&vm_swap_data_lock);

		vm_swap_do_delayed_trim(swf);

		lck_mtx_lock(&vm_swap_data_lock);
	}
	segidx = 0;

	while (segidx < swf->swp_nsegs) {

ReTry_for_cseg:
		/*
		 * Wait for outgoing I/Os.
		 */
		while (swf->swp_io_count) {

			swf->swp_flags |= SWAP_WANTED;

			assert_wait((event_t) &swf->swp_flags, THREAD_UNINT);
			lck_mtx_unlock(&vm_swap_data_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock(&vm_swap_data_lock);
		}
		if (compressor_store_stop_compaction == TRUE || VM_SWAP_SHOULD_ABORT_RECLAIM() || VM_SWAP_BUSY()) {
			vm_swap_reclaim_yielded++;
			break;
		}

		byte_for_segidx = segidx >> 3;
		offset_within_byte = segidx % 8;

		if (((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) == 0) {

			segidx++;
			continue;
		}

		c_seg = swf->swp_csegs[segidx];

		lck_mtx_lock_spin_always(&c_seg->c_lock);

		assert(c_seg->c_ondisk);

		if (c_seg->c_busy) {

			c_seg->c_wanted = 1;

			assert_wait((event_t) (c_seg), THREAD_UNINT);
			lck_mtx_unlock_always(&c_seg->c_lock);

			lck_mtx_unlock(&vm_swap_data_lock);

			thread_block(THREAD_CONTINUE_NULL);

			lck_mtx_lock(&vm_swap_data_lock);

			goto ReTry_for_cseg;
		}
		(swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte);

		f_offset = segidx * COMPRESSED_SWAP_CHUNK_SIZE;

		swf->swp_csegs[segidx] = NULL;
		swf->swp_nseginuse--;

		vm_swapfile_total_segs_used--;

		lck_mtx_unlock(&vm_swap_data_lock);

		if (c_seg->c_must_free) {

			C_SEG_BUSY(c_seg);
			c_seg_free(c_seg);
		} else {

			C_SEG_BUSY(c_seg);
			c_seg->c_busy_swapping = 1;
#if !CHECKSUM_THE_SWAP
			c_seg_trim_tail(c_seg);
#endif

			c_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));

			assert(c_size <= C_SEG_BUFSIZE);

			lck_mtx_unlock_always(&c_seg->c_lock);

			if (vm_swapfile_io(swf->swp_vp, f_offset, addr, (int)(c_size / PAGE_SIZE_64), SWAP_READ)) {

				/*
				 * reading the data back in failed, so convert c_seg
				 * to a swapped in c_segment that contains no data
				 */
				c_seg->c_store.c_buffer = (int32_t *)NULL;
				c_seg_swapin_requeue(c_seg);

				goto swap_io_failed;
			}
			VM_STAT_INCR_BY(swapins, c_size >> PAGE_SHIFT);

			if (vm_swap_put(addr, &f_offset, c_size, c_seg)) {
				vm_offset_t	c_buffer;

				/*
				 * the put failed, so convert c_seg to a fully swapped in c_segment
				 * with valid data
				 */
				if (kernel_memory_allocate(kernel_map, &c_buffer, C_SEG_ALLOCSIZE, 0, KMA_COMPRESSOR | KMA_VAONLY) != KERN_SUCCESS)
					panic("vm_swap_reclaim: kernel_memory_allocate failed\n");
				OSAddAtomic64(C_SEG_ALLOCSIZE, &compressor_kvspace_used);

				kernel_memory_populate(kernel_map, c_buffer, c_size, KMA_COMPRESSOR);

				memcpy((char *)c_buffer, (char *)addr, c_size);

				c_seg->c_store.c_buffer = (int32_t *)c_buffer;
#if ENCRYPTED_SWAP
				vm_swap_decrypt(c_seg);
#endif /* ENCRYPTED_SWAP */
				c_seg_swapin_requeue(c_seg);

				OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used);

				goto swap_io_failed;
			}
			VM_STAT_INCR_BY(swapouts, c_size >> PAGE_SHIFT);

			lck_mtx_lock_spin_always(&c_seg->c_lock);

			assert(c_seg->c_ondisk);
			/*
			 * The c_seg will now know about the new location on disk.
			 */
			c_seg->c_store.c_swap_handle = f_offset;
swap_io_failed:
			c_seg->c_busy_swapping = 0;

			if (c_seg->c_must_free)
				c_seg_free(c_seg);
			else {
				C_SEG_WAKEUP_DONE(c_seg);

				lck_mtx_unlock_always(&c_seg->c_lock);
			}
		}
		lck_mtx_lock(&vm_swap_data_lock);
	}

	if (swf->swp_nseginuse) {

		swf->swp_flags &= ~SWAP_RECLAIM;
		swf->swp_flags |= SWAP_READY;

		goto done;
	}
	/*
	 * We don't remove this inactive swf from the queue.
	 * That way, we can re-use it when needed again and
	 * preserve the namespace. The delayed_trim processing
	 * is also dependent on us not removing swfs from the queue.
	 */
	//queue_remove(&swf_global_queue, swf, struct swapfile*, swp_queue);

	vm_num_swap_files--;

	vm_swapfile_total_segs_alloced -= swf->swp_nsegs;

	lck_mtx_unlock(&vm_swap_data_lock);

	vm_swapfile_close((uint64_t)(swf->swp_path), swf->swp_vp);

	kfree(swf->swp_csegs, swf->swp_nsegs * sizeof(c_segment_t));
	kfree(swf->swp_bitmap, MAX((swf->swp_nsegs >> 3), 1));

	lck_mtx_lock(&vm_swap_data_lock);

	swf->swp_vp = NULL;
	swf->swp_size = 0;
	swf->swp_free_hint = 0;
	swf->swp_nsegs = 0;
	swf->swp_flags = SWAP_REUSE;

done:
	thread_wakeup((event_t) &swf->swp_flags);
	lck_mtx_unlock(&vm_swap_data_lock);

	kmem_free(kernel_map, (vm_offset_t) addr, C_SEG_BUFSIZE);
}

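/*
 * Reclaim empties the least-used swapfile by brute force: each live segment
 * is read back in from disk and immediately re-written via vm_swap_put,
 * which must land it in some other swapfile because this one has had
 * SWAP_READY knocked down.  Only when every segment has been moved
 * (swp_nseginuse == 0) is the file closed and its structure marked
 * SWAP_REUSE.
 */
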
uint64_t
vm_swap_get_total_space(void)
{
	uint64_t total_space = 0;

	total_space = (uint64_t)vm_swapfile_total_segs_alloced * COMPRESSED_SWAP_CHUNK_SIZE;

	return total_space;
}

uint64_t
vm_swap_get_used_space(void)
{
	uint64_t used_space = 0;

	used_space = (uint64_t)vm_swapfile_total_segs_used * COMPRESSED_SWAP_CHUNK_SIZE;

	return used_space;
}

uint64_t
vm_swap_get_free_space(void)
{
	return (vm_swap_get_total_space() - vm_swap_get_used_space());
}

int
vm_swap_low_on_space(void)
{
	if (vm_num_swap_files == 0 && vm_swapfile_can_be_created == FALSE)
		return (0);

	if (((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < ((unsigned int)VM_SWAPFILE_HIWATER_SEGS) / 8)) {

		if (vm_num_swap_files == 0 && !SWAPPER_NEEDS_TO_UNTHROTTLE())
			return (0);

		if (vm_swapfile_last_failed_to_create_ts >= vm_swapfile_last_successful_create_ts)
			return (1);
	}
	return (0);
}