+
+/*
+ * vm_swapout_iodone:
+ *	I/O completion callback for a compressor-segment swapout write.
+ *	Records the completion status in the caller-supplied
+ *	swapout_io_completion context and, if the swapout thread is not
+ *	currently running, wakes it so it can reap the finished I/O.
+ *
+ *	io_context: pointer to the struct swapout_io_completion for this I/O
+ *	error:      0 on success, otherwise the I/O error code
+ *
+ *	All state updates are made under c_list_lock (taken as a spin lock),
+ *	which also serializes against the swapout thread's scan.
+ */
+void
+vm_swapout_iodone(void *io_context, int error)
+{
+ struct swapout_io_completion *soc;
+
+ soc = (struct swapout_io_completion *)io_context;
+
+ lck_mtx_lock_spin_always(c_list_lock);
+
+ /* publish completion + status for the swapout thread to consume */
+ soc->swp_io_done = 1;
+ soc->swp_io_error = error;
+ vm_swapout_soc_done++;
+
+ if (!vm_swapout_thread_running) {
+ /* swapout thread is blocked on c_swapout_list_head; kick it */
+ thread_wakeup((event_t)&c_swapout_list_head);
+ }
+
+ lck_mtx_unlock_always(c_list_lock);
+}
+
+
+static void
+vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_return_t kr)
+{
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
+
+ if (kr == KERN_SUCCESS) {
+ kernel_memory_depopulate(compressor_map, (vm_offset_t)c_seg->c_store.c_buffer, size,
+ KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR);
+ }
+#if ENCRYPTED_SWAP
+ else {
+ vm_swap_decrypt(c_seg);
+ }
+#endif /* ENCRYPTED_SWAP */
+ lck_mtx_lock_spin_always(c_list_lock);
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
+
+ if (kr == KERN_SUCCESS) {
+ int new_state = C_ON_SWAPPEDOUT_Q;
+ boolean_t insert_head = FALSE;
+
+ if (hibernate_flushing == TRUE) {
+ if (c_seg->c_generation_id >= first_c_segment_to_warm_generation_id &&
+ c_seg->c_generation_id <= last_c_segment_to_warm_generation_id) {
+ insert_head = TRUE;
+ }
+ } else if (C_SEG_ONDISK_IS_SPARSE(c_seg)) {
+ new_state = C_ON_SWAPPEDOUTSPARSE_Q;
+ }
+
+ c_seg_switch_state(c_seg, new_state, insert_head);
+
+ c_seg->c_store.c_swap_handle = f_offset;
+
+ counter_add(&vm_statistics_swapouts, size >> PAGE_SHIFT);
+
+ if (c_seg->c_bytes_used) {
+ OSAddAtomic64(-c_seg->c_bytes_used, &compressor_bytes_used);
+ }
+
+#if CONFIG_FREEZE
+ /*
+ * Successful swapout. Decrement the in-core compressed pages count.
+ */
+ OSAddAtomic(-(c_seg->c_slots_used), &c_segment_pages_compressed_incore);
+ assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore);
+#endif /* CONFIG_FREEZE */
+ } else {
+ if (c_seg->c_overage_swap == TRUE) {
+ c_seg->c_overage_swap = FALSE;
+ c_overage_swapped_count--;
+ }
+
+#if CONFIG_FREEZE
+ if (c_seg->c_task_owner) {
+ c_seg_update_task_owner(c_seg, NULL);
+ }
+#endif /* CONFIG_FREEZE */
+
+ c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE);
+
+ if (!c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) {
+ c_seg_need_delayed_compaction(c_seg, TRUE);
+ }
+ }
+ assert(c_seg->c_busy_swapping);
+ assert(c_seg->c_busy);
+
+ c_seg->c_busy_swapping = 0;
+ lck_mtx_unlock_always(c_list_lock);
+
+ C_SEG_WAKEUP_DONE(c_seg);
+ lck_mtx_unlock_always(&c_seg->c_lock);
+
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+}
+
+