+
+void
+vm_compressor_transfer(
+ int *dst_slot_p,
+ int *src_slot_p)
+{
+ c_slot_mapping_t dst_slot, src_slot;
+ c_segment_t c_seg;
+ uint16_t c_indx;
+ c_slot_t cs;
+
+ src_slot = (c_slot_mapping_t) src_slot_p;
+
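+ /* a single-value page isn't backed by a c_segment... just hand the mapping over */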
+ if (src_slot->s_cseg == C_SV_CSEG_ID) {
+ *dst_slot_p = *src_slot_p;
+ *src_slot_p = 0;
+ return;
+ }
+ dst_slot = (c_slot_mapping_t) dst_slot_p;
+Retry:
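+ /* keep the slot-to-segment mapping stable while we dereference and update it */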
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
+ /* get segment for src_slot */
+ c_seg = c_segments[src_slot->s_cseg - 1].c_seg;
+ /* lock segment */
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
+ /* wait if it's busy, unless it's only busy because it's being swapped */
+ if (c_seg->c_busy && !c_seg->c_busy_swapping) {
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+ c_seg_wait_on_busy(c_seg);
+ goto Retry;
+ }
+ /* find the c_slot */
+ c_indx = src_slot->s_cindx;
+ cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx);
+ /* point the c_slot back to dst_slot instead of src_slot */
+ C_SLOT_ASSERT_PACKABLE(dst_slot);
+ cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot);
+ /* transfer the mapping and clear the source slot */
+ *dst_slot_p = *src_slot_p;
+ *src_slot_p = 0;
+ lck_mtx_unlock_always(&c_seg->c_lock);
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+}
+
+#if CONFIG_FREEZE
+
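+ /* number of times the freezer has finished filling a c_seg */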
+int freezer_finished_filling = 0;
+
+void
+vm_compressor_finished_filling(
+ void **current_chead)
+{
+ c_segment_t c_seg;
+
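+ /* nothing to do if the freezer has no segment currently being filled */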
+ if ((c_seg = *(c_segment_t *)current_chead) == NULL) {
+ return;
+ }
+
+ assert(c_seg->c_state == C_IS_FILLING);
+
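+ /* take the segment's lock and transition it out of the C_IS_FILLING state */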
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
+
+ c_current_seg_filled(c_seg, (c_segment_t *)current_chead);
+
+ lck_mtx_unlock_always(&c_seg->c_lock);
+
+ freezer_finished_filling++;
+}
+
+
+/*
+ * This routine is used to transfer the compressed chunks from
+ * the c_seg/cindx pointed to by slot_p into a new c_seg headed
+ * by the current_chead and a new cindx within that c_seg.
+ *
+ * Currently, this routine is only used by the "freezer backed by
+ * compressor with swap" mode to create a series of c_segs that
+ * contain compressed data belonging to only one task. So, we
+ * move a task's previously compressed data into a set of new
+ * c_segs, which will also hold the task's yet-to-be-compressed data.
+ */
+
+kern_return_t
+vm_compressor_relocate(
+ void **current_chead,
+ int *slot_p)
+{
+ c_slot_mapping_t slot_ptr;
+ c_slot_mapping_t src_slot;
+ uint32_t c_rounded_size;
+ uint32_t c_size;
+ uint16_t dst_slot;
+ c_slot_t c_dst;
+ c_slot_t c_src;
+ uint16_t c_indx;
+ c_segment_t c_seg_dst = NULL;
+ c_segment_t c_seg_src = NULL;
+ kern_return_t kr = KERN_SUCCESS;
+
+
+ src_slot = (c_slot_mapping_t) slot_p;
+
+ if (src_slot->s_cseg == C_SV_CSEG_ID) {
+ /*
+ * no need to relocate... this is a page full of a single
+ * value, which is hashed to a single entry that is not
+ * contained in a c_segment_t
+ */
+ return kr;
+ }
+
+Relookup_dst:
+ c_seg_dst = c_seg_allocate((c_segment_t *)current_chead);
+ /*
+ * returns with c_seg lock held
+ * and PAGE_REPLACEMENT_DISALLOWED(TRUE)...
+ * c_nextslot has been allocated and
+ * c_store.c_buffer populated
+ */
+ if (c_seg_dst == NULL) {
+ /*
+ * Out of compression segments?
+ */
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+
+ assert(c_seg_dst->c_busy == 0);
+
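+ /* mark the dst segment busy so it can't be compacted or swapped while its lock is dropped */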
+ C_SEG_BUSY(c_seg_dst);
+
+ dst_slot = c_seg_dst->c_nextslot;
+
+ lck_mtx_unlock_always(&c_seg_dst->c_lock);
+
+Relookup_src:
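+ /* look up the segment that currently holds this slot's compressed data */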
+ c_seg_src = c_segments[src_slot->s_cseg - 1].c_seg;
+
+ assert(c_seg_dst != c_seg_src);
+
+ lck_mtx_lock_spin_always(&c_seg_src->c_lock);
+
+ if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) ||
+ c_seg_src->c_state == C_IS_FILLING) {
+ /*
+ * Skip this page if:
+ * a) the src c_seg is already on-disk (or on its way there).
+ * A "thaw" can mark a process as eligible for
+ * another freeze cycle without bringing any of
+ * its swapped-out c_segs back from disk (because
+ * that is done on-demand).
+ * Or, this page may be mapped elsewhere in the task's map,
+ * and we may have marked it for swap already.
+ *
+ * b) the src c_seg is being filled by the compressor
+ * thread. We don't want the added latency of waiting for
+ * this c_seg in the freeze path, so we skip it.
+ */
+
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+
+ lck_mtx_unlock_always(&c_seg_src->c_lock);
+
+ c_seg_src = NULL;
+
+ goto out;
+ }
+
+ if (c_seg_src->c_busy) {
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+ c_seg_wait_on_busy(c_seg_src);
+
+ c_seg_src = NULL;
+
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
+
+ goto Relookup_src;
+ }
+
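+ /* mark the src segment busy so it stays intact while we copy out of it */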
+ C_SEG_BUSY(c_seg_src);
+
+ lck_mtx_unlock_always(&c_seg_src->c_lock);
+
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+
+ /* find the c_slot */
+ c_indx = src_slot->s_cindx;
+
+ c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, c_indx);
+
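+ /* size of the compressed data held in the source slot */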
+ c_size = UNPACK_C_SIZE(c_src);
+
+ assert(c_size);
+
+ if (c_size > (uint32_t)(C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)c_seg_dst->c_nextoffset))) {
+ /*
+ * The dst segment can't hold this chunk. Finish it off and allocate a new one.
+ */
+
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
+
+ lck_mtx_lock_spin_always(&c_seg_src->c_lock);
+ C_SEG_WAKEUP_DONE(c_seg_src);
+ lck_mtx_unlock_always(&c_seg_src->c_lock);
+
+ c_seg_src = NULL;
+
+ lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
+
+ assert(c_seg_dst->c_busy);
+ assert(c_seg_dst->c_state == C_IS_FILLING);
+ assert(!c_seg_dst->c_on_minorcompact_q);
+
+ c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
+ assert(*current_chead == NULL);
+
+ C_SEG_WAKEUP_DONE(c_seg_dst);
+
+ lck_mtx_unlock_always(&c_seg_dst->c_lock);
+
+ c_seg_dst = NULL;
+
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+
+ goto Relookup_dst;
+ }
+
+ c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot);
+
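+ /* copy the compressed data into the next free offset of the dst segment's buffer */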
+ memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size);
+ /*
+ * Is platform alignment actually necessary since wkdm aligns its output?
+ */
+ c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK;
+
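+ /* copy the slot metadata and point it at the data's new offset */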
+ cslot_copy(c_dst, c_src);
+ c_dst->c_offset = c_seg_dst->c_nextoffset;
+
+ if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) {
+ c_seg_dst->c_firstemptyslot++;
+ }
+
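+ /* account for the newly occupied slot and bytes in the dst segment */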
+ c_seg_dst->c_slots_used++;
+ c_seg_dst->c_nextslot++;
+ c_seg_dst->c_bytes_used += c_rounded_size;
+ c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size);
+
+
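+ /* free the source slot and adjust the src segment's accounting */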
+ PACK_C_SIZE(c_src, 0);
+
+ c_seg_src->c_bytes_used -= c_rounded_size;
+ c_seg_src->c_bytes_unused += c_rounded_size;
+
+ assert(c_seg_src->c_slots_used);
+ c_seg_src->c_slots_used--;
+
+ if (c_indx < c_seg_src->c_firstemptyslot) {
+ c_seg_src->c_firstemptyslot = c_indx;
+ }
+
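+ /* update the VM object's slot mapping to point at the new segment and slot */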
+ c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot);
+
+ PAGE_REPLACEMENT_ALLOWED(TRUE);
+ slot_ptr = C_SLOT_UNPACK_PTR(c_dst);
+ /* <csegno=0,indx=0> would mean "empty slot", so use csegno+1 */
+ slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1;
+ slot_ptr->s_cindx = dst_slot;
+
+ PAGE_REPLACEMENT_ALLOWED(FALSE);
+
+out:
+ if (c_seg_src) {
+ lck_mtx_lock_spin_always(&c_seg_src->c_lock);
+
+ C_SEG_WAKEUP_DONE(c_seg_src);
+
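+ /* if the src segment is now empty, queue it for delayed minor compaction */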
+ if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) {
+ if (!c_seg_src->c_on_minorcompact_q) {
+ c_seg_need_delayed_compaction(c_seg_src, FALSE);
+ }
+ }
+
+ lck_mtx_unlock_always(&c_seg_src->c_lock);
+ }
+
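+ /* drop the busy hold on the dst segment, marking it filled first if it's out of space */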
+ if (c_seg_dst) {
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
+
+ lck_mtx_lock_spin_always(&c_seg_dst->c_lock);
+
+ if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) {
+ /*
+ * The dst segment is at (or nearing) its slot or offset limit, so mark it as filled.
+ */
+ assert(c_seg_dst->c_busy);
+ assert(c_seg_dst->c_state == C_IS_FILLING);
+ assert(!c_seg_dst->c_on_minorcompact_q);
+
+ c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead);
+ assert(*current_chead == NULL);
+ }
+
+ C_SEG_WAKEUP_DONE(c_seg_dst);
+
+ lck_mtx_unlock_always(&c_seg_dst->c_lock);
+
+ c_seg_dst = NULL;
+
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
+ }
+
+ return kr;
+}
+#endif /* CONFIG_FREEZE */