+
+ vm_object_transpose_count++;
+
+ return retval;
+}
+
+
+/*
+ * vm_object_cluster_size
+ *
+ * Determine how big a cluster we should issue an I/O for...
+ *
+ * Inputs: *start == offset of page needed
+ * *length == maximum cluster pager can handle
+ * Outputs: *start == beginning offset of cluster
+ * *length == length of cluster to try
+ *
+ * The original *start will be encompassed by the cluster
+ *
+ */
+extern int speculative_reads_disabled;
+
+/*
+ * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
+ * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
+ * always be page-aligned. The derivation could involve operations (e.g. division)
+ * that could give us non-page-size aligned values if we start out with values that
+ * are odd multiples of PAGE_SIZE.
+ */
+#if CONFIG_EMBEDDED
+unsigned int preheat_max_bytes = (1024 * 512);
+#else /* CONFIG_EMBEDDED */
+unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
+#endif /* CONFIG_EMBEDDED */
+unsigned int preheat_min_bytes = (1024 * 32);
+
+
+__private_extern__ void
+vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
+ vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
+{
+ vm_size_t pre_heat_size;
+ vm_size_t tail_size;
+ vm_size_t head_size;
+ vm_size_t max_length;
+ vm_size_t cluster_size;
+ vm_object_offset_t object_size;
+ vm_object_offset_t orig_start;
+ vm_object_offset_t target_start;
+ vm_object_offset_t offset;
+ vm_behavior_t behavior;
+ boolean_t look_behind = TRUE;
+ boolean_t look_ahead = TRUE;
+ boolean_t isSSD = FALSE;
+ uint32_t throttle_limit;
+ int sequential_run;
+ int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ vm_size_t max_ph_size;
+ vm_size_t min_ph_size;
+
+ assert( !(*length & PAGE_MASK));
+ assert( !(*start & PAGE_MASK_64));
+
+ /*
+ * remember maximum length of run requested
+ */
+ max_length = *length;
+ /*
+ * we'll always return a cluster size of at least
+ * 1 page, since the original fault must always
+ * be processed
+ */
+ *length = PAGE_SIZE;
+ *io_streaming = 0;
+
+ if (speculative_reads_disabled || fault_info == NULL) {
+ /*
+ * no cluster... just fault the page in
+ */
+ return;
+ }
+ orig_start = *start;
+ target_start = orig_start;
+ cluster_size = round_page(fault_info->cluster_size);
+ behavior = fault_info->behavior;
+
+ /*
+ * the object lock is held (exclusive) from here until "out",
+ * covering all reads of pager/size/usage state below
+ */
+ vm_object_lock(object);
+
+ if (object->pager == MEMORY_OBJECT_NULL) {
+ goto out; /* pager is gone for this object, nothing more to do */
+ }
+ vnode_pager_get_isSSD(object->pager, &isSSD);
+
+ min_ph_size = round_page(preheat_min_bytes);
+ max_ph_size = round_page(preheat_max_bytes);
+
+#if !CONFIG_EMBEDDED
+ if (isSSD) {
+ /* shrink the preheat window on SSDs (low seek cost) */
+ min_ph_size /= 2;
+ max_ph_size /= 8;
+
+ if (min_ph_size & PAGE_MASK_64) {
+ min_ph_size = trunc_page(min_ph_size);
+ }
+
+ if (max_ph_size & PAGE_MASK_64) {
+ max_ph_size = trunc_page(max_ph_size);
+ }
+ }
+#endif /* !CONFIG_EMBEDDED */
+
+ if (min_ph_size < PAGE_SIZE) {
+ min_ph_size = PAGE_SIZE;
+ }
+
+ if (max_ph_size < PAGE_SIZE) {
+ max_ph_size = PAGE_SIZE;
+ } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
+ max_ph_size = MAX_UPL_TRANSFER_BYTES;
+ }
+
+ if (max_length > max_ph_size) {
+ max_length = max_ph_size;
+ }
+
+ if (max_length <= PAGE_SIZE) {
+ goto out;
+ }
+
+ if (object->internal) {
+ object_size = object->vo_size;
+ } else {
+ vnode_pager_get_object_size(object->pager, &object_size);
+ }
+
+ object_size = round_page_64(object_size);
+
+ if (orig_start >= object_size) {
+ /*
+ * fault occurred beyond the EOF...
+ * we need to punt w/o changing the
+ * starting offset
+ */
+ goto out;
+ }
+ if (object->pages_used > object->pages_created) {
+ /*
+ * must have wrapped our 32 bit counters
+ * so reset
+ */
+ object->pages_used = object->pages_created = 0;
+ }
+ if ((sequential_run = object->sequential)) {
+ if (sequential_run < 0) {
+ sequential_behavior = VM_BEHAVIOR_RSEQNTL;
+ sequential_run = 0 - sequential_run;
+ } else {
+ sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ }
+ }
+ switch (behavior) {
+ default:
+ behavior = VM_BEHAVIOR_DEFAULT;
+ /* FALLTHROUGH - unknown behaviors handled as VM_BEHAVIOR_DEFAULT */
+
+ case VM_BEHAVIOR_DEFAULT:
+ if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
+ goto out;
+ }
+
+ if (sequential_run >= (3 * PAGE_SIZE)) {
+ pre_heat_size = sequential_run + PAGE_SIZE;
+
+ if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
+ look_behind = FALSE;
+ } else {
+ look_ahead = FALSE;
+ }
+
+ *io_streaming = 1;
+ } else {
+ if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
+ /*
+ * prime the pump
+ */
+ pre_heat_size = min_ph_size;
+ } else {
+ /*
+ * Linear growth in PH size: The maximum size is max_length...
+ * this calculation will result in a size that is neither a
+ * power of 2 nor a multiple of PAGE_SIZE... so round
+ * it up to the nearest PAGE_SIZE boundary
+ */
+ pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
+
+ if (pre_heat_size < min_ph_size) {
+ pre_heat_size = min_ph_size;
+ } else {
+ pre_heat_size = round_page(pre_heat_size);
+ }
+ }
+ }
+ break;
+
+ case VM_BEHAVIOR_RANDOM:
+ if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
+ goto out;
+ }
+ break;
+
+ case VM_BEHAVIOR_SEQUENTIAL:
+ if ((pre_heat_size = cluster_size) == 0) {
+ pre_heat_size = sequential_run + PAGE_SIZE;
+ }
+ look_behind = FALSE;
+ *io_streaming = 1;
+
+ break;
+
+ case VM_BEHAVIOR_RSEQNTL:
+ if ((pre_heat_size = cluster_size) == 0) {
+ pre_heat_size = sequential_run + PAGE_SIZE;
+ }
+ look_ahead = FALSE;
+ *io_streaming = 1;
+
+ break;
+ }
+ /* max_length was clamped to max_ph_size above, so it fits in 32 bits */
+ throttle_limit = (uint32_t) max_length;
+ assert(throttle_limit == max_length);
+
+ if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
+ if (max_length > throttle_limit) {
+ max_length = throttle_limit;
+ }
+ }
+ if (pre_heat_size > max_length) {
+ pre_heat_size = max_length;
+ }
+
+ /* scale the preheat back when free pages are scarce */
+ if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
+ unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
+
+ if (consider_free < vm_page_throttle_limit) {
+ pre_heat_size = trunc_page(pre_heat_size / 16);
+ } else if (consider_free < vm_page_free_target) {
+ pre_heat_size = trunc_page(pre_heat_size / 4);
+ }
+
+ if (pre_heat_size < min_ph_size) {
+ pre_heat_size = min_ph_size;
+ }
+ }
+ if (look_ahead == TRUE) {
+ if (look_behind == TRUE) {
+ /*
+ * if we get here it's due to a random access...
+ * so we want to center the original fault address
+ * within the cluster we will issue... make sure
+ * to calculate 'head_size' as a multiple of PAGE_SIZE...
+ * 'pre_heat_size' is a multiple of PAGE_SIZE but not
+ * necessarily an even number of pages so we need to truncate
+ * the result to a PAGE_SIZE boundary
+ */
+ head_size = trunc_page(pre_heat_size / 2);
+
+ if (target_start > head_size) {
+ target_start -= head_size;
+ } else {
+ target_start = 0;
+ }
+
+ /*
+ * 'target_start' at this point represents the beginning offset
+ * of the cluster we are considering... 'orig_start' will be in
+ * the center of this cluster if we didn't have to clip the start
+ * due to running into the start of the file
+ */
+ }
+ if ((target_start + pre_heat_size) > object_size) {
+ pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
+ }
+ /*
+ * at this point calculate the number of pages beyond the original fault
+ * address that we want to consider... this is guaranteed not to extend beyond
+ * the current EOF...
+ */
+ assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
+ tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
+ } else {
+ if (pre_heat_size > target_start) {
+ /*
+ * since pre_heat_size is always smaller than 2^32,
+ * if it is larger than target_start (a 64 bit value)
+ * it is safe to clip target_start to 32 bits
+ */
+ pre_heat_size = (vm_size_t) target_start;
+ }
+ tail_size = 0;
+ }
+ assert( !(target_start & PAGE_MASK_64));
+ assert( !(pre_heat_size & PAGE_MASK_64));
+
+ if (pre_heat_size <= PAGE_SIZE) {
+ goto out;
+ }
+
+ if (look_behind == TRUE) {
+ /*
+ * take a look at the pages before the original
+ * faulting offset... recalculate this in case
+ * we had to clip 'pre_heat_size' above to keep
+ * from running past the EOF.
+ */
+ head_size = pre_heat_size - tail_size - PAGE_SIZE;
+
+ for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
+ /*
+ * don't poke below the lowest offset
+ */
+ if (offset < fault_info->lo_offset) {
+ break;
+ }
+ /*
+ * for external objects or internal objects w/o a pager,
+ * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
+ */
+ if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
+ break;
+ }
+ if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
+ /*
+ * don't bridge resident pages
+ */
+ break;
+ }
+ *start = offset;
+ *length += PAGE_SIZE;
+ }
+ }
+ if (look_ahead == TRUE) {
+ for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
+ /*
+ * don't poke above the highest offset
+ */
+ if (offset >= fault_info->hi_offset) {
+ break;
+ }
+ assert(offset < object_size);
+
+ /*
+ * for external objects or internal objects w/o a pager,
+ * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
+ */
+ if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
+ break;
+ }
+ if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
+ /*
+ * don't bridge resident pages
+ */
+ break;
+ }
+ *length += PAGE_SIZE;
+ }
+ }
+out:
+ if (*length > max_length) {
+ *length = max_length;
+ }
+
+ vm_object_unlock(object);
+
+ DTRACE_VM1(clustersize, vm_size_t, *length);
+}
+
+
+/*
+ * Allow manipulation of individual page state. This is actually part of
+ * the UPL regimen but takes place on the VM object rather than on a UPL
+ */
+
+kern_return_t
+vm_object_page_op(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ int ops,
+ ppnum_t *phys_entry,
+ int *flags)
+{
+ vm_page_t dst_page;
+
+ vm_object_lock(object);
+
+ if (ops & UPL_POP_PHYSICAL) {
+ if (object->phys_contiguous) {
+ if (phys_entry) {
+ /* a phys_contiguous object keeps its base page in vo_shadow_offset */
+ *phys_entry = (ppnum_t)
+ (object->vo_shadow_offset >> PAGE_SHIFT);
+ }
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ } else {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+ }
+ /* per-page ops below make no sense on a physically contiguous object */
+ if (object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ /* retry loop: re-examine the page each time we are woken from PAGE_SLEEP */
+ while (TRUE) {
+ if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ /* Sync up on getting the busy bit */
+ if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
+ (((ops & UPL_POP_SET) &&
+ (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
+ /* someone else is playing with the page, we will */
+ /* have to wait */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
+
+ if (ops & UPL_POP_DUMP) {
+ /* discard the page: unmap it everywhere, then free it */
+ if (dst_page->vmp_pmapped == TRUE) {
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+ }
+
+ VM_PAGE_FREE(dst_page);
+ break;
+ }
+
+ if (flags) {
+ *flags = 0;
+
+ /* Get the condition of flags before requested ops */
+ /* are undertaken */
+
+ if (dst_page->vmp_dirty) {
+ *flags |= UPL_POP_DIRTY;
+ }
+ if (dst_page->vmp_free_when_done) {
+ *flags |= UPL_POP_PAGEOUT;
+ }
+ if (dst_page->vmp_precious) {
+ *flags |= UPL_POP_PRECIOUS;
+ }
+ if (dst_page->vmp_absent) {
+ *flags |= UPL_POP_ABSENT;
+ }
+ if (dst_page->vmp_busy) {
+ *flags |= UPL_POP_BUSY;
+ }
+ }
+
+ /* The caller should have made a call either contingent with */
+ /* or prior to this call to set UPL_POP_BUSY */
+ if (ops & UPL_POP_SET) {
+ /* The protection granted with this assert will */
+ /* not be complete. If the caller violates the */
+ /* convention and attempts to change page state */
+ /* without first setting busy we may not see it */
+ /* because the page may already be busy. However */
+ /* if such violations occur we will assert sooner */
+ /* or later. */
+ assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
+ if (ops & UPL_POP_DIRTY) {
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ }
+ if (ops & UPL_POP_PAGEOUT) {
+ dst_page->vmp_free_when_done = TRUE;
+ }
+ if (ops & UPL_POP_PRECIOUS) {
+ dst_page->vmp_precious = TRUE;
+ }
+ if (ops & UPL_POP_ABSENT) {
+ dst_page->vmp_absent = TRUE;
+ }
+ if (ops & UPL_POP_BUSY) {
+ dst_page->vmp_busy = TRUE;
+ }
+ }
+
+ if (ops & UPL_POP_CLR) {
+ assert(dst_page->vmp_busy);
+ if (ops & UPL_POP_DIRTY) {
+ dst_page->vmp_dirty = FALSE;
+ }
+ if (ops & UPL_POP_PAGEOUT) {
+ dst_page->vmp_free_when_done = FALSE;
+ }
+ if (ops & UPL_POP_PRECIOUS) {
+ dst_page->vmp_precious = FALSE;
+ }
+ if (ops & UPL_POP_ABSENT) {
+ dst_page->vmp_absent = FALSE;
+ }
+ if (ops & UPL_POP_BUSY) {
+ dst_page->vmp_busy = FALSE;
+ PAGE_WAKEUP(dst_page);
+ }
+ }
+ if (phys_entry) {
+ /*
+ * The physical page number will remain valid
+ * only if the page is kept busy.
+ */
+ assert(dst_page->vmp_busy);
+ *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
+ }
+
+ break;
+ }
+
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_object_range_op offers performance enhancement over
+ * vm_object_page_op for page_op functions which do not require page
+ * level state to be returned from the call. Page_op was created to provide
+ * a low-cost alternative to page manipulation via UPLs when only a single
+ * page was involved. The range_op call establishes the ability in the _op
+ * family of functions to work on multiple pages where the lack of page level
+ * state handling allows the caller to avoid the overhead of the upl structures.
+ */
+
+kern_return_t
+vm_object_range_op(
+ vm_object_t object,
+ vm_object_offset_t offset_beg,
+ vm_object_offset_t offset_end,
+ int ops,
+ uint32_t *range)
+{
+ vm_object_offset_t offset;
+ vm_page_t dst_page;
+
+ if (offset_end - offset_beg > (uint32_t) -1) {
+ /* range is too big and would overflow "*range" */
+ return KERN_INVALID_ARGUMENT;
+ }
+ if (object->resident_page_count == 0) {
+ /*
+ * NOTE(review): resident_page_count is read before taking the
+ * object lock - presumably an acceptable racy fast path; confirm.
+ */
+ if (range) {
+ if (ops & UPL_ROP_PRESENT) {
+ *range = 0;
+ } else {
+ *range = (uint32_t) (offset_end - offset_beg);
+ assert(*range == (offset_end - offset_beg));
+ }
+ }
+ return KERN_SUCCESS;
+ }
+ vm_object_lock(object);
+
+ if (object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ /* page-align the starting offset */
+ offset = offset_beg & ~PAGE_MASK_64;
+
+ while (offset < offset_end) {
+ dst_page = vm_page_lookup(object, offset);
+ if (dst_page != VM_PAGE_NULL) {
+ if (ops & UPL_ROP_DUMP) {
+ if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
+ /*
+ * someone else is playing with the
+ * page, we will have to wait
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ /*
+ * need to relook the page up since it's
+ * state may have changed while we slept
+ * it might even belong to a different object
+ * at this point
+ */
+ continue;
+ }
+ if (dst_page->vmp_laundry) {
+ vm_pageout_steal_laundry(dst_page, FALSE);
+ }
+
+ if (dst_page->vmp_pmapped == TRUE) {
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
+ }
+
+ VM_PAGE_FREE(dst_page);
+ } else if ((ops & UPL_ROP_ABSENT)
+ && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
+ break;
+ }
+ } else if (ops & UPL_ROP_PRESENT) {
+ break;
+ }
+
+ offset += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ if (range) {
+ /* report how far we got before stopping (clipped to the request) */
+ if (offset > offset_end) {
+ offset = offset_end;
+ }
+ if (offset > offset_beg) {
+ *range = (uint32_t) (offset - offset_beg);
+ assert(*range == (offset - offset_beg));
+ } else {
+ *range = 0;
+ }
+ }
+ return KERN_SUCCESS;
+}
+
+/*
+ * Used to point a pager directly to a range of memory (when the pager may be associated
+ * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
+ * expect that the virtual address will denote the start of a range that is physically contiguous.
+ */
+kern_return_t
+pager_map_to_phys_contiguous(
+ memory_object_control_t object,
+ memory_object_offset_t offset,
+ addr64_t base_vaddr,
+ vm_size_t size)
+{
+ ppnum_t phys_page;
+ boolean_t saved_private;
+ kern_return_t kr;
+ vm_object_t pgobj;
+
+ /* resolve the kernel virtual address to its physical page */
+ phys_page = pmap_find_phys(kernel_pmap, base_vaddr);
+ if (phys_page == 0) {
+ return KERN_FAILURE;
+ }
+
+ pgobj = memory_object_control_to_vm_object(object);
+ if (pgobj == VM_OBJECT_NULL) {
+ return KERN_FAILURE;
+ }
+
+ /* remember the prior setting so we can restore it on failure */
+ saved_private = pgobj->private;
+ if (saved_private != TRUE) {
+ vm_object_lock(pgobj);
+ pgobj->private = TRUE;
+ vm_object_unlock(pgobj);
+ }
+
+ kr = vm_object_populate_with_private(pgobj, offset, phys_page, size);
+
+ if (kr != KERN_SUCCESS && pgobj->private != saved_private) {
+ /* undo the "private" promotion we made above */
+ vm_object_lock(pgobj);
+ pgobj->private = saved_private;
+ vm_object_unlock(pgobj);
+ }
+
+ return kr;
+}
+
+uint32_t scan_object_collision = 0;
+
+void
+vm_object_lock(vm_object_t object)
+{
+ /* back off briefly if the pageout scan thread wants this object */
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ mutex_pause(2);
+ }
+ DTRACE_VM(vm_object_lock_w);
+ lck_rw_lock_exclusive(&object->Lock);
+#if DEVELOPMENT || DEBUG
+ /* record the owner so vm_object_unlock() can verify it */
+ object->Lock_owner = current_thread();
+#endif
+}
+
+/*
+ * Return TRUE (and count the collision) when the pageout scan thread
+ * wants this object, so callers can back off before locking it.
+ */
+boolean_t
+vm_object_lock_avoid(vm_object_t object)
+{
+ if (object != vm_pageout_scan_wants_object) {
+ return FALSE;
+ }
+ scan_object_collision++;
+ return TRUE;
+}
+
+boolean_t
+_vm_object_lock_try(vm_object_t object)
+{
+ boolean_t retval;
+
+ retval = lck_rw_try_lock_exclusive(&object->Lock);
+#if DEVELOPMENT || DEBUG
+ /* record ownership only when the try actually succeeded */
+ if (retval == TRUE) {
+ DTRACE_VM(vm_object_lock_w);
+ object->Lock_owner = current_thread();
+ }
+#endif
+ return retval;
+}
+
+boolean_t
+vm_object_lock_try(vm_object_t object)
+{
+ /*
+ * Called from the hibernate path, so only pause when it is
+ * actually safe to block (interrupts enabled, preemption allowed).
+ */
+ if (vm_object_lock_avoid(object)) {
+ if (ml_get_interrupts_enabled() && get_preemption_level() == 0) {
+ mutex_pause(2);
+ }
+ }
+ return _vm_object_lock_try(object);
+}
+
+void
+vm_object_lock_shared(vm_object_t object)
+{
+ /* give way briefly if the pageout scan thread wants this object */
+ if (vm_object_lock_avoid(object)) {
+ mutex_pause(2);
+ }
+ DTRACE_VM(vm_object_lock_r);
+ lck_rw_lock_shared(&object->Lock);
+}
+
+/*
+ * Drop and re-take the shared object lock if there is contention (or the
+ * pageout scan thread wants this object). Returns TRUE if the lock was
+ * actually yielded.
+ */
+boolean_t
+vm_object_lock_yield_shared(vm_object_t object)
+{
+ boolean_t retval;
+ boolean_t force_yield;
+
+ vm_object_lock_assert_shared(object);
+
+ /* yield eagerly when the pageout scan thread is waiting on us */
+ force_yield = vm_object_lock_avoid(object);
+
+ retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
+ if (retval) {
+ DTRACE_VM(vm_object_lock_yield);
+ }
+
+ return retval;
+}
+
+boolean_t
+vm_object_lock_try_shared(vm_object_t object)
+{
+ boolean_t got_lock;
+
+ /* give way briefly if the pageout scan thread wants this object */
+ if (vm_object_lock_avoid(object)) {
+ mutex_pause(2);
+ }
+
+ got_lock = lck_rw_try_lock_shared(&object->Lock);
+ if (got_lock == TRUE) {
+ DTRACE_VM(vm_object_lock_r);
+ }
+ return got_lock;
+}
+
+boolean_t
+vm_object_lock_upgrade(vm_object_t object)
+{
+ boolean_t retval;
+
+ /*
+ * NOTE(review): on FALSE, lck_rw_lock_shared_to_exclusive is expected
+ * to have dropped the shared hold entirely - confirm against the
+ * lck_rw API contract before relying on the lock state here.
+ */
+ retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
+#if DEVELOPMENT || DEBUG
+ if (retval == TRUE) {
+ DTRACE_VM(vm_object_lock_w);
+ object->Lock_owner = current_thread();
+ }
+#endif
+ return retval;
+}
+
+void
+vm_object_unlock(vm_object_t object)
+{
+#if DEVELOPMENT || DEBUG
+ /* Lock_owner is only set for exclusive holds; shared holds skip this */
+ if (object->Lock_owner) {
+ if (object->Lock_owner != current_thread()) {
+ panic("vm_object_unlock: not owner - %p\n", object);
+ }
+ object->Lock_owner = 0;
+ DTRACE_VM(vm_object_unlock);
+ }
+#endif
+ lck_rw_done(&object->Lock);
+}
+
+
+unsigned int vm_object_change_wimg_mode_count = 0;
+
+/*
+ * The object must be locked
+ */
+void
+vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
+{
+ vm_page_t p;
+
+ vm_object_lock_assert_exclusive(object);
+
+ /* wait for in-flight paging activity before retagging pages */
+ vm_object_paging_wait(object, THREAD_UNINT);
+
+ /* apply the new cache attributes to every real (non-fictitious) page */
+ vm_page_queue_iterate(&object->memq, p, vmp_listq) {
+ if (!p->vmp_fictitious) {
+ pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
+ }
+ }
+ if (wimg_mode == VM_WIMG_USE_DEFAULT) {
+ object->set_cache_attr = FALSE;
+ } else {
+ object->set_cache_attr = TRUE;
+ }
+
+ object->wimg_bits = wimg_mode;
+
+ vm_object_change_wimg_mode_count++;
+}
+
+#if CONFIG_FREEZE
+
+/*
+ * This routine does the "relocation" of previously
+ * compressed pages belonging to this object that are
+ * residing in a number of compressed segments into
+ * a set of compressed segments dedicated to hold
+ * compressed pages belonging to this object.
+ */
+
+extern void *freezer_chead;
+extern char *freezer_compressor_scratch_buf;
+extern int c_freezer_compression_count;
+extern AbsoluteTime c_freezer_last_yield_ts;
+
+#define MAX_FREE_BATCH 32
+#define FREEZER_DUTY_CYCLE_ON_MS 5
+#define FREEZER_DUTY_CYCLE_OFF_MS 5
+
+static int c_freezer_should_yield(void);
+
+
+/*
+ * Return 1 when the freezer has been running longer than its duty-cycle
+ * "on" window since the last yield, 0 otherwise.
+ */
+static int
+c_freezer_should_yield()
+{
+ AbsoluteTime now;
+ uint64_t elapsed_ns;
+
+ assert(c_freezer_last_yield_ts);
+ clock_get_uptime(&now);
+
+ SUB_ABSOLUTETIME(&now, &c_freezer_last_yield_ts);
+ absolutetime_to_nanoseconds(now, &elapsed_ns);
+
+ return (elapsed_ns > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) ? 1 : 0;
+}
+
+
+void
+vm_object_compressed_freezer_done()
+{
+ /* close out the freezer's current compressed segment */
+ vm_compressor_finished_filling(&freezer_chead);
+}
+
+
+/*
+ * Compress the resident pages of "object" (an internal object) into the
+ * freezer's dedicated compressed segments, processing at most
+ * "dirty_budget" dirty pages. Returns the number of pages compressed.
+ */
+uint32_t
+vm_object_compressed_freezer_pageout(
+ vm_object_t object, uint32_t dirty_budget)
+{
+ vm_page_t p;
+ vm_page_t local_freeq = NULL;
+ int local_freed = 0;
+ kern_return_t retval = KERN_SUCCESS;
+ int obj_resident_page_count_snapshot = 0;
+ uint32_t paged_out_count = 0;
+
+ assert(object != VM_OBJECT_NULL);
+ assert(object->internal);
+
+ vm_object_lock(object);
+
+ /* make sure the object has a compressor pager before proceeding */
+ if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
+ if (!object->pager_initialized) {
+ vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
+
+ if (!object->pager_initialized) {
+ vm_object_compressor_pager_create(object);
+ }
+ }
+
+ if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
+ vm_object_unlock(object);
+ return paged_out_count;
+ }
+ }
+
+ if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
+ vm_object_offset_t curr_offset = 0;
+
+ /*
+ * Go through the object and make sure that any
+ * previously compressed pages are relocated into
+ * a compressed segment associated with our "freezer_chead".
+ */
+ while (curr_offset < object->vo_size) {
+ curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
+
+ if (curr_offset == (vm_object_offset_t) -1) {
+ break;
+ }
+
+ retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead);
+
+ if (retval != KERN_SUCCESS) {
+ break;
+ }
+
+ curr_offset += PAGE_SIZE_64;
+ }
+ }
+
+ /*
+ * We can't hold the object lock while heading down into the compressed pager
+ * layer because we might need the kernel map lock down there to allocate new
+ * compressor data structures. And if this same object is mapped in the kernel
+ * and there's a fault on it, then that thread will want the object lock while
+ * holding the kernel map lock.
+ *
+ * Since we are going to drop/grab the object lock repeatedly, we must make sure
+ * we won't be stuck in an infinite loop if the same page(s) keep getting
+ * decompressed. So we grab a snapshot of the number of pages in the object and
+ * we won't process any more than that number of pages.
+ */
+
+ obj_resident_page_count_snapshot = object->resident_page_count;
+
+ vm_object_activity_begin(object);
+
+ while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
+ p = (vm_page_t)vm_page_queue_first(&object->memq);
+
+ KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
+
+ vm_page_lockspin_queues();
+
+ if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) {
+ vm_page_unlock_queues();
+
+ KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
+
+ /* page can't be processed now: rotate it to the tail so we move on */
+ vm_page_queue_remove(&object->memq, p, vmp_listq);
+ vm_page_queue_enter(&object->memq, p, vmp_listq);
+
+ continue;
+ }
+
+ if (p->vmp_pmapped == TRUE) {
+ int refmod_state, pmap_flags;
+
+ if (p->vmp_dirty || p->vmp_precious) {
+ pmap_flags = PMAP_OPTIONS_COMPRESSOR;
+ } else {
+ pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+ }
+
+ refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(p, FALSE);
+ }
+ }
+
+ if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
+ /*
+ * Clean and non-precious page.
+ */
+ vm_page_unlock_queues();
+ VM_PAGE_FREE(p);
+
+ KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
+ continue;
+ }
+
+ if (p->vmp_laundry) {
+ vm_pageout_steal_laundry(p, TRUE);
+ }
+
+ vm_page_queues_remove(p, TRUE);
+
+ vm_page_unlock_queues();
+
+
+ /*
+ * In case the compressor fails to compress this page, we need it at
+ * the back of the object memq so that we don't keep trying to process it.
+ * Make the move here while we have the object lock held.
+ */
+
+ vm_page_queue_remove(&object->memq, p, vmp_listq);
+ vm_page_queue_enter(&object->memq, p, vmp_listq);
+
+ /*
+ * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
+ *
+ * Mark the page busy so no one messes with it while we have the object lock dropped.
+ */
+ p->vmp_busy = TRUE;
+
+ vm_object_activity_begin(object);
+
+ vm_object_unlock(object);
+
+ if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p) == KERN_SUCCESS) {
+ /*
+ * page has already been un-tabled from the object via 'vm_page_remove'
+ */
+ p->vmp_snext = local_freeq;
+ local_freeq = p;
+ local_freed++;
+ paged_out_count++;
+
+ if (local_freed >= MAX_FREE_BATCH) {
+ OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ c_freezer_compression_count++;
+ }
+ KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
+
+ /* honor the freezer duty cycle while we're off the object lock */
+ if (local_freed == 0 && c_freezer_should_yield()) {
+ thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
+ clock_get_uptime(&c_freezer_last_yield_ts);
+ }
+
+ vm_object_lock(object);
+ }
+
+ if (local_freeq) {
+ OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+
+ vm_object_activity_end(object);
+
+ vm_object_unlock(object);
+
+ if (c_freezer_should_yield()) {
+ thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
+ clock_get_uptime(&c_freezer_last_yield_ts);
+ }
+ return paged_out_count;
+}
+
+#endif /* CONFIG_FREEZE */
+
+
+/*
+ * Push all dirty/precious resident pages of an internal object to the
+ * compressor pageout queue; clean, non-precious pages are freed instead.
+ */
+void
+vm_object_pageout(
+ vm_object_t object)
+{
+ vm_page_t p, next;
+ struct vm_pageout_queue *iq;
+
+ if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+ return;
+ }
+
+ iq = &vm_pageout_queue_internal;
+
+ assert(object != VM_OBJECT_NULL );
+
+ vm_object_lock(object);
+
+ if (!object->internal ||
+ object->terminating ||
+ !object->alive) {
+ vm_object_unlock(object);
+ return;
+ }
+
+ /* make sure the object has a compressor pager before proceeding */
+ if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
+ if (!object->pager_initialized) {
+ vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
+
+ if (!object->pager_initialized) {
+ vm_object_compressor_pager_create(object);
+ }
+ }
+
+ if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
+ vm_object_unlock(object);
+ return;
+ }
+ }
+
+ReScan:
+ next = (vm_page_t)vm_page_queue_first(&object->memq);
+
+ while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
+
+ assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
+
+ if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
+ p->vmp_cleaning ||
+ p->vmp_laundry ||
+ p->vmp_busy ||
+ p->vmp_absent ||
+ p->vmp_error ||
+ p->vmp_fictitious ||
+ VM_PAGE_WIRED(p)) {
+ /*
+ * Page is already being cleaned or can't be cleaned.
+ */
+ continue;
+ }
+ if (vm_compressor_low_on_space()) {
+ break;
+ }
+
+ /* Throw to the pageout queue */
+
+ vm_page_lockspin_queues();
+
+ if (VM_PAGE_Q_THROTTLED(iq)) {
+ /*
+ * internal pageout queue is full: wait for it to drain,
+ * then restart the scan (the memq may have changed)
+ */
+ iq->pgo_draining = TRUE;
+
+ assert_wait((event_t) (&iq->pgo_laundry + 1),
+ THREAD_INTERRUPTIBLE);
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ vm_object_lock(object);
+ goto ReScan;
+ }
+
+ assert(!p->vmp_fictitious);
+ assert(!p->vmp_busy);
+ assert(!p->vmp_absent);
+ assert(!p->vmp_unusual);
+ assert(!p->vmp_error);
+ assert(!VM_PAGE_WIRED(p));
+ assert(!p->vmp_cleaning);
+
+ if (p->vmp_pmapped == TRUE) {
+ int refmod_state;
+ int pmap_options;
+
+ /*
+ * Tell pmap the page should be accounted
+ * for as "compressed" if it's been modified.
+ */
+ pmap_options =
+ PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+ if (p->vmp_dirty || p->vmp_precious) {
+ /*
+ * We already know it's been modified,
+ * so tell pmap to account for it
+ * as "compressed".
+ */
+ pmap_options = PMAP_OPTIONS_COMPRESSOR;
+ }
+ refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
+ pmap_options,
+ NULL);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(p, FALSE);
+ }
+ }
+
+ if (!p->vmp_dirty && !p->vmp_precious) {
+ /* clean and non-precious: just free it */
+ vm_page_unlock_queues();
+ VM_PAGE_FREE(p);
+ continue;
+ }
+ vm_page_queues_remove(p, TRUE);
+
+ vm_pageout_cluster(p);
+
+ vm_page_unlock_queues();
+ }
+ vm_object_unlock(object);
+}
+
+
+#if CONFIG_IOSCHED
+/*
+ * Queue a request to reprioritize an in-flight I/O range (blkno/len) on
+ * the device backing object "o", then wake the reprioritize thread.
+ */
+void
+vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
+{
+ io_reprioritize_req_t req;
+ struct vnode *devvp = NULL;
+
+ if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
+ return;
+ }
+
+ /*
+ * Create the request for I/O reprioritization.
+ * We use the noblock variant of zalloc because we're holding the object
+ * lock here and we could cause a deadlock in low memory conditions.
+ */
+ req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
+ if (req == NULL) {
+ /* allocation failed: silently drop the hint; it's best-effort */
+ return;
+ }
+ req->blkno = blkno;
+ req->len = len;
+ req->priority = prio;
+ req->devvp = devvp;
+
+ /* Insert request into the reprioritization list */
+ IO_REPRIORITIZE_LIST_LOCK();
+ queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
+ IO_REPRIORITIZE_LIST_UNLOCK();
+
+ /* Wakeup reprioritize thread */
+ IO_REPRIO_THREAD_WAKEUP();
+
+ return;
+}
+
+void
+vm_decmp_upl_reprioritize(upl_t upl, int prio)
+{
+ int offset;
+ vm_object_t object;
+ io_reprioritize_req_t req;
+ struct vnode *devvp = NULL;
+ uint64_t blkno;
+ uint32_t len;
+ upl_t io_upl;
+ uint64_t *io_upl_reprio_info;
+ int io_upl_size;
+
+ if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
+ return;