+int vm_default_ahead = 0;
+int vm_default_behind = MAX_UPL_TRANSFER;
+
+#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
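+
+/*
+ * Note: object->sequential is a signed byte count of the current run: it
+ * grows for ascending faults, goes negative for descending faults, and is
+ * clamped at MAX_SEQUENTIAL_RUN in either direction so it cannot overflow.
+ * vm_default_behind gives, in pages, how far behind a default-behavior
+ * sequential run vm_fault_deactivate_behind() will reach to deactivate pages.
+ */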
+
+/*
+ * vm_fault_is_sequential
+ *
+ * Determine if sequential access is in progress
+ * in accordance with the behavior specified.
+ * Update state to indicate current access pattern.
+ *
+ * object must have at least the shared lock held
+ */
+static
+void
+vm_fault_is_sequential(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_behavior_t behavior)
+{
+ vm_object_offset_t last_alloc;
+ int sequential;
+ int orig_sequential;
+
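+ /*
+ * take an unlocked snapshot of the object's sequential state... the
+ * updated value is published below with a compare-and-swap, so if
+ * another thread races us and updates object->sequential first, its
+ * update wins
+ */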
+ last_alloc = object->last_alloc;
+ sequential = object->sequential;
+ orig_sequential = sequential;
+
+ switch (behavior) {
+ case VM_BEHAVIOR_RANDOM:
+ /*
+ * reset indicator of sequential behavior
+ */
+ sequential = 0;
+ break;
+
+ case VM_BEHAVIOR_SEQUENTIAL:
+ if (offset && last_alloc == offset - PAGE_SIZE_64) {
+ /*
+ * advance indicator of sequential behavior
+ */
+ if (sequential < MAX_SEQUENTIAL_RUN)
+ sequential += PAGE_SIZE;
+ } else {
+ /*
+ * reset indicator of sequential behavior
+ */
+ sequential = 0;
+ }
+ break;
+
+ case VM_BEHAVIOR_RSEQNTL:
+ if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
+ /*
+ * advance indicator of sequential behavior
+ */
+ if (sequential > -MAX_SEQUENTIAL_RUN)
+ sequential -= PAGE_SIZE;
+ } else {
+ /*
+ * reset indicator of sequential behavior
+ */
+ sequential = 0;
+ }
+ break;
+
+ case VM_BEHAVIOR_DEFAULT:
+ default:
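+ /*
+ * default behavior tracks runs in either direction... if the
+ * direction of the run changes, restart the count before
+ * advancing it
+ */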
+ if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
+ /*
+ * advance indicator of sequential behavior
+ */
+ if (sequential < 0)
+ sequential = 0;
+ if (sequential < MAX_SEQUENTIAL_RUN)
+ sequential += PAGE_SIZE;
+
+ } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
+ /*
+ * advance indicator of sequential behavior
+ */
+ if (sequential > 0)
+ sequential = 0;
+ if (sequential > -MAX_SEQUENTIAL_RUN)
+ sequential -= PAGE_SIZE;
+ } else {
+ /*
+ * reset indicator of sequential behavior
+ */
+ sequential = 0;
+ }
+ break;
+ }
+ if (sequential != orig_sequential) {
+ if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
+ /*
+ * if someone else has already updated object->sequential
+ * don't bother trying to update it or object->last_alloc
+ */
+ return;
+ }
+ }
+ /*
+ * I'd like to do this with an OSCompareAndSwap64, but that
+ * doesn't exist for PPC...  however, it shouldn't matter much.
+ * last_alloc is maintained so that we can determine whether a
+ * sequential access pattern is taking place.  If only one thread
+ * is banging on this object, there is no problem with the
+ * unprotected update.  If 2 or more threads are banging away, we
+ * run the risk of someone seeing a mangled update...  however, in
+ * the face of multiple accesses, no sequential access pattern can
+ * develop anyway, so we haven't lost any real info.
+ */
+ object->last_alloc = offset;
+}
+
+
+/*
+ * vm_fault_deactivate_behind
+ *
+ * Determine if sequential access is in progress
+ * in accordance with the behavior specified. If
+ * so, compute a potential page to deactivate and
+ * deactivate it.
+ *
+ * object must be locked.
+ *
+ * return TRUE if we actually deactivate a page
+ */
+static
+boolean_t
+vm_fault_deactivate_behind(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_behavior_t behavior)
+{
+ vm_page_t m = NULL;
+ int sequential_run;
+ int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+
+#if TRACEFAULTPAGE
+ dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
+#endif
+
+ if (object == kernel_object || vm_page_deactivate_behind == FALSE) {
+ /*
+ * Do not deactivate pages from the kernel object: they
+ * are not intended to become pageable.  Also bail out if
+ * the deactivate-behind mechanism has been disabled.
+ */
+ return FALSE;
+ }
+ if ((sequential_run = object->sequential)) {
+ if (sequential_run < 0) {
+ sequential_behavior = VM_BEHAVIOR_RSEQNTL;
+ sequential_run = 0 - sequential_run;
+ } else {
+ sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ }
+ }
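+ /*
+ * at this point sequential_run holds the magnitude of the run in
+ * bytes and sequential_behavior records its direction
+ */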
+ switch (behavior) {
+ case VM_BEHAVIOR_RANDOM:
+ break;
+ case VM_BEHAVIOR_SEQUENTIAL:
+ if (sequential_run >= (int)PAGE_SIZE)
+ m = vm_page_lookup(object, offset - PAGE_SIZE_64);
+ break;
+ case VM_BEHAVIOR_RSEQNTL:
+ if (sequential_run >= (int)PAGE_SIZE)
+ m = vm_page_lookup(object, offset + PAGE_SIZE_64);
+ break;
+ case VM_BEHAVIOR_DEFAULT:
+ default:
+ { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+ /*
+ * determine if the run of sequential access has been
+ * long enough on an object with default access behavior
+ * to consider it for deactivation
+ */
+ if ((uint64_t)sequential_run >= behind) {
+ if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
+ if (offset >= behind)
+ m = vm_page_lookup(object, offset - behind);
+ } else {
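+ /*
+ * offset and behind are unsigned, so "offset < -behind" verifies
+ * that offset + behind won't wrap around
+ */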
+ if (offset < -behind)
+ m = vm_page_lookup(object, offset + behind);
+ }
+ }
+ break;
+ }
+ }
+ if (m) {
+ if (!m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
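+ /*
+ * clear the hardware reference bit and mark the page deactivated;
+ * note that this is done without taking the page queue lock
+ */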
+ pmap_clear_reference(m->phys_page);
+ m->deactivated = TRUE;
+#if TRACEFAULTPAGE
+ dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
+#endif
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+
+/*
+ * check for various conditions that would
+ * prevent us from creating a ZF (zero-fill) page...
+ * the cleanup done here assumes we were called from vm_fault_page
+ *
+ * object must be locked
+ * object == m->object
+ */
+static vm_fault_return_t
+vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
+{
+ if (object->shadow_severed) {
+ /*
+ * the shadow chain was severed;
+ * just return an error at this point
+ */
+ if (m != VM_PAGE_NULL)
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+
+ thread_interrupt_level(interruptible_state);
+
+ return (VM_FAULT_MEMORY_ERROR);
+ }
+ if (vm_backing_store_low) {
+ /*
+ * are we protecting the system from
+ * backing store exhaustion?  If so,
+ * sleep unless we are privileged.
+ */
+ if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) {
+
+ if (m != VM_PAGE_NULL)
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+
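+ /*
+ * wait until the low-backing-store condition has been
+ * cleared, then have the caller retry the fault
+ */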
+ assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT);
+
+ thread_block(THREAD_CONTINUE_NULL);
+ thread_interrupt_level(interruptible_state);
+
+ return (VM_FAULT_RETRY);
+ }
+ }
+ if (VM_PAGE_ZFILL_THROTTLED()) {
+ /*
+ * we're throttling zero-fills...
+ * treat this as if we couldn't grab a page
+ */
+ if (m != VM_PAGE_NULL)
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, first_m);
+
+ thread_interrupt_level(interruptible_state);
+
+ return (VM_FAULT_MEMORY_SHORTAGE);
+ }
+ return (VM_FAULT_SUCCESS);
+}
+
+
+/*
+ * do the work to zero fill a page and
+ * inject it into the correct paging queue
+ *
+ * m->object must be locked
+ * page queue lock must NOT be held
+ */
+static int
+vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
+{
+ int my_fault = DBG_ZERO_FILL_FAULT;
+
+ /*
+ * This is a zero-fill page fault...
+ *
+ * Checking the page lock is a waste of
+ * time; this page was absent, so
+ * it can't be page locked by a pager.
+ *
+ * We also consider the page undefined
+ * with respect to instruction
+ * execution, i.e. it is the responsibility
+ * of higher layers to call for an instruction
+ * sync after changing the contents and before
+ * sending a program into this area.  We
+ * choose this approach for performance.
+ */
+ m->pmapped = TRUE;
+
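+ /*
+ * the page's contents are being (re)created, so any previous
+ * code-signing validation state no longer applies
+ */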
+ m->cs_validated = FALSE;
+ m->cs_tainted = FALSE;
+
+ if (no_zero_fill == TRUE)
+ my_fault = DBG_NZF_PAGE_FAULT;
+ else {
+ vm_page_zero_fill(m);
+
+ VM_STAT_INCR(zero_fill_count);
+ DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
+ }
+ assert(!m->laundry);
+ assert(m->object != kernel_object);
+ //assert(m->pageq.next == NULL && m->pageq.prev == NULL);
+
+ if (!IP_VALID(memory_manager_default) &&
+ (m->object->purgable == VM_PURGABLE_DENY ||
+ m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m->object->purgable == VM_PURGABLE_VOLATILE )) {
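+ /*
+ * no default pager is registered yet, so there is nowhere to
+ * page this out to... unless the object is purgeable-empty,
+ * keep the page on the throttled queue instead of the normal
+ * aging queues
+ */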
+ vm_page_lock_queues();
+
+ queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+ m->throttled = TRUE;
+ vm_page_throttled_count++;
+
+ vm_page_unlock_queues();
+ } else {
+ if (m->object->size > VM_ZF_OBJECT_SIZE_THRESHOLD) {
+ m->zero_fill = TRUE;
+ OSAddAtomic(1, (SInt32 *)&vm_zf_count);
+ }
+ }
+ return (my_fault);
+}