diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index e94ac06f4fc3690743d7b8a4b92d20371a3823f3..c83ae023c154609e9cbc14e08e26bc1f70f2573f 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -1,21 +1,24 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -58,6 +61,7 @@
 #ifdef MACH_BSD
 /* remove after component interface available */
 extern int     vnode_pager_workaround;
+extern int     device_pager_workaround;
 #endif
 
 #include <mach_cluster_stats.h>
@@ -74,9 +78,12 @@ extern int   vnode_pager_workaround;
 #include <kern/sched_prim.h>
 #include <kern/host.h>
 #include <kern/xpr.h>
+#include <ppc/proc_reg.h>
+#include <vm/task_working_set.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_kern.h>
 #include <vm/pmap.h>
 #include <vm/vm_pageout.h>
 #include <mach/vm_param.h>
@@ -98,9 +105,6 @@ extern int   vnode_pager_workaround;
 int            vm_object_absent_max = 50;
 
 int            vm_fault_debug = 0;
-boolean_t      vm_page_deactivate_behind = TRUE;
-
-vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC;
 
 #if    !VM_FAULT_STATIC_CONFIG
 boolean_t      vm_fault_dirty_handling = FALSE;
@@ -117,7 +121,8 @@ extern kern_return_t vm_fault_wire_fast(
                                vm_map_t        map,
                                vm_offset_t     va,
                                vm_map_entry_t  entry,
-                               pmap_t          pmap);
+                               pmap_t          pmap,
+                               vm_offset_t     pmap_addr);
 
 extern void vm_fault_continue(void);
 
@@ -199,13 +204,107 @@ struct {
 boolean_t vm_allow_clustered_pagein = FALSE;
 int vm_pagein_cluster_used = 0;
 
+#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
+
+
+boolean_t      vm_page_deactivate_behind = TRUE;
 /* 
  * Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior 
  */
-int vm_default_ahead = 1;      /* Number of pages to prepage ahead */
-int vm_default_behind = 0;     /* Number of pages to prepage behind */
+int vm_default_ahead = 0;
+int vm_default_behind = MAX_UPL_TRANSFER;
+
+/*
+ *     vm_fault_deactivate_behind
+ *
+ *     Determine if sequential access is in progress
+ *     in accordance with the behavior specified.  If
+ *     so, compute a potential page to deactivate and
+ *     deactivate it.
+ *
+ *     The object must be locked.
+ */
+static
+boolean_t
+vm_fault_deactivate_behind(
+       vm_object_t object,
+       vm_offset_t offset,
+       vm_behavior_t behavior)
+{
+       vm_page_t m;
+
+#if TRACEFAULTPAGE
+       dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
+#endif
+
+       switch (behavior) {
+       case VM_BEHAVIOR_RANDOM:
+               object->sequential = PAGE_SIZE_64;
+               m = VM_PAGE_NULL;
+               break;
+       case VM_BEHAVIOR_SEQUENTIAL:
+               if (offset &&
+                       object->last_alloc == offset - PAGE_SIZE_64) {
+                       object->sequential += PAGE_SIZE_64;
+                       m = vm_page_lookup(object, offset - PAGE_SIZE_64);
+               } else {
+                       object->sequential = PAGE_SIZE_64; /* reset */
+                       m = VM_PAGE_NULL;
+               }
+               break;
+       case VM_BEHAVIOR_RSEQNTL:
+               if (object->last_alloc &&
+                       object->last_alloc == offset + PAGE_SIZE_64) {
+                       object->sequential += PAGE_SIZE_64;
+                       m = vm_page_lookup(object, offset + PAGE_SIZE_64);
+               } else {
+                       object->sequential = PAGE_SIZE_64; /* reset */
+                       m = VM_PAGE_NULL;
+               }
+               break;
+       case VM_BEHAVIOR_DEFAULT:
+       default:
+               if (offset && 
+                       object->last_alloc == offset - PAGE_SIZE_64) {
+                       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+                       object->sequential += PAGE_SIZE_64;
+                       m = (offset >= behind &&
+                               object->sequential >= behind) ?
+                               vm_page_lookup(object, offset - behind) :
+                               VM_PAGE_NULL;
+               } else if (object->last_alloc &&
+                       object->last_alloc == offset + PAGE_SIZE_64) {
+                       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+                       object->sequential += PAGE_SIZE_64;
+                       m = (offset < -behind &&
+                               object->sequential >= behind) ?
+                               vm_page_lookup(object, offset + behind) :
+                               VM_PAGE_NULL;
+               } else {
+                       object->sequential = PAGE_SIZE_64;
+                       m = VM_PAGE_NULL;
+               }
+               break;
+       }
+
+       object->last_alloc = offset;
+
+       if (m) {
+               if (!m->busy) {
+                       vm_page_lock_queues();
+                       vm_page_deactivate(m);
+                       vm_page_unlock_queues();
+#if TRACEFAULTPAGE
+                       dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
+#endif
+               }
+               return TRUE;
+       }
+       return FALSE;
+}
 
-#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
 
 /*
  *     Routine:        vm_fault_page
@@ -274,9 +373,11 @@ vm_fault_page(
        /* More arguments: */
        kern_return_t   *error_code,    /* code if page is in error */
        boolean_t       no_zero_fill,   /* don't zero fill absent pages */
-       boolean_t       data_supply)    /* treat as data_supply if 
+       boolean_t       data_supply,    /* treat as data_supply if 
                                         * it is a write fault and a full
                                         * page is provided */
+       vm_map_t        map,
+       vm_offset_t     vaddr)
 {
        register
        vm_page_t               m;
@@ -297,13 +398,9 @@ vm_fault_page(
        CLUSTER_STAT(int pages_at_higher_offsets;)
        CLUSTER_STAT(int pages_at_lower_offsets;)
        kern_return_t   wait_result;
-       thread_t                cur_thread;
        boolean_t               interruptible_state;
+       boolean_t               bumped_pagein = FALSE;
 
-#ifdef MACH_BSD
-       kern_return_t   vnode_pager_data_request(ipc_port_t, 
-                       ipc_port_t, vm_object_offset_t, vm_size_t, vm_prot_t);
-#endif
 
 #if    MACH_PAGEMAP
 /*
@@ -428,11 +525,7 @@ vm_fault_page(
 #endif /* MACH_KDB */
 #endif /* STATIC_CONFIG */
 
-       cur_thread = current_thread();
-
-       interruptible_state = cur_thread->interruptible;
-       if (interruptible == THREAD_UNINT)
-               cur_thread->interruptible = FALSE;
+       interruptible_state = thread_interrupt_level(interruptible);
  
        /*
         *      INVARIANTS (through entire routine):
@@ -489,7 +582,7 @@ vm_fault_page(
 #endif
                if (!object->alive) {
                        vm_fault_cleanup(object, first_m);
-                       cur_thread->interruptible = interruptible_state;
+                       thread_interrupt_level(interruptible_state);
                        return(VM_FAULT_MEMORY_ERROR);
                }
                m = vm_page_lookup(object, offset);
@@ -520,19 +613,16 @@ vm_fault_page(
 #if TRACEFAULTPAGE
                                dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
 #endif
-                               PAGE_ASSERT_WAIT(m, interruptible);
-                               vm_object_unlock(object);
+                               wait_result = PAGE_SLEEP(object, m, interruptible);
                                XPR(XPR_VM_FAULT,
                                    "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
                                        (integer_t)object, offset,
                                        (integer_t)m, 0, 0);
                                counter(c_vm_fault_page_block_busy_kernel++);
-                               wait_result = thread_block((void (*)(void))0);
 
-                               vm_object_lock(object);
                                if (wait_result != THREAD_AWAKENED) {
                                        vm_fault_cleanup(object, first_m);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        if (wait_result == THREAD_RESTART)
                                          {
                                                return(VM_FAULT_RETRY);
@@ -557,7 +647,7 @@ vm_fault_page(
                                        *error_code = m->page_error;
                                VM_PAGE_FREE(m);
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_MEMORY_ERROR);
                        }
 
@@ -574,7 +664,7 @@ vm_fault_page(
 #endif
                                VM_PAGE_FREE(m);
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_RETRY);
                        }
 
@@ -602,7 +692,7 @@ vm_fault_page(
                                        if (object->shadow_severed) {
                                                vm_fault_cleanup(
                                                        object, first_m);
-                                               cur_thread->interruptible = interruptible_state;
+                                               thread_interrupt_level(interruptible_state);
                                                return VM_FAULT_MEMORY_ERROR;
                                        }
 
@@ -614,12 +704,38 @@ vm_fault_page(
                                         * need to allocate a real page.
                                         */
                                        if (VM_PAGE_THROTTLED() ||
-                                           (real_m = vm_page_grab()) == VM_PAGE_NULL) {
-                                               vm_fault_cleanup(object, first_m);
-                                               cur_thread->interruptible = interruptible_state;
-                                               return(VM_FAULT_MEMORY_SHORTAGE);
+                                           (real_m = vm_page_grab()) 
+                                                       == VM_PAGE_NULL) {
+                                               vm_fault_cleanup(
+                                                       object, first_m);
+                                               thread_interrupt_level(
+                                                       interruptible_state);
+                                               return(
+                                                  VM_FAULT_MEMORY_SHORTAGE);
                                        }
 
+                                       /*
+                                        * Are we protecting the system from
+                                        * backing store exhaustion?  If so,
+                                        * sleep unless we are privileged.
+                                        */
+
+                                       if(vm_backing_store_low) {
+                                          if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                               assert_wait((event_t) 
+                                                       &vm_backing_store_low, 
+                                                       THREAD_UNINT);
+                                               vm_fault_cleanup(object, 
+                                                                   first_m);
+                                               thread_block((void(*)(void)) 0);
+                                               thread_interrupt_level(
+                                                       interruptible_state);
+                                               return(VM_FAULT_RETRY);
+                                          }
+                                       }
+
+
                                        XPR(XPR_VM_FAULT,
              "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
                                                (integer_t)object, offset,
@@ -650,19 +766,48 @@ vm_fault_page(
                                         *  newly allocated -- in both cases
                                         *  it can't be page locked by a pager.
                                         */
+                                       m->no_isync = FALSE;
+
                                        if (!no_zero_fill) {
                                                vm_object_unlock(object);
                                                vm_page_zero_fill(m);
-                                               if (type_of_fault)
-                                                       *type_of_fault = DBG_ZERO_FILL_FAULT;
-                                               VM_STAT(zero_fill_count++);
                                                vm_object_lock(object);
                                        }
-                                       pmap_clear_modify(m->phys_addr);
+                                       if (type_of_fault)
+                                               *type_of_fault = DBG_ZERO_FILL_FAULT;
+                                       VM_STAT(zero_fill_count++);
+
+                                       if (bumped_pagein == TRUE) {
+                                               VM_STAT(pageins--);
+                                               current_task()->pageins--;
+                                       }
+#if 0
+                                       pmap_clear_modify(m->phys_page);
+#endif
                                        vm_page_lock_queues();
                                        VM_PAGE_QUEUES_REMOVE(m);
-                                       queue_enter(&vm_page_queue_inactive, 
+                                       m->page_ticket = vm_page_ticket;
+                                       if(m->object->size > 0x80000) {
+                                               m->zero_fill = TRUE;
+                                               /* depends on the queues lock */
+                                               vm_zf_count += 1;
+                                               queue_enter(&vm_page_queue_zf, 
                                                        m, vm_page_t, pageq);
+                                       } else {
+                                               queue_enter(
+                                                       &vm_page_queue_inactive, 
+                                                       m, vm_page_t, pageq);
+                                       }
+                                       vm_page_ticket_roll++;
+                                       if(vm_page_ticket_roll == 
+                                               VM_PAGE_TICKETS_IN_ROLL) {
+                                               vm_page_ticket_roll = 0;
+                                               if(vm_page_ticket == 
+                                                    VM_PAGE_TICKET_ROLL_IDS)
+                                                       vm_page_ticket= 0;
+                                               else
+                                                       vm_page_ticket++;
+                                       }
                                        m->inactive = TRUE;
                                        vm_page_inactive_count++;
                                        vm_page_unlock_queues();
@@ -734,13 +879,13 @@ vm_fault_page(
                                if (m != VM_PAGE_NULL && m->cleaning) {
                                        PAGE_ASSERT_WAIT(m, interruptible);
                                        vm_object_unlock(object);
-                                       wait_result = thread_block((void (*)(void)) 0);
+                                       wait_result = thread_block(THREAD_CONTINUE_NULL);
                                        vm_object_deallocate(object);
                                        goto backoff;
                                } else {
                                        vm_object_unlock(object);
                                        vm_object_deallocate(object);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_RETRY;
                                }
                        }
@@ -774,18 +919,19 @@ vm_fault_page(
                                                vm_object_lock(object);
                                                assert(object->ref_count > 0);
                                                if (!object->pager_ready) {
-                                                       vm_object_assert_wait(
+                                                       wait_result = vm_object_assert_wait(
                                                                object,
                                                                VM_OBJECT_EVENT_PAGER_READY,
                                                                interruptible);
                                                        vm_object_unlock(object);
-                                                       wait_result = thread_block((void (*)(void))0);
+                                                       if (wait_result == THREAD_WAITING)
+                                                               wait_result = thread_block(THREAD_CONTINUE_NULL);
                                                        vm_object_deallocate(object);
                                                        goto backoff;
                                                } else {
                                                        vm_object_unlock(object);
                                                        vm_object_deallocate(object);
-                                                       cur_thread->interruptible = interruptible_state;
+                                                       thread_interrupt_level(interruptible_state);
                                                        return VM_FAULT_RETRY;
                                                }
                                        }
@@ -799,7 +945,6 @@ vm_fault_page(
                                        (integer_t)m, new_unlock_request, 0);
                                        if ((rc = memory_object_data_unlock(
                                                object->pager,
-                                               object->pager_request,
                                                offset + object->paging_offset,
                                                PAGE_SIZE,
                                                new_unlock_request))
@@ -808,7 +953,7 @@ vm_fault_page(
                                                    printf("vm_fault: memory_object_data_unlock failed\n");
                                                vm_object_lock(object);
                                                vm_fault_cleanup(object, first_m);
-                                               cur_thread->interruptible = interruptible_state;
+                                               thread_interrupt_level(interruptible_state);
                                                return((rc == MACH_SEND_INTERRUPTED) ?
                                                        VM_FAULT_INTERRUPTED :
                                                        VM_FAULT_MEMORY_ERROR);
@@ -835,13 +980,13 @@ vm_fault_page(
                                    !((access_required & m->unlock_request) != access_required)) {
                                        PAGE_ASSERT_WAIT(m, interruptible);
                                        vm_object_unlock(object);
-                                       wait_result = thread_block((void (*)(void)) 0);
+                                       wait_result = thread_block(THREAD_CONTINUE_NULL);
                                        vm_object_deallocate(object);
                                        goto backoff;
                                } else {
                                        vm_object_unlock(object);
                                        vm_object_deallocate(object);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_RETRY;
                                }
                        }
@@ -884,7 +1029,8 @@ vm_fault_page(
                dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
 #endif
                if ((look_for_page || (object == first_object))
-                                && !must_be_resident) {
+                               && !must_be_resident 
+                               && !(object->phys_contiguous))  {
                        /*
                         *      Allocate a new page for this object/offset
                         *      pair.
@@ -896,13 +1042,13 @@ vm_fault_page(
 #endif
                        if (m == VM_PAGE_NULL) {
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_FICTITIOUS_SHORTAGE);
                        }
                        vm_page_insert(m, object, offset);
                }
 
-               if (look_for_page && !must_be_resident) {
+               if ((look_for_page && !must_be_resident)) {
                        kern_return_t   rc;
 
                        /*
@@ -913,7 +1059,8 @@ vm_fault_page(
 #if TRACEFAULTPAGE
                                dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
 #endif
-                               VM_PAGE_FREE(m);
+                               if(m != VM_PAGE_NULL)
+                                       VM_PAGE_FREE(m);
                                XPR(XPR_VM_FAULT,
                                "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
                                        (integer_t)object, offset, 0, 0, 0);
@@ -926,21 +1073,29 @@ vm_fault_page(
                                vm_object_lock(object);
                                assert(object->ref_count > 0);
                                if (!object->pager_ready) {
-                                       vm_object_assert_wait(object,
+                                       wait_result = vm_object_assert_wait(object,
                                                              VM_OBJECT_EVENT_PAGER_READY,
                                                              interruptible);
                                        vm_object_unlock(object);
-                                       wait_result = thread_block((void (*)(void))0);
+                                       if (wait_result == THREAD_WAITING)
+                                               wait_result = thread_block(THREAD_CONTINUE_NULL);
                                        vm_object_deallocate(object);
                                        goto backoff;
                                } else {
                                        vm_object_unlock(object);
                                        vm_object_deallocate(object);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_RETRY;
                                }
                        }
 
+                       if(object->phys_contiguous) {
+                               if(m != VM_PAGE_NULL) {
+                                       VM_PAGE_FREE(m);
+                                       m = VM_PAGE_NULL;
+                               }
+                               goto no_clustering;
+                       }
                        if (object->internal) {
                                /*
                                 *      Requests to the default pager
@@ -958,7 +1113,7 @@ vm_fault_page(
                                if (m->fictitious && !vm_page_convert(m)) {
                                        VM_PAGE_FREE(m);
                                        vm_fault_cleanup(object, first_m);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        return(VM_FAULT_MEMORY_SHORTAGE);
                                }
                        } else if (object->absent_count >
@@ -972,7 +1127,8 @@ vm_fault_page(
 #if TRACEFAULTPAGE
                                dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
 #endif
-                               VM_PAGE_FREE(m);
+                               if(m != VM_PAGE_NULL)
+                                       VM_PAGE_FREE(m);
                                /* take an extra ref so object won't die */
                                assert(object->ref_count > 0);
                                object->ref_count++;
@@ -985,13 +1141,13 @@ vm_fault_page(
                                        vm_object_absent_assert_wait(object,
                                                                     interruptible);
                                        vm_object_unlock(object);
-                                       wait_result = thread_block((void (*)(void))0);
+                                       wait_result = thread_block(THREAD_CONTINUE_NULL);
                                        vm_object_deallocate(object);
                                        goto backoff;
                                } else {
                                        vm_object_unlock(object);
                                        vm_object_deallocate(object);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_RETRY;
                                }
                        }
@@ -1001,233 +1157,39 @@ vm_fault_page(
                         *      from the memory manager.
                         */
 
-                       m->list_req_pending = TRUE;
-                       m->absent = TRUE;
-                       m->unusual = TRUE;
-                       object->absent_count++;
-
-                       cluster_start = offset;
-                       length = PAGE_SIZE;
-                       cluster_size = object->cluster_size;
-
-                       /*
-                        * Skip clustered pagein if it is globally disabled 
-                        * or random page reference behavior is expected
-                        * for the address range containing the faulting 
-                        * address or the object paging block size is
-                        * equal to the page size.
-                        */
-                       if (!vm_allow_clustered_pagein ||
-                            behavior == VM_BEHAVIOR_RANDOM ||
-                            cluster_size == PAGE_SIZE) {
-                               cluster_start = trunc_page_64(cluster_start);
-                               goto no_clustering;
-                       }
-
-                       assert(offset >= lo_offset);
-                       assert(offset < hi_offset);
-                       assert(ALIGNED(object->paging_offset));
-                       assert(cluster_size >= PAGE_SIZE);
+                       if(m != VM_PAGE_NULL) {
 
-#if TRACEFAULTPAGE
-                       dbgTrace(0xBEEF0011, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
-#endif
-                       /*
-                        * Decide whether to scan ahead or behind for
-                        * additional pages contiguous to the faulted
-                        * page in the same paging block.  The decision
-                        * is based on system wide globals and the
-                        * expected page reference behavior of the
-                        * address range contained the faulting address.
-                        * First calculate some constants.
-                        */
-                       paging_offset = offset + object->paging_offset;
-                       cluster_offset = paging_offset & (cluster_size - 1);
-                       align_offset = paging_offset&(PAGE_SIZE_64-1);
-                       if (align_offset != 0) {
-                               cluster_offset = trunc_page_64(cluster_offset);
-                       }
-
-#define SPANS_CLUSTER(x) ((((x) - align_offset) & (vm_object_offset_t)(cluster_size - 1)) == 0)
-
-                       /*
-                        * Backward scan only if reverse sequential
-                        * behavior has been specified
-                        */
-                       CLUSTER_STAT(pages_at_lower_offsets = 0;)
-                       if (((vm_default_behind != 0 && 
-                            behavior == VM_BEHAVIOR_DEFAULT) ||
-                            behavior == VM_BEHAVIOR_RSEQNTL) && offset) {
-                           vm_object_offset_t cluster_bot;
-
-                           /*
-                            * Calculate lower search boundary.
-                            * Exclude pages that span a cluster boundary.
-                            * Clip to start of map entry.
-                            * For default page reference behavior, scan
-                            * default pages behind.
-                            */
-                           cluster_bot = (offset > cluster_offset) ?
-                                           offset - cluster_offset : offset;
-                           if (align_offset != 0) {
-                               if ((cluster_bot < offset) &&
-                                   SPANS_CLUSTER(cluster_bot)) {
-                                       cluster_bot += PAGE_SIZE_64;
-                               }
-                           }
-                           if (behavior == VM_BEHAVIOR_DEFAULT) {
-                               vm_object_offset_t 
-                                       bot = (vm_object_offset_t)
-                                               (vm_default_behind * PAGE_SIZE);
-
-                               if (cluster_bot < (offset - bot))
-                                       cluster_bot = offset - bot;
-                           }
-                           if (lo_offset > cluster_bot)
-                               cluster_bot = lo_offset;
-
-                           for ( cluster_start = offset - PAGE_SIZE_64;
-                                (cluster_start >= cluster_bot) &&
-                                (cluster_start != 
-                                       (align_offset - PAGE_SIZE_64));
-                                 cluster_start -= PAGE_SIZE_64) {
-                               assert(cluster_size > PAGE_SIZE_64);
-retry_cluster_backw:
-                               if (!LOOK_FOR(object, cluster_start) ||
-                                   vm_page_lookup(object, cluster_start)
-                                               != VM_PAGE_NULL) {
-                                       break;
-                               }
-                               if (object->internal) {
-                                       /*
-                                        * need to acquire a real page in
-                                        * advance because this acts as
-                                        * a throttling mechanism for
-                                        * data_requests to the default
-                                        * pager.  If this fails, give up
-                                        * trying to find any more pages
-                                        * in the cluster and send off the
-                                        * request for what we already have.
-                                        */
-                                       if ((m = vm_page_grab())
-                                                       == VM_PAGE_NULL) {
-                                           cluster_start += PAGE_SIZE_64;
-                                           cluster_end = offset + PAGE_SIZE_64;
-                                           goto give_up;
-                                       }
-                               } else if ((m = vm_page_grab_fictitious())
-                                               == VM_PAGE_NULL) {
-                                       vm_object_unlock(object);
-                                       vm_page_more_fictitious();
-                                       vm_object_lock(object);
-                                       goto retry_cluster_backw;
-                               }
+                               m->list_req_pending = TRUE;
                                m->absent = TRUE;
                                m->unusual = TRUE;
-                               m->clustered = TRUE;
-                               m->list_req_pending = TRUE;
+                               object->absent_count++;
 
-                               vm_page_insert(m, object, cluster_start);
-                               CLUSTER_STAT(pages_at_lower_offsets++;)
-                               object->absent_count++;
-                           }
-                           cluster_start += PAGE_SIZE_64;
-                           assert(cluster_start >= cluster_bot);
                        }
-                       assert(cluster_start <= offset);
 
-                       /*
-                        * Forward scan if default or sequential behavior
-                        * specified
-                        */
-                       CLUSTER_STAT(pages_at_higher_offsets = 0;)
-                       if ((behavior == VM_BEHAVIOR_DEFAULT && 
-                            vm_default_ahead != 0) ||
-                            behavior == VM_BEHAVIOR_SEQUENTIAL) {
-                           vm_object_offset_t cluster_top;
-
-                           /*
-                            * Calculate upper search boundary.
-                            * Exclude pages that span a cluster boundary.
-                            * Clip to end of map entry.
-                            * For default page reference behavior, scan
-                            * default pages ahead.
-                            */
-                           cluster_top = (offset + cluster_size) - 
-                                         cluster_offset;
-                           if (align_offset != 0) {
-                               if ((cluster_top > (offset + PAGE_SIZE_64)) &&
-                                   SPANS_CLUSTER(cluster_top)) {
-                                       cluster_top -= PAGE_SIZE_64;
-                               }
-                           }
-                           if (behavior == VM_BEHAVIOR_DEFAULT) {
-                               vm_object_offset_t top = (vm_object_offset_t)
-                                    ((vm_default_ahead*PAGE_SIZE)+PAGE_SIZE);
-
-                               if (cluster_top > (offset + top))
-                                       cluster_top =  offset + top;
-                           }
-                           if (cluster_top > hi_offset)
-                                       cluster_top = hi_offset;
-
-                           for (cluster_end = offset + PAGE_SIZE_64;
-                                cluster_end < cluster_top;
-                                cluster_end += PAGE_SIZE_64) {
-                               assert(cluster_size > PAGE_SIZE);
-retry_cluster_forw:
-                               if (!LOOK_FOR(object, cluster_end) ||
-                                   vm_page_lookup(object, cluster_end)
-                                               != VM_PAGE_NULL) {
-                                       break;
-                               }
-                               if (object->internal) {
-                                       /*
-                                        * need to acquire a real page in
-                                        * advance because this acts as
-                                        * a throttling mechanism for
-                                        * data_requests to the default
-                                        * pager.  If this fails, give up
-                                        * trying to find any more pages
-                                        * in the cluster and send off the
-                                        * request for what we already have.
-                                        */
-                                       if ((m = vm_page_grab())
-                                                       == VM_PAGE_NULL) {
-                                           break;
-                                       }
-                               } else if ((m = vm_page_grab_fictitious())
-                                               == VM_PAGE_NULL) {
-                                   vm_object_unlock(object);
-                                   vm_page_more_fictitious();
-                                   vm_object_lock(object);
-                                   goto retry_cluster_forw;
-                               }
-                               m->absent = TRUE;
-                               m->unusual = TRUE;
-                               m->clustered = TRUE;
-                               m->list_req_pending = TRUE;
+no_clustering:
+                       cluster_start = offset;
+                       length = PAGE_SIZE;
 
-                               vm_page_insert(m, object, cluster_end);
-                               CLUSTER_STAT(pages_at_higher_offsets++;)
-                               object->absent_count++;
-                           }
-                           assert(cluster_end <= cluster_top);
-                       }
-                       else {
+                       /* 
+                        * lengthen the cluster by the pages in the working set
+                        */
+                       if((map != NULL) && 
+                               (current_task()->dynamic_working_set != 0)) {
+                               cluster_end = cluster_start + length;
+                               /* tws values for start and end are just
+                                * suggestions.  Therefore, as long as
+                                * build_cluster does not use pointers or
+                                * take action based on values that
+                                * could be affected by re-entrance we
+                                * do not need to take the map lock.
+                                */
                                cluster_end = offset + PAGE_SIZE_64;
+                               tws_build_cluster((tws_hash_t)
+                                       current_task()->dynamic_working_set,
+                                       object, &cluster_start,
+                                       &cluster_end, 0x40000);
+                               length = cluster_end - cluster_start;
                        }
-give_up:
-                       assert(cluster_end >= offset + PAGE_SIZE_64);
-                       length = cluster_end - cluster_start;
-
-#if    MACH_CLUSTER_STATS
-                       CLUSTER_STAT_HIGHER(pages_at_higher_offsets);
-                       CLUSTER_STAT_LOWER(pages_at_lower_offsets);
-                       CLUSTER_STAT_CLUSTER(length/PAGE_SIZE);
-#endif /* MACH_CLUSTER_STATS */
-
-no_clustering:
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);  /* (TEST/DEBUG) */
 #endif
@@ -1242,9 +1204,10 @@ no_clustering:
                         */
 
                        if (type_of_fault)
-                               *type_of_fault = DBG_PAGEIN_FAULT;
+                               *type_of_fault = (length << 8) | DBG_PAGEIN_FAULT;
                        VM_STAT(pageins++);
                        current_task()->pageins++;
+                       bumped_pagein = TRUE;
 
                        /*
                         *      If this object uses a copy_call strategy,
@@ -1270,29 +1233,11 @@ no_clustering:
                                (integer_t)object, offset, (integer_t)m,
                                access_required | wants_copy_flag, 0);
 
-#ifdef MACH_BSD
-                       if (((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
-                           ((rpc_subsystem_t) &vnode_pager_workaround)) {
-                               rc = vnode_pager_data_request(object->pager, 
-                                                             object->pager_request,
-                                                             cluster_start + object->paging_offset, 
-                                                             length,
-                                                             access_required | wants_copy_flag);
-                       } else {
-                               rc = memory_object_data_request(object->pager, 
-                                                               object->pager_request,
-                                                               cluster_start + object->paging_offset, 
-                                                               length,
-                                                               access_required | wants_copy_flag);
-                       }
-#else
                        rc = memory_object_data_request(object->pager, 
-                                       object->pager_request,
                                        cluster_start + object->paging_offset, 
                                        length,
                                        access_required | wants_copy_flag);
 
-#endif
 
 #if TRACEFAULTPAGE
                        dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
@@ -1300,35 +1245,65 @@ no_clustering:
                        if (rc != KERN_SUCCESS) {
                                if (rc != MACH_SEND_INTERRUPTED
                                    && vm_fault_debug)
-                                       printf("%s(0x%x, 0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d, object=0x%x\n",
+                                       printf("%s(0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d\n",
                                                "memory_object_data_request",
                                                object->pager,
-                                               object->pager_request,
                                                cluster_start + object->paging_offset, 
-                                               length, access_required,
-                                               rc, object);
+                                               length, access_required, rc);
                                /*
                                 *      Don't want to leave a busy page around,
                                 *      but the data request may have blocked,
                                 *      so check if it's still there and busy.
                                 */
-                               vm_object_lock(object);
-                               for (; length;
-                                    length -= PAGE_SIZE,
-                                    cluster_start += PAGE_SIZE_64) {
-                                       vm_page_t p;
-                                       if ((p = vm_page_lookup(object,
+                               if(!object->phys_contiguous) {
+                                  vm_object_lock(object);
+                                  for (; length; length -= PAGE_SIZE,
+                                     cluster_start += PAGE_SIZE_64) {
+                                     vm_page_t p;
+                                     if ((p = vm_page_lookup(object,
                                                                cluster_start))
-                                           && p->absent && p->busy
-                                           && p != first_m) {
-                                               VM_PAGE_FREE(m);
-                                       }
+                                           && p->absent && p->busy
+                                           && p != first_m) {
+                                        VM_PAGE_FREE(p);
+                                     }
+                                  }
                                }
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return((rc == MACH_SEND_INTERRUPTED) ?
                                        VM_FAULT_INTERRUPTED :
                                        VM_FAULT_MEMORY_ERROR);
+                       } else {
+#ifdef notdefcdy
+                               tws_hash_line_t line;
+                               task_t          task;
+
+                               task = current_task();
+                               
+                               if((map != NULL) && 
+                                       (task->dynamic_working_set != 0) 
+                                               && !(object->private)) {
+                                       vm_object_t     base_object;
+                                       vm_object_offset_t base_offset;
+                                       base_object = object;
+                                       base_offset = offset;
+                                       while(base_object->shadow) {
+                                               base_offset +=
+                                                 base_object->shadow_offset;
+                                               base_object =
+                                                 base_object->shadow;
+                                       }
+                                       if(tws_lookup
+                                               ((tws_hash_t)
+                                               task->dynamic_working_set,
+                                               base_offset, base_object,
+                                               &line) == KERN_SUCCESS) {
+                                               tws_line_signal((tws_hash_t)
+                                               task->dynamic_working_set, 
+                                                       map, line, vaddr);
+                                       }
+                               }
+#endif
                        }
                        
                        /*
@@ -1340,9 +1315,11 @@ no_clustering:
                        if ((interruptible != THREAD_UNINT) && 
                            (current_thread()->state & TH_ABORT)) {
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_INTERRUPTED);
                        }
+                       if(m == VM_PAGE_NULL)
+                               break;
                        continue;
                }
 
@@ -1390,37 +1367,99 @@ no_clustering:
                        assert(m->object == object);
                        first_m = VM_PAGE_NULL;
 
+                       if(m == VM_PAGE_NULL) {
+                               m = vm_page_grab();
+                               if (m == VM_PAGE_NULL) {
+                                       vm_fault_cleanup(
+                                               object, VM_PAGE_NULL);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_MEMORY_SHORTAGE);
+                               }
+                               vm_page_insert(
+                                       m, object, offset);
+                       }
+
                        if (object->shadow_severed) {
                                VM_PAGE_FREE(m);
                                vm_fault_cleanup(object, VM_PAGE_NULL);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return VM_FAULT_MEMORY_ERROR;
                        }
 
+                       /*
+                        * Are we protecting the system from
+                        * backing store exhaustion?  If so,
+                        * sleep unless we are privileged.
+                        */
+
+                       if(vm_backing_store_low) {
+                               if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                       assert_wait((event_t) 
+                                               &vm_backing_store_low, 
+                                               THREAD_UNINT);
+                                       VM_PAGE_FREE(m);
+                                       vm_fault_cleanup(object, VM_PAGE_NULL);
+                                       thread_block((void (*)(void)) 0);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_RETRY);
+                               }
+                       }
+
                        if (VM_PAGE_THROTTLED() ||
                            (m->fictitious && !vm_page_convert(m))) {
                                VM_PAGE_FREE(m);
                                vm_fault_cleanup(object, VM_PAGE_NULL);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_MEMORY_SHORTAGE);
                        }
+                       m->no_isync = FALSE;
 
                        if (!no_zero_fill) {
                                vm_object_unlock(object);
                                vm_page_zero_fill(m);
-                               if (type_of_fault)
-                                       *type_of_fault = DBG_ZERO_FILL_FAULT;
-                               VM_STAT(zero_fill_count++);
                                vm_object_lock(object);
                        }
+                       if (type_of_fault)
+                               *type_of_fault = DBG_ZERO_FILL_FAULT;
+                       VM_STAT(zero_fill_count++);
+
+                       if (bumped_pagein == TRUE) {
+                               VM_STAT(pageins--);
+                               current_task()->pageins--;
+                       }
+
                        vm_page_lock_queues();
                        VM_PAGE_QUEUES_REMOVE(m);
-                       queue_enter(&vm_page_queue_inactive, 
-                                               m, vm_page_t, pageq);
+                       if(m->object->size > 0x80000) {
+                               m->zero_fill = TRUE;
+                               /* depends on the queues lock */
+                               vm_zf_count += 1;
+                               queue_enter(&vm_page_queue_zf, 
+                                       m, vm_page_t, pageq);
+                       } else {
+                               queue_enter(
+                                       &vm_page_queue_inactive, 
+                                       m, vm_page_t, pageq);
+                       }
+                       m->page_ticket = vm_page_ticket;
+                       vm_page_ticket_roll++;
+                       if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
+                               vm_page_ticket_roll = 0;
+                               if(vm_page_ticket == 
+                                       VM_PAGE_TICKET_ROLL_IDS)
+                                       vm_page_ticket= 0;
+                               else
+                                       vm_page_ticket++;
+                       }
                        m->inactive = TRUE;
                        vm_page_inactive_count++;
                        vm_page_unlock_queues();
-                       pmap_clear_modify(m->phys_addr);
+#if 0
+                       pmap_clear_modify(m->phys_page);
+#endif
                        break;
                }
                else {
@@ -1459,10 +1498,12 @@ no_clustering:
        dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
 #endif
 #if    EXTRA_ASSERTIONS
-       assert(m->busy && !m->absent);
-       assert((first_m == VM_PAGE_NULL) ||
-               (first_m->busy && !first_m->absent &&
-                !first_m->active && !first_m->inactive));
+       if(m != VM_PAGE_NULL) {
+               assert(m->busy && !m->absent);
+               assert((first_m == VM_PAGE_NULL) ||
+                       (first_m->busy && !first_m->absent &&
+                        !first_m->active && !first_m->inactive));
+       }
 #endif /* EXTRA_ASSERTIONS */
 
        XPR(XPR_VM_FAULT,
@@ -1476,7 +1517,7 @@ no_clustering:
         *      by the top-level object.
         */
 
-       if (object != first_object) {
+       if ((object != first_object) && (m != VM_PAGE_NULL)) {
                /*
                 *      We only really need to copy if we
                 *      want to write it.
@@ -1490,6 +1531,27 @@ no_clustering:
 
                        assert(!must_be_resident);
 
+                       /*
+                        * Are we protecting the system from
+                        * backing store exhaustion?  If so,
+                        * sleep unless we are privileged.
+                        */
+
+                       if(vm_backing_store_low) {
+                               if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                       assert_wait((event_t) 
+                                               &vm_backing_store_low, 
+                                               THREAD_UNINT);
+                                       RELEASE_PAGE(m);
+                                       vm_fault_cleanup(object, first_m);
+                                       thread_block((void (*)(void)) 0);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_RETRY);
+                               }
+                       }
+
                        /*
                         *      If we try to collapse first_object at this
                         *      point, we may deadlock when we try to get
@@ -1513,7 +1575,7 @@ no_clustering:
                        if (copy_m == VM_PAGE_NULL) {
                                RELEASE_PAGE(m);
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_MEMORY_SHORTAGE);
                        }
 
@@ -1539,7 +1601,7 @@ no_clustering:
 
                        vm_page_lock_queues();
                        assert(!m->cleaning);
-                       pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+                       pmap_page_protect(m->phys_page, VM_PROT_NONE);
                        vm_page_deactivate(m);
                        copy_m->dirty = TRUE;
                        /*
@@ -1580,7 +1642,7 @@ no_clustering:
                         */     
 
                        vm_object_paging_end(object); 
-                       vm_object_collapse(object);
+                       vm_object_collapse(object, offset);
                        vm_object_paging_begin(object);
 
                }
@@ -1597,8 +1659,8 @@ no_clustering:
         *      shadowed object, and one here to push it into the copy.
         */
 
-       while (first_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
-              (copy_object = first_object->copy) != VM_OBJECT_NULL) {
+       while ((copy_object = first_object->copy) != VM_OBJECT_NULL &&
+                  (m!= VM_PAGE_NULL)) {
                vm_object_offset_t      copy_offset;
                vm_page_t               copy_m;
 
@@ -1678,13 +1740,13 @@ no_clustering:
                                if (copy_m != VM_PAGE_NULL && copy_m->busy) {
                                        PAGE_ASSERT_WAIT(copy_m, interruptible);
                                        vm_object_unlock(copy_object);
-                                       wait_result = thread_block((void (*)(void))0);
+                                       wait_result = thread_block(THREAD_CONTINUE_NULL);
                                        vm_object_deallocate(copy_object);
                                        goto backoff;
                                } else {
                                        vm_object_unlock(copy_object);
                                        vm_object_deallocate(copy_object);
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_RETRY;
                                }
                        }
@@ -1700,6 +1762,31 @@ no_clustering:
                         * We must copy the page to the copy object.
                         */
 
+                       /*
+                        * Are we protecting the system from
+                        * backing store exhaustion?  If so,
+                        * sleep unless we are privileged.
+                        */
+
+                       if(vm_backing_store_low) {
+                               if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV)) {
+                                       assert_wait((event_t) 
+                                               &vm_backing_store_low, 
+                                               THREAD_UNINT);
+                                       RELEASE_PAGE(m);
+                                       VM_OBJ_RES_DECR(copy_object);
+                                       copy_object->ref_count--;
+                                       assert(copy_object->ref_count > 0);
+                                       vm_object_unlock(copy_object);
+                                       vm_fault_cleanup(object, first_m);
+                                       thread_block((void (*)(void)) 0);
+                                       thread_interrupt_level(
+                                               interruptible_state);
+                                       return(VM_FAULT_RETRY);
+                               }
+                       }
+
                        /*
                         *      Allocate a page for the copy
                         */
@@ -1711,7 +1798,7 @@ no_clustering:
                                assert(copy_object->ref_count > 0);
                                vm_object_unlock(copy_object);
                                vm_fault_cleanup(object, first_m);
-                               cur_thread->interruptible = interruptible_state;
+                               thread_interrupt_level(interruptible_state);
                                return(VM_FAULT_MEMORY_SHORTAGE);
                        }
 
@@ -1730,7 +1817,7 @@ no_clustering:
 
                        vm_page_lock_queues();
                        assert(!m->cleaning);
-                       pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+                       pmap_page_protect(m->phys_page, VM_PROT_NONE);
                        copy_m->dirty = TRUE;
                        vm_page_unlock_queues();
 
@@ -1845,32 +1932,22 @@ no_clustering:
         *      mark read-only data as dirty.]
         */
 
+
+       if(m != VM_PAGE_NULL) {
 #if    !VM_FAULT_STATIC_CONFIG
-       if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
-               m->dirty = TRUE;
-#endif
-#if TRACEFAULTPAGE
-       dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_page_deactivate_behind);  /* (TEST/DEBUG) */
-#endif
-       if (vm_page_deactivate_behind) {
-               if (offset && /* don't underflow */
-                       (object->last_alloc == (offset - PAGE_SIZE_64))) {
-                       m = vm_page_lookup(object, object->last_alloc);
-                       if ((m != VM_PAGE_NULL) && !m->busy) {
-                               vm_page_lock_queues();
-                               vm_page_deactivate(m);
-                               vm_page_unlock_queues();
-                       }
-#if TRACEFAULTPAGE
-                       dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
+               if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
+                       m->dirty = TRUE;
 #endif
-               }
-               object->last_alloc = offset;
+               if (vm_page_deactivate_behind)
+                       vm_fault_deactivate_behind(object, offset, behavior);
+       } else {
+               vm_object_unlock(object);
        }
+       thread_interrupt_level(interruptible_state);
+
 #if TRACEFAULTPAGE
        dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
 #endif
-       cur_thread->interruptible = interruptible_state;
        return(VM_FAULT_SUCCESS);
 
 #if 0
@@ -1878,11 +1955,11 @@ no_clustering:
        vm_fault_cleanup(object, first_m);
 
        counter(c_vm_fault_page_block_backoff_kernel++);
-       thread_block((void (*)(void))0);
+       thread_block(THREAD_CONTINUE_NULL);
 #endif
 
     backoff:
-       cur_thread->interruptible = interruptible_state;
+       thread_interrupt_level(interruptible_state);
        if (wait_result == THREAD_INTERRUPTED)
                return VM_FAULT_INTERRUPTED;
        return VM_FAULT_RETRY;
@@ -1890,6 +1967,97 @@ no_clustering:
 #undef RELEASE_PAGE
 }
 
+/*
+ *     Routine:        vm_fault_tws_insert
+ *     Purpose:
+ *             Add fault information to the task working set.
+ *     Implementation:
+ *             We always insert the base object/offset pair
+ *             rather than the actual object/offset.
+ *     Assumptions:
+ *             Map and pmap_map locked.
+ *             Object locked and referenced.
+ *     Returns:
+ *             TRUE if the startup file should be written.
+ *             The object remains locked and referenced on return,
+ *             though the lock may be dropped temporarily.
+ */
+static boolean_t
+vm_fault_tws_insert(
+       vm_map_t map,
+       vm_map_t pmap_map,
+       vm_offset_t vaddr,
+       vm_object_t object,
+       vm_object_offset_t offset)
+{
+       tws_hash_line_t line;
+       task_t          task;
+       kern_return_t   kr;
+       boolean_t       result = FALSE;
+       extern vm_map_t kalloc_map;
+
+       /* Avoid possible map lock deadlock issues */
+       if (map == kernel_map || map == kalloc_map ||
+           pmap_map == kernel_map || pmap_map == kalloc_map)
+               return result;
+
+       task = current_task();
+       if (task->dynamic_working_set != 0) {
+               vm_object_t     base_object;
+               vm_object_t     base_shadow;
+               vm_object_offset_t base_offset;
+               base_object = object;
+               base_offset = offset;
+               while(base_shadow = base_object->shadow) {
+                       vm_object_lock(base_shadow);
+                       vm_object_unlock(base_object);
+                       base_offset +=
+                        base_object->shadow_offset;
+                       base_object = base_shadow;
+               }
+               kr = tws_lookup((tws_hash_t)
+                       task->dynamic_working_set,
+                       base_offset, base_object, 
+                       &line);
+               if (kr == KERN_OPERATION_TIMED_OUT){
+                       result = TRUE;
+                       if (base_object != object) {
+                               vm_object_unlock(base_object);
+                               vm_object_lock(object);
+                       }
+               } else if (kr != KERN_SUCCESS) {
+                       if(base_object != object)
+                               vm_object_reference_locked(base_object);
+                       kr = tws_insert((tws_hash_t)
+                                  task->dynamic_working_set,
+                                  base_offset, base_object,
+                                  vaddr, pmap_map);
+                       if(base_object != object) {
+                               vm_object_unlock(base_object);
+                               vm_object_deallocate(base_object);
+                       }
+                       if(kr == KERN_NO_SPACE) {
+                               if (base_object == object)
+                                       vm_object_unlock(object);
+                               tws_expand_working_set(
+                                  task->dynamic_working_set, 
+                                  TWS_HASH_LINE_COUNT,
+                                  FALSE);
+                               if (base_object == object)
+                                       vm_object_lock(object);
+                       } else if(kr == KERN_OPERATION_TIMED_OUT) {
+                               result = TRUE;
+                       }
+                       if(base_object != object)
+                               vm_object_lock(object);
+               } else if (base_object != object) {
+                       vm_object_unlock(base_object);
+                       vm_object_lock(object);
+               }
+       }
+       return result;
+}
+
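+/*
+ *     A minimal sketch of the calling pattern used by the fault paths
+ *     below (names are taken from vm_fault; the offset is the caller's):
+ *
+ *             write_startup_file =
+ *                     vm_fault_tws_insert(map, pmap_map, vaddr,
+ *                                         object, cur_offset);
+ *             ...
+ *             if (write_startup_file)
+ *                     tws_send_startup_info(current_task());
+ */
+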
 /*
  *     Routine:        vm_fault
  *     Purpose:
@@ -1910,7 +2078,9 @@ vm_fault(
        vm_offset_t     vaddr,
        vm_prot_t       fault_type,
        boolean_t       change_wiring,
-       int             interruptible)
+       int             interruptible,
+       pmap_t          caller_pmap,
+       vm_offset_t     caller_pmap_addr)
 {
        vm_map_version_t        version;        /* Map version for verification */
        boolean_t               wired;          /* Should mapping be wired down? */
@@ -1941,7 +2111,12 @@ vm_fault(
        funnel_t                *curflock;
        thread_t                cur_thread;
        boolean_t               interruptible_state;
-       
+       unsigned int            cache_attr;
+       int                     write_startup_file = 0;
+       vm_prot_t               full_fault_type;
+
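+       /* vm_fault may block; refuse faults taken with preemption disabled */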
+       if (get_preemption_level() != 0)
+               return (KERN_FAILURE);
 
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START,
                              vaddr,
@@ -1950,11 +2125,16 @@ vm_fault(
                              0,
                              0);
 
-       cur_thread = current_thread();
+       /* at present we do not fully check for execute permission */
+       /* we generally treat it as read except in certain device  */
+       /* memory settings */
+       full_fault_type = fault_type;
+       if(fault_type & VM_PROT_EXECUTE) {
+               fault_type &= ~VM_PROT_EXECUTE;
+               fault_type |= VM_PROT_READ;
+       }
 
-       interruptible_state = cur_thread->interruptible;
-       if (interruptible == THREAD_UNINT)
-               cur_thread->interruptible = FALSE;
+       interruptible_state = thread_interrupt_level(interruptible);
 
        /*
         * assume we will hit a page in the cache
@@ -1969,6 +2149,8 @@ vm_fault(
        /*
         * drop funnel if it is already held. Then restore while returning
         */
+       cur_thread = current_thread();
+
        if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
                funnel_set = TRUE;
                curflock = cur_thread->funnel_lock;
@@ -2056,13 +2238,39 @@ vm_fault(
        while (TRUE) {
                m = vm_page_lookup(cur_object, cur_offset);
                if (m != VM_PAGE_NULL) {
-                       if (m->busy)
-                               break;
+                       if (m->busy) {
+                               wait_result_t   result;
 
-                       if (m->unusual && (m->error || m->restart ||
-                           m->absent || (fault_type & m->page_lock))) {
+                               if (object != cur_object)
+                                       vm_object_unlock(object);
 
-                       /*
+                               vm_map_unlock_read(map);
+                               if (pmap_map != map)
+                                       vm_map_unlock(pmap_map);
+
+#if    !VM_FAULT_STATIC_CONFIG
+                               if (!vm_fault_interruptible)
+                                       interruptible = THREAD_UNINT;
+#endif
+                               result = PAGE_ASSERT_WAIT(m, interruptible);
+
+                               vm_object_unlock(cur_object);
+
+                               if (result == THREAD_WAITING) {
+                                       result = thread_block(THREAD_CONTINUE_NULL);
+
+                                       counter(c_vm_fault_page_block_busy_kernel++);
+                               }
+                               if (result == THREAD_AWAKENED || result == THREAD_RESTART)
+                                       goto RetryFault;
+
+                               kr = KERN_ABORTED;
+                               goto done;
+                       }
+                       if (m->unusual && (m->error || m->restart || m->private
+                           || m->absent || (fault_type & m->page_lock))) {
+
+                               /*
                                 *      Unusual case. Give up.
                                 */
                                break;
@@ -2080,6 +2288,7 @@ vm_fault(
                                goto FastMapInFault;
 
                        if ((fault_type & VM_PROT_WRITE) == 0) {
+                               boolean_t sequential;
 
                                prot &= ~VM_PROT_WRITE;
 
@@ -2098,7 +2307,6 @@ FastMapInFault:
                                m->busy = TRUE;
 
                                vm_object_paging_begin(object);
-                               vm_object_unlock(object);
 
 FastPmapEnter:
                                /*
@@ -2120,20 +2328,39 @@ FastPmapEnter:
                                        prot &= ~VM_PROT_WRITE;
 #endif /* MACH_KDB */
 #endif /* STATIC_CONFIG */
-                               PMAP_ENTER(pmap, vaddr, m, prot, wired);
-                               pmap_attribute(pmap,
-                                              vaddr,
-                                              PAGE_SIZE,
-                                              MATTR_CACHE,
-                                              &mv_cache_sync);
-
-                               if (m->clustered) {
-                                       vm_pagein_cluster_used++;
-                                       m->clustered = FALSE;
+                               cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+
+                               sequential = FALSE;
+                               if (m->no_isync == TRUE) {
+                                       m->no_isync = FALSE;
+                                       pmap_sync_caches_phys(m->phys_page);
+                                       if (type_of_fault == DBG_CACHE_HIT_FAULT) {
+                                               /*
+                                                * found it in the cache, but this
+                                                * is the first fault-in of the page (no_isync == TRUE)
+                                                * so it must have come in as part of
+                                                * a cluster... account 1 pagein against it
+                                                */
+                                               VM_STAT(pageins++);
+                                               current_task()->pageins++;
+                                               type_of_fault = DBG_PAGEIN_FAULT;
+                                               sequential = TRUE;
+                                       }
+                               } else if (cache_attr != VM_WIMG_DEFAULT) {
+                                       pmap_sync_caches_phys(m->phys_page);
+                               }
 
+                               if(caller_pmap) {
+                                       PMAP_ENTER(caller_pmap, 
+                                               caller_pmap_addr, m, 
+                                               prot, cache_attr, wired);
+                               } else {
+                                       PMAP_ENTER(pmap, vaddr, m, 
+                                               prot, cache_attr, wired);
                                }
+
                                /*
-                                *      Grab the object lock to manipulate
+                                *      Hold queues lock to manipulate
                                 *      the page queues.  Change wiring
                                 *      case is obvious.  In soft ref bits
                                 *      case activate page only if it fell
@@ -2144,9 +2371,11 @@ FastPmapEnter:
                                 *      move active page to back of active
                                 *      queue.  This code doesn't.
                                 */
-                               vm_object_lock(object);
                                vm_page_lock_queues();
-
+                               if (m->clustered) {
+                                       vm_pagein_cluster_used++;
+                                       m->clustered = FALSE;
+                               }
                                m->reference = TRUE;
 
                                if (change_wiring) {
@@ -2175,24 +2404,45 @@ FastPmapEnter:
                                 *      That's it, clean up and return.
                                 */
                                PAGE_WAKEUP_DONE(m);
+
+                               sequential = (sequential && vm_page_deactivate_behind) ?
+                                       vm_fault_deactivate_behind(object, cur_offset, behavior) :
+                                       FALSE;
+
+                               /* 
+                                * Add non-sequential pages to the working set.
+                                * The sequential pages will be brought in through
+                                * normal clustering behavior.
+                                */
+                               if (!sequential && !object->private) {
+                                       write_startup_file = 
+                                               vm_fault_tws_insert(map, pmap_map, vaddr, 
+                                                               object, cur_offset);
+                               }
+
                                vm_object_paging_end(object);
                                vm_object_unlock(object);
+
                                vm_map_unlock_read(map);
                                if(pmap_map != map)
                                        vm_map_unlock(pmap_map);
 
-                               if (funnel_set) {
+                               if(write_startup_file)
+                                       tws_send_startup_info(current_task());
+
+                               if (funnel_set)
                                        thread_funnel_set( curflock, TRUE);
-                                       funnel_set = FALSE;
-                               }
-                               cur_thread->interruptible = interruptible_state;
+
+                               thread_interrupt_level(interruptible_state);
+
 
                                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
                                                      vaddr,
-                                                     type_of_fault,
+                                                     type_of_fault & 0xff,
                                                      KERN_SUCCESS,
-                                                     0,
+                                                     type_of_fault >> 8,
                                                      0);
+
                                return KERN_SUCCESS;
                        }
 
@@ -2206,7 +2456,6 @@ FastPmapEnter:
 
                        if (cur_object == object)
                                break;
-
                        /*
                         *      This is now a shadow based copy on write
                         *      fault -- it requires a copy up the shadow
@@ -2222,7 +2471,6 @@ FastPmapEnter:
                        if (m == VM_PAGE_NULL) {
                                break;
                        }
-
                        /*
                         *      Now do the copy.  Mark the source busy
                         *      and take out paging references on both
@@ -2254,7 +2502,7 @@ FastPmapEnter:
                        vm_page_lock_queues();
                        vm_page_deactivate(cur_m);
                        m->dirty = TRUE;
-                       pmap_page_protect(cur_m->phys_addr,
+                       pmap_page_protect(cur_m->phys_page,
                                                  VM_PROT_NONE);
                        vm_page_unlock_queues();
 
@@ -2269,9 +2517,8 @@ FastPmapEnter:
                         */     
  
                        vm_object_paging_end(object); 
-                       vm_object_collapse(object);
+                       vm_object_collapse(object, offset);
                        vm_object_paging_begin(object);
-                       vm_object_unlock(object);
 
                        goto FastPmapEnter;
                }
@@ -2286,7 +2533,6 @@ FastPmapEnter:
                                /*
                                 *      Have to talk to the pager.  Give up.
                                 */
-
                                break;
                        }
 
@@ -2300,11 +2546,15 @@ FastPmapEnter:
                                        if(pmap_map != map)
                                                vm_map_unlock(pmap_map);
 
+                                       if(write_startup_file)
+                                               tws_send_startup_info(
+                                                               current_task());
+
                                        if (funnel_set) {
                                                thread_funnel_set( curflock, TRUE);
                                                funnel_set = FALSE;
                                        }
-                                       cur_thread->interruptible = interruptible_state;
+                                       thread_interrupt_level(interruptible_state);
 
                                        return VM_FAULT_MEMORY_ERROR;
                                }
@@ -2315,15 +2565,36 @@ FastPmapEnter:
                                 *      page, then drop any lower lock.
                                 *      Give up if no page.
                                 */
-                               if ((vm_page_free_target - 
-                                  ((vm_page_free_target-vm_page_free_min)>>2))
-                                               > vm_page_free_count) {
+                               if (VM_PAGE_THROTTLED()) {
+                                       break;
+                               }
+
+                               /*
+                                * Are we protecting the system from
+                                * backing store exhaustion?  If so,
+                                * sleep unless we are privileged.
+                                */
+                               if(vm_backing_store_low) {
+                                       if(!(current_task()->priv_flags 
+                                               & VM_BACKING_STORE_PRIV))
                                        break;
                                }
                                m = vm_page_alloc(object, offset);
                                if (m == VM_PAGE_NULL) {
                                        break;
                                }
+                               /*
+                                * This is a zero-fill or initial fill
+                                * page fault.  As such, we consider it
+                                * undefined with respect to instruction
+                                * execution; i.e., it is the responsibility
+                                * of higher layers to call for an instruction
+                                * sync after changing the contents and before
+                                * sending a program into this area.  We 
+                                * choose this approach for performance.
+                                */
+
+                               m->no_isync = FALSE;
 
                                if (cur_object != object)
                                        vm_object_unlock(cur_object);
@@ -2348,11 +2619,35 @@ FastPmapEnter:
                                }
                                vm_page_lock_queues();
                                VM_PAGE_QUEUES_REMOVE(m);
-                               queue_enter(&vm_page_queue_inactive, 
-                                                       m, vm_page_t, pageq);
+
+                               m->page_ticket = vm_page_ticket;
+                               if(m->object->size > 0x80000) {
+                                       m->zero_fill = TRUE;
+                                       /* depends on the queues lock */
+                                       vm_zf_count += 1;
+                                       queue_enter(&vm_page_queue_zf, 
+                                               m, vm_page_t, pageq);
+                               } else {
+                                       queue_enter(
+                                               &vm_page_queue_inactive, 
+                                               m, vm_page_t, pageq);
+                               }
+                               vm_page_ticket_roll++;
+                               if(vm_page_ticket_roll == 
+                                               VM_PAGE_TICKETS_IN_ROLL) {
+                                       vm_page_ticket_roll = 0;
+                                       if(vm_page_ticket == 
+                                               VM_PAGE_TICKET_ROLL_IDS)
+                                               vm_page_ticket= 0;
+                                       else
+                                               vm_page_ticket++;
+                               }
+
                                m->inactive = TRUE;
                                vm_page_inactive_count++;
                                vm_page_unlock_queues();
+                               vm_object_lock(object);
+
                                goto FastPmapEnter;
                        }
 
@@ -2380,6 +2675,7 @@ FastPmapEnter:
                vm_object_unlock(cur_object);
        }
        vm_map_unlock_read(map);
+
        if(pmap_map != map)
                vm_map_unlock(pmap_map);
 
@@ -2397,13 +2693,19 @@ FastPmapEnter:
        vm_object_paging_begin(object);
 
        XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0);
+
+       if (!object->private) {
+               write_startup_file = 
+                       vm_fault_tws_insert(map, pmap_map, vaddr, object, offset);
+       }
+
        kr = vm_fault_page(object, offset, fault_type,
                           (change_wiring && !wired),
                           interruptible,
                           lo_offset, hi_offset, behavior,
                           &prot, &result_page, &top_page,
                           &type_of_fault,
-                          &error_code, map->no_zero_fill, FALSE);
+                          &error_code, map->no_zero_fill, FALSE, map, vaddr);
 
        /*
         *      If we didn't succeed, lose the object reference immediately.
@@ -2443,9 +2745,11 @@ FastPmapEnter:
 
        m = result_page;
 
-       assert((change_wiring && !wired) ?
-              (top_page == VM_PAGE_NULL) :
-              ((top_page == VM_PAGE_NULL) == (m->object == object)));
+       if(m != VM_PAGE_NULL) {
+               assert((change_wiring && !wired) ?
+                   (top_page == VM_PAGE_NULL) :
+                   ((top_page == VM_PAGE_NULL) == (m->object == object)));
+       }
 
        /*
         *      How to clean up the result of vm_fault_page.  This
@@ -2477,9 +2781,12 @@ FastPmapEnter:
         *      since our last lookup.
         */
 
-       old_copy_object = m->object->copy;
-
-       vm_object_unlock(m->object);
+       if(m != VM_PAGE_NULL) {
+               old_copy_object = m->object->copy;
+               vm_object_unlock(m->object);
+       } else {
+               old_copy_object = VM_OBJECT_NULL;
+       }
        if ((map != original_map) || !vm_map_verify(map, &version)) {
                vm_object_t             retry_object;
                vm_object_offset_t      retry_offset;
@@ -2505,22 +2812,34 @@ FastPmapEnter:
 
                if (kr != KERN_SUCCESS) {
                        vm_map_unlock_read(map);
-                       vm_object_lock(m->object);
-                       RELEASE_PAGE(m);
-                       UNLOCK_AND_DEALLOCATE;
+                       if(m != VM_PAGE_NULL) {
+                               vm_object_lock(m->object);
+                               RELEASE_PAGE(m);
+                               UNLOCK_AND_DEALLOCATE;
+                       } else {
+                               vm_object_deallocate(object);
+                       }
                        goto done;
                }
 
                vm_object_unlock(retry_object);
-               vm_object_lock(m->object);
+               if(m != VM_PAGE_NULL) {
+                       vm_object_lock(m->object);
+               } else {
+                       vm_object_lock(object);
+               }
 
                if ((retry_object != object) ||
                    (retry_offset != offset)) {
                        vm_map_unlock_read(map);
                        if(pmap_map != map)
                                vm_map_unlock(pmap_map);
-                       RELEASE_PAGE(m);
-                       UNLOCK_AND_DEALLOCATE;
+                       if(m != VM_PAGE_NULL) {
+                               RELEASE_PAGE(m);
+                               UNLOCK_AND_DEALLOCATE;
+                       } else {
+                               vm_object_deallocate(object);
+                       }
                        goto RetryFault;
                }
 
@@ -2529,17 +2848,27 @@ FastPmapEnter:
                 *      has been copied while we left the map unlocked.
                 */
                prot &= retry_prot;
-               vm_object_unlock(m->object);
+               if(m != VM_PAGE_NULL) {
+                       vm_object_unlock(m->object);
+               } else {
+                       vm_object_unlock(object);
+               }
+       }
+       if(m != VM_PAGE_NULL) {
+               vm_object_lock(m->object);
+       } else {
+               vm_object_lock(object);
        }
-       vm_object_lock(m->object);
 
        /*
         *      If the copy object changed while the top-level object
         *      was unlocked, then we must take away write permission.
         */
 
-       if (m->object->copy != old_copy_object)
-               prot &= ~VM_PROT_WRITE;
+       if(m != VM_PAGE_NULL) {
+               if (m->object->copy != old_copy_object)
+                       prot &= ~VM_PROT_WRITE;
+       }
 
        /*
         *      If we want to wire down this page, but no longer have
@@ -2550,17 +2879,15 @@ FastPmapEnter:
                vm_map_verify_done(map, &version);
                if(pmap_map != map)
                        vm_map_unlock(pmap_map);
-               RELEASE_PAGE(m);
-               UNLOCK_AND_DEALLOCATE;
+               if(m != VM_PAGE_NULL) {
+                       RELEASE_PAGE(m);
+                       UNLOCK_AND_DEALLOCATE;
+               } else {
+                       vm_object_deallocate(object);
+               }
                goto RetryFault;
        }
 
-       /*
-        *      It's critically important that a wired-down page be faulted
-        *      only once in each map for which it is wired.
-        */
-       vm_object_unlock(m->object);
-
        /*
         *      Put this page into the physical map.
         *      We had to do the unlock above because pmap_enter
@@ -2568,44 +2895,186 @@ FastPmapEnter:
         *      the pageout queues.  If the pageout daemon comes
         *      across the page, it will remove it from the queues.
         */
-       PMAP_ENTER(pmap, vaddr, m, prot, wired);
+       if (m != VM_PAGE_NULL) {
+               if (m->no_isync == TRUE) {
+                       pmap_sync_caches_phys(m->phys_page);
+
+                        if (type_of_fault == DBG_CACHE_HIT_FAULT) {
+                                /*
+                                 * found it in the cache, but this
+                                 * is the first fault-in of the page (no_isync == TRUE)
+                                 * so it must have come in as part of
+                                 * a cluster... account 1 pagein against it
+                                 */
+                                 VM_STAT(pageins++);
+                                 current_task()->pageins++;
+
+                                 type_of_fault = DBG_PAGEIN_FAULT;
+                        }
+                       m->no_isync = FALSE;
+               }
+               cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
 
-       /* Sync I & D caches for new mapping*/
-       pmap_attribute(pmap,
-                      vaddr,
-                      PAGE_SIZE,
-                      MATTR_CACHE,
-                      &mv_cache_sync);
+               if(caller_pmap) {
+                       PMAP_ENTER(caller_pmap, 
+                                       caller_pmap_addr, m, 
+                                       prot, cache_attr, wired);
+               } else {
+                       PMAP_ENTER(pmap, vaddr, m, 
+                                       prot, cache_attr, wired);
+               }
+
+               /*
+                * Add working set information for private objects here.
+                */
+               if (m->object->private) {
+                       write_startup_file =
+                               vm_fault_tws_insert(map, pmap_map, vaddr, 
+                                           m->object, m->offset);
+               }
+       } else {
+
+#ifndef i386
+               int                     memattr;
+               vm_map_entry_t          entry;
+               vm_offset_t             laddr;
+               vm_offset_t             ldelta, hdelta;
+
+               /* 
+                * do a pmap block mapping from the physical address
+                * in the object 
+                */
+
+               /* While we do not worry about execution protection in   */
+               /* general, certain pages may have instruction execution */
+               /* disallowed.  We will check here, and if not allowed   */
+               /* to execute, we return with a protection failure.      */
+
+               if((full_fault_type & VM_PROT_EXECUTE) &&
+                       (pmap_canExecute((ppnum_t)
+                               (object->shadow_offset >> 12)) < 1)) {
+
+                       vm_map_verify_done(map, &version);
+                       if(pmap_map != map)
+                               vm_map_unlock(pmap_map);
+                       vm_fault_cleanup(object, top_page);
+                       vm_object_deallocate(object);
+                       kr = KERN_PROTECTION_FAILURE;
+                       goto done;
+               }
+
+               if(pmap_map != map) {
+                       vm_map_unlock(pmap_map);
+               }
+               if (original_map != map) {
+                       vm_map_unlock_read(map);
+                       vm_map_lock_read(original_map);
+                       map = original_map;
+               }
+               pmap_map = map;
+
+               laddr = vaddr;
+               hdelta = 0xFFFFF000;
+               ldelta = 0xFFFFF000;
+
+
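+               /*
+                * Walk down through any submaps, narrowing ldelta/hdelta to
+                * how far the block mapping may extend below and above the
+                * faulting address while staying inside the map entry.
+                */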
+               while(vm_map_lookup_entry(map, laddr, &entry)) {
+                       if(ldelta > (laddr - entry->vme_start))
+                               ldelta = laddr - entry->vme_start;
+                       if(hdelta > (entry->vme_end - laddr))
+                               hdelta = entry->vme_end - laddr;
+                       if(entry->is_sub_map) {
+                               
+                               laddr = (laddr - entry->vme_start) 
+                                                       + entry->offset;
+                               vm_map_lock_read(entry->object.sub_map);
+                               if(map != pmap_map)
+                                       vm_map_unlock_read(map);
+                               if(entry->use_pmap) {
+                                       vm_map_unlock_read(pmap_map);
+                                       pmap_map = entry->object.sub_map;
+                               }
+                               map = entry->object.sub_map;
+                               
+                       } else {
+                               break;
+                       }
+               }
+
+               if(vm_map_lookup_entry(map, laddr, &entry) && 
+                                       (entry->object.vm_object != NULL) &&
+                                       (entry->object.vm_object == object)) {
+
+
+                       if(caller_pmap) {
+                               /* Set up a block mapped area */
+                               pmap_map_block(caller_pmap, 
+                                       (addr64_t)(caller_pmap_addr - ldelta), 
+                                       (((vm_offset_t)
+                                   (entry->object.vm_object->shadow_offset)) 
+                                       + entry->offset + 
+                                       (laddr - entry->vme_start) 
+                                                       - ldelta)>>12,
+                               ldelta + hdelta, prot, 
+                               (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                       } else { 
+                               /* Set up a block mapped area */
+                               pmap_map_block(pmap_map->pmap, 
+                                  (addr64_t)(vaddr - ldelta), 
+                                  (((vm_offset_t)
+                                   (entry->object.vm_object->shadow_offset)) 
+                                      + entry->offset + 
+                                      (laddr - entry->vme_start) - ldelta)>>12,
+                                  ldelta + hdelta, prot, 
+                                  (VM_WIMG_MASK & (int)object->wimg_bits), 0);
+                       }
+               }
+#else
+#ifdef notyet
+               if(caller_pmap) {
+                               pmap_enter(caller_pmap, caller_pmap_addr, 
+                               object->shadow_offset>>12, prot, 0, TRUE);
+               } else {
+                               pmap_enter(pmap, vaddr, 
+                               object->shadow_offset>>12, prot, 0, TRUE);
+               }
+                       /* Map it in */
+#endif
+#endif
+
+       }
 
        /*
         *      If the page is not wired down and isn't already
         *      on a pageout queue, then put it where the
         *      pageout daemon can find it.
         */
-       vm_object_lock(m->object);
-       vm_page_lock_queues();
-       if (change_wiring) {
-               if (wired)
-                       vm_page_wire(m);
-               else
-                       vm_page_unwire(m);
-       }
+       if(m != VM_PAGE_NULL) {
+               vm_page_lock_queues();
+
+               if (change_wiring) {
+                       if (wired)
+                               vm_page_wire(m);
+                       else
+                               vm_page_unwire(m);
+               }
 #if    VM_FAULT_STATIC_CONFIG
-       else {
-               if (!m->active && !m->inactive)
-                       vm_page_activate(m);
-               m->reference = TRUE;
-       }
+               else {
+                       if (!m->active && !m->inactive)
+                               vm_page_activate(m);
+                       m->reference = TRUE;
+               }
 #else
-       else if (software_reference_bits) {
-               if (!m->active && !m->inactive)
+               else if (software_reference_bits) {
+                       if (!m->active && !m->inactive)
+                               vm_page_activate(m);
+                       m->reference = TRUE;
+               } else {
                        vm_page_activate(m);
-               m->reference = TRUE;
-       } else {
-               vm_page_activate(m);
-       }
+               }
 #endif
-       vm_page_unlock_queues();
+               vm_page_unlock_queues();
+       }
 
        /*
         *      Unlock everything, and return
@@ -2614,26 +3083,34 @@ FastPmapEnter:
        vm_map_verify_done(map, &version);
        if(pmap_map != map)
                vm_map_unlock(pmap_map);
-       PAGE_WAKEUP_DONE(m);
+       if(m != VM_PAGE_NULL) {
+               PAGE_WAKEUP_DONE(m);
+               UNLOCK_AND_DEALLOCATE;
+       } else {
+               vm_fault_cleanup(object, top_page);
+               vm_object_deallocate(object);
+       }
        kr = KERN_SUCCESS;
-       UNLOCK_AND_DEALLOCATE;
 
 #undef UNLOCK_AND_DEALLOCATE
 #undef RELEASE_PAGE
 
     done:
+       if(write_startup_file)
+               tws_send_startup_info(current_task());
        if (funnel_set) {
                thread_funnel_set( curflock, TRUE);
                funnel_set = FALSE;
        }
-       cur_thread->interruptible = interruptible_state;
+       thread_interrupt_level(interruptible_state);
 
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
                              vaddr,
-                             type_of_fault,
+                             type_of_fault & 0xff,
                              kr,
-                             0,
+                             type_of_fault >> 8,
                              0);
+
        return(kr);
 }
 
@@ -2646,7 +3123,8 @@ kern_return_t
 vm_fault_wire(
        vm_map_t        map,
        vm_map_entry_t  entry,
-       pmap_t          pmap)
+       pmap_t          pmap,
+       vm_offset_t     pmap_addr)
 {
 
        register vm_offset_t    va;
@@ -2655,13 +3133,20 @@ vm_fault_wire(
 
        assert(entry->in_transition);
 
+       if ((entry->object.vm_object != NULL) && 
+                       !entry->is_sub_map && 
+                       entry->object.vm_object->phys_contiguous) {
+               return KERN_SUCCESS;
+       }
+
        /*
         *      Inform the physical mapping system that the
         *      range of addresses may not fault, so that
         *      page tables and such can be locked down as well.
         */
 
-       pmap_pageable(pmap, entry->vme_start, end_addr, FALSE);
+       pmap_pageable(pmap, pmap_addr, 
+               pmap_addr + (end_addr - entry->vme_start), FALSE);
 
        /*
         *      We simulate a fault to get the page and enter it
@@ -2670,9 +3155,13 @@ vm_fault_wire(
 
        for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
                if ((rc = vm_fault_wire_fast(
-                               map, va, entry, pmap)) != KERN_SUCCESS) {
+                       map, va, entry, pmap, 
+                       pmap_addr + (va - entry->vme_start)
+                       )) != KERN_SUCCESS) {
                        rc = vm_fault(map, va, VM_PROT_NONE, TRUE, 
-                                     (pmap == kernel_pmap) ? THREAD_UNINT : THREAD_ABORTSAFE);
+                               (pmap == kernel_pmap) ? 
+                                       THREAD_UNINT : THREAD_ABORTSAFE, 
+                               pmap, pmap_addr + (va - entry->vme_start));
                }
 
                if (rc != KERN_SUCCESS) {
@@ -2680,7 +3169,8 @@ vm_fault_wire(
 
                        /* unwire wired pages */
                        tmp_entry.vme_end = va;
-                       vm_fault_unwire(map, &tmp_entry, FALSE, pmap);
+                       vm_fault_unwire(map, 
+                               &tmp_entry, FALSE, pmap, pmap_addr);
 
                        return rc;
                }
@@ -2698,7 +3188,8 @@ vm_fault_unwire(
        vm_map_t        map,
        vm_map_entry_t  entry,
        boolean_t       deallocate,
-       pmap_t          pmap)
+       pmap_t          pmap,
+       vm_offset_t     pmap_addr)
 {
        register vm_offset_t    va;
        register vm_offset_t    end_addr = entry->vme_end;
@@ -2713,10 +3204,14 @@ vm_fault_unwire(
         */
 
        for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
-               pmap_change_wiring(pmap, va, FALSE);
+               pmap_change_wiring(pmap, 
+                       pmap_addr + (va - entry->vme_start), FALSE);
 
                if (object == VM_OBJECT_NULL) {
-                       (void) vm_fault(map, va, VM_PROT_NONE, TRUE, THREAD_UNINT);
+                       (void) vm_fault(map, va, VM_PROT_NONE, 
+                                       TRUE, THREAD_UNINT, pmap, pmap_addr);
+               } else if (object->phys_contiguous) {
+                       continue;
                } else {
                        vm_prot_t       prot;
                        vm_page_t       result_page;
@@ -2747,7 +3242,7 @@ vm_fault_unwire(
                                                &top_page,
                                                (int *)0,
                                                0, map->no_zero_fill, 
-                                               FALSE);
+                                               FALSE, NULL, 0);
                        } while (result == VM_FAULT_RETRY);
 
                        if (result != VM_FAULT_SUCCESS)
@@ -2756,7 +3251,7 @@ vm_fault_unwire(
                        result_object = result_page->object;
                        if (deallocate) {
                                assert(!result_page->fictitious);
-                               pmap_page_protect(result_page->phys_addr,
+                               pmap_page_protect(result_page->phys_page,
                                                VM_PROT_NONE);
                                VM_PAGE_FREE(result_page);
                        } else {
@@ -2776,7 +3271,8 @@ vm_fault_unwire(
         *      such may be unwired themselves.
         */
 
-       pmap_pageable(pmap, entry->vme_start, end_addr, TRUE);
+       pmap_pageable(pmap, pmap_addr, 
+               pmap_addr + (end_addr - entry->vme_start), TRUE);
 
 }
 
@@ -2805,13 +3301,15 @@ vm_fault_wire_fast(
        vm_map_t        map,
        vm_offset_t     va,
        vm_map_entry_t  entry,
-       pmap_t          pmap)
+       pmap_t          pmap,
+       vm_offset_t     pmap_addr)
 {
        vm_object_t             object;
        vm_object_offset_t      offset;
        register vm_page_t      m;
        vm_prot_t               prot;
        thread_act_t            thr_act;
+       unsigned int            cache_attr;
 
        VM_STAT(faults++);
 
@@ -2932,20 +3430,15 @@ vm_fault_wire_fast(
         *      We have to unlock the object because pmap_enter
         *      may cause other faults.   
         */
-       vm_object_unlock(object);
+       if (m->no_isync == TRUE) {
+               pmap_sync_caches_phys(m->phys_page);
+
+               m->no_isync = FALSE;
+       }
 
-       PMAP_ENTER(pmap, va, m, prot, TRUE);
-       /* Sync I & D caches for new mapping */
-       pmap_attribute(pmap,
-                      va,
-                      PAGE_SIZE,
-                      MATTR_CACHE,
-                      &mv_cache_sync);
+       cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
 
-       /*
-        *      Must relock object so that paging_in_progress can be cleared.
-        */
-       vm_object_lock(object);
+       PMAP_ENTER(pmap, pmap_addr, m, prot, cache_attr, TRUE);
 
        /*
         *      Unlock everything, and return
@@ -3097,7 +3590,7 @@ vm_fault_copy(
                                      (int *)0,
                                      &error,
                                      dst_map->no_zero_fill,
-                                     FALSE)) {
+                                     FALSE, NULL, 0)) {
                case VM_FAULT_SUCCESS:
                        break;
                case VM_FAULT_RETRY:
@@ -3183,7 +3676,7 @@ vm_fault_copy(
                                                      (int *)0,
                                                      &error,
                                                      FALSE,
-                                                     FALSE)) {
+                                                     FALSE, NULL, 0)) {
 
                                case VM_FAULT_SUCCESS:
                                        break;
@@ -3405,11 +3898,12 @@ vm_fault_page_overwrite(
                                 */
 
                                if (!dst_object->pager_ready) {
-                                       vm_object_assert_wait(dst_object,
-                                               VM_OBJECT_EVENT_PAGER_READY,
-                                               interruptible);
+                                       wait_result = vm_object_assert_wait(dst_object,
+                                                               VM_OBJECT_EVENT_PAGER_READY,
+                                                               interruptible);
                                        vm_object_unlock(dst_object);
-                                       wait_result = thread_block((void (*)(void))0);
+                                       if (wait_result == THREAD_WAITING)
+                                               wait_result = thread_block(THREAD_CONTINUE_NULL);
                                        if (wait_result != THREAD_AWAKENED) {
                                                DISCARD_PAGE;
                                                return(VM_FAULT_INTERRUPTED);
@@ -3422,7 +3916,6 @@ vm_fault_page_overwrite(
 
                                if ((rc = memory_object_data_unlock(
                                                dst_object->pager,
-                                               dst_object->pager_request,
                                                dst_offset + dst_object->paging_offset,
                                                PAGE_SIZE,
                                                u)) != KERN_SUCCESS) {
@@ -3448,9 +3941,10 @@ vm_fault_page_overwrite(
                                break;
                }
 
-               PAGE_ASSERT_WAIT(dst_page, interruptible);
+               wait_result = PAGE_ASSERT_WAIT(dst_page, interruptible);
                vm_object_unlock(dst_object);
-               wait_result = thread_block((void (*)(void))0);
+               if (wait_result == THREAD_WAITING)
+                       wait_result = thread_block(THREAD_CONTINUE_NULL);
                if (wait_result != THREAD_AWAKENED) {
                        DISCARD_PAGE;
                        return(VM_FAULT_INTERRUPTED);