-
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the
+ * License may not be used to create, or enable the creation or
+ * redistribution of, unlawful or unlicensed copies of an Apple operating
+ * system, or to circumvent, violate, or enable the circumvention or
+ * violation of, any terms of an Apple operating system software license
+ * agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
* file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*
* Page fault handling module.
*/
-#ifdef MACH_BSD
-/* remove after component interface available */
-extern int vnode_pager_workaround;
-extern int device_pager_workaround;
-#endif
#include <mach_cluster_stats.h>
#include <mach_pagemap.h>
#include <mach_kdb.h>
-#include <vm/vm_fault.h>
+#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/message.h> /* for error codes */
+#include <mach/vm_param.h>
+#include <mach/vm_behavior.h>
+#include <mach/memory_object.h>
+ /* For memory_object_data_{request,unlock} */
+
+#include <kern/kern_types.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/xpr.h>
+#include <kern/mach_param.h>
+#include <kern/macro_help.h>
+#include <kern/zalloc.h>
+#include <kern/misc_protos.h>
+
#include <ppc/proc_reg.h>
-#include <ppc/pmap_internals.h>
+
+#include <vm/vm_fault.h>
#include <vm/task_working_set.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
-#include <mach/vm_param.h>
-#include <mach/vm_behavior.h>
-#include <mach/memory_object.h>
- /* For memory_object_data_{request,unlock} */
-#include <kern/mach_param.h>
-#include <kern/macro_help.h>
-#include <kern/zalloc.h>
-#include <kern/misc_protos.h>
+#include <vm/vm_protos.h>
#include <sys/kdebug.h>
#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
-int vm_object_absent_max = 50;
+unsigned int vm_object_absent_max = 50;
int vm_fault_debug = 0;
-boolean_t vm_page_deactivate_behind = TRUE;
-
#if !VM_FAULT_STATIC_CONFIG
boolean_t vm_fault_dirty_handling = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
#endif /* MACH_KDB */
+
/* Forward declarations of internal routines. */
extern kern_return_t vm_fault_wire_fast(
vm_map_t map,
- vm_offset_t va,
+ vm_map_offset_t va,
vm_map_entry_t entry,
pmap_t pmap,
- vm_offset_t pmap_addr);
+ vm_map_offset_t pmap_addr);
extern void vm_fault_continue(void);
boolean_t vm_allow_clustered_pagein = FALSE;
int vm_pagein_cluster_used = 0;
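+/* TRUE when the 64-bit offset "x" falls on a page boundary */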
+#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
+
+
+boolean_t vm_page_deactivate_behind = TRUE;
/*
* Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior
*/
-int vm_default_ahead = 1; /* Number of pages to prepage ahead */
-int vm_default_behind = 0; /* Number of pages to prepage behind */
+int vm_default_ahead = 0;
+int vm_default_behind = MAX_UPL_TRANSFER;
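+/*
+ * Note: vm_default_behind also sizes the deactivate-behind window
+ * used by vm_fault_deactivate_behind() below.
+ */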
+
+/*
+ * vm_page_deactivate_behind
+ *
+ * Determine if sequential access is in progress
+ * in accordance with the behavior specified. If
+ * so, compute a potential page to deactivate and
+ * deactivate it.
+ *
+ * The object must be locked.
+ */
+static
+boolean_t
+vm_fault_deactivate_behind(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_behavior_t behavior)
+{
+ vm_page_t m;
+
+#if TRACEFAULTPAGE
+ dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
+#endif
+
+ if (object == kernel_object) {
+ /*
+ * Do not deactivate pages from the kernel object: they
+ * are not intended to become pageable.
+ */
+ return FALSE;
+ }
+
+ switch (behavior) {
+ case VM_BEHAVIOR_RANDOM:
+ object->sequential = PAGE_SIZE_64;
+ m = VM_PAGE_NULL;
+ break;
+ case VM_BEHAVIOR_SEQUENTIAL:
+ if (offset &&
+ object->last_alloc == offset - PAGE_SIZE_64) {
+ object->sequential += PAGE_SIZE_64;
+ m = vm_page_lookup(object, offset - PAGE_SIZE_64);
+ } else {
+ object->sequential = PAGE_SIZE_64; /* reset */
+ m = VM_PAGE_NULL;
+ }
+ break;
+ case VM_BEHAVIOR_RSEQNTL:
+ if (object->last_alloc &&
+ object->last_alloc == offset + PAGE_SIZE_64) {
+ object->sequential += PAGE_SIZE_64;
+ m = vm_page_lookup(object, offset + PAGE_SIZE_64);
+ } else {
+ object->sequential = PAGE_SIZE_64; /* reset */
+ m = VM_PAGE_NULL;
+ }
+ break;
+ case VM_BEHAVIOR_DEFAULT:
+ default:
+ if (offset &&
+ object->last_alloc == offset - PAGE_SIZE_64) {
+ vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+ object->sequential += PAGE_SIZE_64;
+ m = (offset >= behind &&
+ object->sequential >= behind) ?
+ vm_page_lookup(object, offset - behind) :
+ VM_PAGE_NULL;
+ } else if (object->last_alloc &&
+ object->last_alloc == offset + PAGE_SIZE_64) {
+ vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
+
+ object->sequential += PAGE_SIZE_64;
+ m = (offset < -behind &&
+ object->sequential >= behind) ?
+ vm_page_lookup(object, offset + behind) :
+ VM_PAGE_NULL;
+ } else {
+ object->sequential = PAGE_SIZE_64;
+ m = VM_PAGE_NULL;
+ }
+ break;
+ }
+
+ object->last_alloc = offset;
+
+ if (m) {
+ if (!m->busy) {
+ vm_page_lock_queues();
+ vm_page_deactivate(m);
+ vm_page_unlock_queues();
+#if TRACEFAULTPAGE
+ dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
+#endif
+ }
+ return TRUE;
+ }
+ return FALSE;
+}
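+/*
+ * Both vm_fault_page() and the vm_fault() fast path call
+ * vm_fault_deactivate_behind() once per fault with the object locked,
+ * so "sequential" grows by one page per consecutive fault and the
+ * deactivation point trails the access stream (by vm_default_behind
+ * pages in the default case).
+ */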
-#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
/*
* Routine: vm_fault_page
vm_prot_t fault_type, /* What access is requested */
boolean_t must_be_resident,/* Must page be resident? */
int interruptible, /* how may fault be interrupted? */
- vm_object_offset_t lo_offset, /* Map entry start */
- vm_object_offset_t hi_offset, /* Map entry end */
+ vm_map_offset_t lo_offset, /* Map entry start */
+ vm_map_offset_t hi_offset, /* Map entry end */
vm_behavior_t behavior, /* Page reference behavior */
/* Modifies in place: */
vm_prot_t *protection, /* Protection for mapping */
* it is a write fault and a full
* page is provided */
vm_map_t map,
- vm_offset_t vaddr)
+ __unused vm_map_offset_t vaddr)
{
register
vm_page_t m;
boolean_t look_for_page;
vm_prot_t access_required = fault_type;
vm_prot_t wants_copy_flag;
- vm_size_t cluster_size, length;
- vm_object_offset_t cluster_offset;
- vm_object_offset_t cluster_start, cluster_end, paging_offset;
- vm_object_offset_t align_offset;
+ vm_object_size_t length;
+ vm_object_offset_t cluster_start, cluster_end;
CLUSTER_STAT(int pages_at_higher_offsets;)
CLUSTER_STAT(int pages_at_lower_offsets;)
kern_return_t wait_result;
/*
* If the page was pre-paged as part of a
* cluster, record the fact.
+ * If we were passed a valid pointer for
+	 * "type_of_fault", then we came from
+	 * vm_fault... we'll let it deal with
+	 * this condition, since it
+	 * needs to see m->clustered to correctly
+	 * account for the pageins.
*/
- if (m->clustered) {
+ if (type_of_fault == NULL && m->clustered) {
vm_pagein_cluster_used++;
m->clustered = FALSE;
}
continue;
}
+ if (m->encrypted) {
+ /*
+ * ENCRYPTED SWAP:
+ * the user needs access to a page that we
+ * encrypted before paging it out.
+ * Decrypt the page now.
+ * Keep it busy to prevent anyone from
+ * accessing it during the decryption.
+ */
+ m->busy = TRUE;
+ vm_page_decrypt(m, 0);
+ assert(object == m->object);
+ assert(m->busy);
+ PAGE_WAKEUP_DONE(m);
+
+ /*
+ * Retry from the top, in case
+ * something changed while we were
+ * decrypting.
+ */
+ continue;
+ }
+ ASSERT_PAGE_DECRYPTED(m);
+
/*
* If the page is in error, give up now.
*/
* need to allocate a real page.
*/
if (VM_PAGE_THROTTLED() ||
- (real_m = vm_page_grab()) == VM_PAGE_NULL) {
- vm_fault_cleanup(object, first_m);
- thread_interrupt_level(interruptible_state);
- return(VM_FAULT_MEMORY_SHORTAGE);
+ (real_m = vm_page_grab())
+ == VM_PAGE_NULL) {
+ vm_fault_cleanup(
+ object, first_m);
+ thread_interrupt_level(
+ interruptible_state);
+ return(
+ VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ /*
+				 * Are we protecting the system from
+				 * backing store exhaustion?  If so,
+				 * sleep unless we are privileged.
+ */
+
+ if(vm_backing_store_low) {
+ if(!(current_task()->priv_flags
+ & VM_BACKING_STORE_PRIV)) {
+ assert_wait((event_t)
+ &vm_backing_store_low,
+ THREAD_UNINT);
+ vm_fault_cleanup(object,
+ first_m);
+ thread_block(THREAD_CONTINUE_NULL);
+ thread_interrupt_level(
+ interruptible_state);
+ return(VM_FAULT_RETRY);
+ }
}
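+				/*
+				 * Unprivileged threads block above until
+				 * backing store is replenished and the
+				 * vm_backing_store_low event is posted;
+				 * VM_BACKING_STORE_PRIV presumably marks the
+				 * default pager's own tasks, which must not
+				 * block here for paging to make progress.
+				 */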
+
XPR(XPR_VM_FAULT,
"vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
(integer_t)object, offset,
if (!no_zero_fill) {
vm_object_unlock(object);
vm_page_zero_fill(m);
+ vm_object_lock(object);
+
if (type_of_fault)
*type_of_fault = DBG_ZERO_FILL_FAULT;
VM_STAT(zero_fill_count++);
-
- if (bumped_pagein == TRUE) {
- VM_STAT(pageins--);
- current_task()->pageins--;
- }
- vm_object_lock(object);
}
- pmap_clear_modify(m->phys_addr);
+ if (bumped_pagein == TRUE) {
+ VM_STAT(pageins--);
+ current_task()->pageins--;
+ }
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(m);
m->page_ticket = vm_page_ticket;
- if(m->object->size > 0x80000) {
+ assert(!m->laundry);
+ assert(m->object != kernel_object);
+ assert(m->pageq.next == NULL &&
+ m->pageq.prev == NULL);
+ if(m->object->size > 0x200000) {
m->zero_fill = TRUE;
/* depends on the queues lock */
vm_zf_count += 1;
* do not need to take the map lock.
*/
cluster_end = offset + PAGE_SIZE_64;
- tws_build_cluster((tws_hash_t)
+ tws_build_cluster(
current_task()->dynamic_working_set,
object, &cluster_start,
&cluster_end, 0x40000);
*/
if (type_of_fault)
- *type_of_fault = (length << 8) | DBG_PAGEIN_FAULT;
+ *type_of_fault = ((int)length << 8) | DBG_PAGEIN_FAULT;
VM_STAT(pageins++);
current_task()->pageins++;
bumped_pagein = TRUE;
if (rc != KERN_SUCCESS) {
if (rc != MACH_SEND_INTERRUPTED
&& vm_fault_debug)
- printf("%s(0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d\n",
+				printf("%s(0x%x, 0x%llx, 0x%llx, 0x%x) failed, rc=%d\n",
"memory_object_data_request",
object->pager,
cluster_start + object->paging_offset,
return((rc == MACH_SEND_INTERRUPTED) ?
VM_FAULT_INTERRUPTED :
VM_FAULT_MEMORY_ERROR);
- } else {
-#ifdef notdefcdy
- tws_hash_line_t line;
- task_t task;
-
- task = current_task();
-
- if((map != NULL) &&
- (task->dynamic_working_set != 0))
- && !(object->private)) {
- vm_object_t base_object;
- vm_object_offset_t base_offset;
- base_object = object;
- base_offset = offset;
- while(base_object->shadow) {
- base_offset +=
- base_object->shadow_offset;
- base_object =
- base_object->shadow;
- }
- if(tws_lookup
- ((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- &line) == KERN_SUCCESS) {
- tws_line_signal((tws_hash_t)
- task->dynamic_working_set,
- map, line, vaddr);
- }
- }
-#endif
}
- /*
- * Retry with same object/offset, since new data may
- * be in a different page (i.e., m is meaningless at
- * this point).
- */
vm_object_lock(object);
if ((interruptible != THREAD_UNINT) &&
(current_thread()->state & TH_ABORT)) {
thread_interrupt_level(interruptible_state);
return(VM_FAULT_INTERRUPTED);
}
- if(m == VM_PAGE_NULL)
+ if (m == VM_PAGE_NULL &&
+ object->phys_contiguous) {
+ /*
+ * No page here means that the object we
+ * initially looked up was "physically
+ * contiguous" (i.e. device memory). However,
+ * with Virtual VRAM, the object might not
+ * be backed by that device memory anymore,
+ * so we're done here only if the object is
+ * still "phys_contiguous".
+ * Otherwise, if the object is no longer
+ * "phys_contiguous", we need to retry the
+ * page fault against the object's new backing
+ * store (different memory object).
+ */
break;
+ }
+
+ /*
+ * Retry with same object/offset, since new data may
+ * be in a different page (i.e., m is meaningless at
+ * this point).
+ */
continue;
}
assert(m->object == object);
first_m = VM_PAGE_NULL;
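+			/*
+			 * There may be no resident page here (e.g. if the
+			 * object was "phys_contiguous" device memory), so
+			 * grab and insert one for the zero-fill code below.
+			 */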
+ if(m == VM_PAGE_NULL) {
+ m = vm_page_grab();
+ if (m == VM_PAGE_NULL) {
+ vm_fault_cleanup(
+ object, VM_PAGE_NULL);
+ thread_interrupt_level(
+ interruptible_state);
+ return(VM_FAULT_MEMORY_SHORTAGE);
+ }
+ vm_page_insert(
+ m, object, offset);
+ }
+
if (object->shadow_severed) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
return VM_FAULT_MEMORY_ERROR;
}
+ /*
+			 * Are we protecting the system from
+			 * backing store exhaustion?  If so,
+			 * sleep unless we are privileged.
+ */
+
+ if(vm_backing_store_low) {
+ if(!(current_task()->priv_flags
+ & VM_BACKING_STORE_PRIV)) {
+ assert_wait((event_t)
+ &vm_backing_store_low,
+ THREAD_UNINT);
+ VM_PAGE_FREE(m);
+ vm_fault_cleanup(object, VM_PAGE_NULL);
+ thread_block(THREAD_CONTINUE_NULL);
+ thread_interrupt_level(
+ interruptible_state);
+ return(VM_FAULT_RETRY);
+ }
+ }
+
if (VM_PAGE_THROTTLED() ||
(m->fictitious && !vm_page_convert(m))) {
VM_PAGE_FREE(m);
if (!no_zero_fill) {
vm_object_unlock(object);
vm_page_zero_fill(m);
+ vm_object_lock(object);
+
if (type_of_fault)
*type_of_fault = DBG_ZERO_FILL_FAULT;
VM_STAT(zero_fill_count++);
-
- if (bumped_pagein == TRUE) {
- VM_STAT(pageins--);
- current_task()->pageins--;
- }
- vm_object_lock(object);
+ }
+ if (bumped_pagein == TRUE) {
+ VM_STAT(pageins--);
+ current_task()->pageins--;
}
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(m);
- if(m->object->size > 0x80000) {
+ assert(!m->laundry);
+ assert(m->object != kernel_object);
+ assert(m->pageq.next == NULL &&
+ m->pageq.prev == NULL);
+ if(m->object->size > 0x200000) {
m->zero_fill = TRUE;
/* depends on the queues lock */
vm_zf_count += 1;
m->inactive = TRUE;
vm_page_inactive_count++;
vm_page_unlock_queues();
- pmap_clear_modify(m->phys_addr);
+#if 0
+ pmap_clear_modify(m->phys_page);
+#endif
break;
}
else {
}
#endif /* EXTRA_ASSERTIONS */
+ /*
+ * ENCRYPTED SWAP:
+ * If we found a page, we must have decrypted it before we
+ * get here...
+ */
+ if (m != VM_PAGE_NULL) {
+ ASSERT_PAGE_DECRYPTED(m);
+ }
+
XPR(XPR_VM_FAULT,
"vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
(integer_t)object, offset, (integer_t)m,
assert(!must_be_resident);
+ /*
+			 * Are we protecting the system from
+			 * backing store exhaustion?  If so,
+			 * sleep unless we are privileged.
+ */
+
+ if(vm_backing_store_low) {
+ if(!(current_task()->priv_flags
+ & VM_BACKING_STORE_PRIV)) {
+ assert_wait((event_t)
+ &vm_backing_store_low,
+ THREAD_UNINT);
+ RELEASE_PAGE(m);
+ vm_fault_cleanup(object, first_m);
+ thread_block(THREAD_CONTINUE_NULL);
+ thread_interrupt_level(
+ interruptible_state);
+ return(VM_FAULT_RETRY);
+ }
+ }
+
/*
* If we try to collapse first_object at this
* point, we may deadlock when we try to get
*
* XXXO If we know that only one map has
* access to this page, then we could
- * avoid the pmap_page_protect() call.
+ * avoid the pmap_disconnect() call.
*/
vm_page_lock_queues();
assert(!m->cleaning);
- pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ pmap_disconnect(m->phys_page);
vm_page_deactivate(m);
copy_m->dirty = TRUE;
/*
*/
vm_object_paging_end(object);
- vm_object_collapse(object);
+ vm_object_collapse(object, offset, TRUE);
vm_object_paging_begin(object);
}
copy_object->ref_count--;
assert(copy_object->ref_count > 0);
copy_m = vm_page_lookup(copy_object, copy_offset);
+ /*
+ * ENCRYPTED SWAP:
+ * it's OK if the "copy_m" page is encrypted,
+ * because we're not moving it nor handling its
+ * contents.
+ */
if (copy_m != VM_PAGE_NULL && copy_m->busy) {
PAGE_ASSERT_WAIT(copy_m, interruptible);
vm_object_unlock(copy_object);
* We must copy the page to the copy object.
*/
+ /*
+			 * Are we protecting the system from
+			 * backing store exhaustion?  If so,
+			 * sleep unless we are privileged.
+ */
+
+ if(vm_backing_store_low) {
+ if(!(current_task()->priv_flags
+ & VM_BACKING_STORE_PRIV)) {
+ assert_wait((event_t)
+ &vm_backing_store_low,
+ THREAD_UNINT);
+ RELEASE_PAGE(m);
+ VM_OBJ_RES_DECR(copy_object);
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+ vm_object_unlock(copy_object);
+ vm_fault_cleanup(object, first_m);
+ thread_block(THREAD_CONTINUE_NULL);
+ thread_interrupt_level(
+ interruptible_state);
+ return(VM_FAULT_RETRY);
+ }
+ }
+
/*
* Allocate a page for the copy
*/
vm_page_lock_queues();
assert(!m->cleaning);
- pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ pmap_disconnect(m->phys_page);
copy_m->dirty = TRUE;
vm_page_unlock_queues();
* mark read-only data as dirty.]
*/
+
+ if(m != VM_PAGE_NULL) {
#if !VM_FAULT_STATIC_CONFIG
- if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE) &&
- (m != VM_PAGE_NULL)) {
- m->dirty = TRUE;
- }
-#endif
-#if TRACEFAULTPAGE
- dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_page_deactivate_behind); /* (TEST/DEBUG) */
-#endif
- if (vm_page_deactivate_behind) {
- if (offset && /* don't underflow */
- (object->last_alloc == (offset - PAGE_SIZE_64))) {
- m = vm_page_lookup(object, object->last_alloc);
- if ((m != VM_PAGE_NULL) && !m->busy) {
- vm_page_lock_queues();
- vm_page_deactivate(m);
- vm_page_unlock_queues();
- }
-#if TRACEFAULTPAGE
- dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
+ if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
+ m->dirty = TRUE;
#endif
- }
- object->last_alloc = offset;
+ if (vm_page_deactivate_behind)
+ vm_fault_deactivate_behind(object, offset, behavior);
+ } else {
+ vm_object_unlock(object);
}
+ thread_interrupt_level(interruptible_state);
+
#if TRACEFAULTPAGE
dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
#endif
- thread_interrupt_level(interruptible_state);
- if(*result_page == VM_PAGE_NULL) {
- vm_object_unlock(object);
- }
return(VM_FAULT_SUCCESS);
#if 0
#undef RELEASE_PAGE
}
+/*
+ * Routine: vm_fault_tws_insert
+ * Purpose:
+ * Add fault information to the task working set.
+ * Implementation:
+ * We always insert the base object/offset pair
+ *	rather than the actual object/offset.
+ * Assumptions:
+ * Map and real_map locked.
+ * Object locked and referenced.
+ * Returns:
+ * TRUE if startup file should be written.
+ * With object locked and still referenced.
+ * But we may drop the object lock temporarily.
+ */
+static boolean_t
+vm_fault_tws_insert(
+ vm_map_t map,
+ vm_map_t real_map,
+ vm_map_offset_t vaddr,
+ vm_object_t object,
+ vm_object_offset_t offset)
+{
+ tws_hash_line_t line;
+ task_t task;
+ kern_return_t kr;
+ boolean_t result = FALSE;
+
+ /* Avoid possible map lock deadlock issues */
+ if (map == kernel_map || map == kalloc_map ||
+ real_map == kernel_map || real_map == kalloc_map)
+ return result;
+
+ task = current_task();
+ if (task->dynamic_working_set != 0) {
+ vm_object_t base_object;
+ vm_object_t base_shadow;
+ vm_object_offset_t base_offset;
+ base_object = object;
+ base_offset = offset;
+ while ((base_shadow = base_object->shadow)) {
+ vm_object_lock(base_shadow);
+ vm_object_unlock(base_object);
+ base_offset +=
+ base_object->shadow_offset;
+ base_object = base_shadow;
+ }
+ kr = tws_lookup(
+ task->dynamic_working_set,
+ base_offset, base_object,
+ &line);
+ if (kr == KERN_OPERATION_TIMED_OUT){
+ result = TRUE;
+ if (base_object != object) {
+ vm_object_unlock(base_object);
+ vm_object_lock(object);
+ }
+ } else if (kr != KERN_SUCCESS) {
+ if(base_object != object)
+ vm_object_reference_locked(base_object);
+ kr = tws_insert(
+ task->dynamic_working_set,
+ base_offset, base_object,
+ vaddr, real_map);
+ if(base_object != object) {
+ vm_object_unlock(base_object);
+ vm_object_deallocate(base_object);
+ }
+ if(kr == KERN_NO_SPACE) {
+ if (base_object == object)
+ vm_object_unlock(object);
+ tws_expand_working_set(
+ task->dynamic_working_set,
+ TWS_HASH_LINE_COUNT,
+ FALSE);
+ if (base_object == object)
+ vm_object_lock(object);
+ } else if(kr == KERN_OPERATION_TIMED_OUT) {
+ result = TRUE;
+ }
+ if(base_object != object)
+ vm_object_lock(object);
+ } else if (base_object != object) {
+ vm_object_unlock(base_object);
+ vm_object_lock(object);
+ }
+ }
+ return result;
+}
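+/*
+ * Locking note: the shadow-chain walk above locks each shadow before
+ * unlocking its parent, and every exit path re-takes the original
+ * object's lock, so the object is locked on return even though the
+ * lock may be dropped while tws_insert()/tws_expand_working_set() run.
+ */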
+
/*
* Routine: vm_fault
* Purpose:
* and deallocated when leaving vm_fault.
*/
+extern int _map_enter_debug;
+
kern_return_t
vm_fault(
vm_map_t map,
- vm_offset_t vaddr,
+ vm_map_offset_t vaddr,
vm_prot_t fault_type,
boolean_t change_wiring,
int interruptible,
pmap_t caller_pmap,
- vm_offset_t caller_pmap_addr)
+ vm_map_offset_t caller_pmap_addr)
{
	vm_map_version_t	version;	/* Map version for verification */
boolean_t wired; /* Should mapping be wired down? */
vm_object_offset_t offset; /* Top-level offset */
vm_prot_t prot; /* Protection for mapping */
vm_behavior_t behavior; /* Expected paging behavior */
- vm_object_offset_t lo_offset, hi_offset;
+ vm_map_offset_t lo_offset, hi_offset;
vm_object_t old_copy_object; /* Saved copy object */
vm_page_t result_page; /* Result of vm_fault_page */
vm_page_t top_page; /* Placeholder page */
register
vm_page_t m; /* Fast access to result_page */
- kern_return_t error_code; /* page error reasons */
+ kern_return_t error_code = 0; /* page error reasons */
register
vm_object_t cur_object;
register
vm_page_t cur_m;
vm_object_t new_object;
int type_of_fault;
- vm_map_t pmap_map = map;
+ vm_map_t real_map = map;
vm_map_t original_map = map;
pmap_t pmap = NULL;
- boolean_t funnel_set = FALSE;
- funnel_t *curflock;
- thread_t cur_thread;
boolean_t interruptible_state;
unsigned int cache_attr;
int write_startup_file = 0;
- vm_prot_t full_fault_type;
-
+ boolean_t need_activation;
+ vm_prot_t original_fault_type;
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START,
0,
0);
- cur_thread = current_thread();
- /* at present we do not fully check for execute permission */
- /* we generally treat it is read except in certain device */
- /* memory settings */
- full_fault_type = fault_type;
- if(fault_type & VM_PROT_EXECUTE) {
- fault_type &= ~VM_PROT_EXECUTE;
- fault_type |= VM_PROT_READ;
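+	/*
+	 * Handling a fault may block; refuse faults taken with
+	 * preemption disabled rather than risk a deadlock.
+	 */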
+ if (get_preemption_level() != 0) {
+ KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
+ vaddr,
+ 0,
+ KERN_FAILURE,
+ 0,
+ 0);
+
+ return (KERN_FAILURE);
}
interruptible_state = thread_interrupt_level(interruptible);
VM_STAT(faults++);
current_task()->faults++;
- /*
- * drop funnel if it is already held. Then restore while returning
- */
- if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
- funnel_set = TRUE;
- curflock = cur_thread->funnel_lock;
- thread_funnel_set( curflock , FALSE);
- }
-
+ original_fault_type = fault_type;
+
RetryFault: ;
/*
* Find the backing store object and offset into
* it to begin the search.
*/
+ fault_type = original_fault_type;
map = original_map;
vm_map_lock_read(map);
kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
&object, &offset,
&prot, &wired,
- &behavior, &lo_offset, &hi_offset, &pmap_map);
+ &behavior, &lo_offset, &hi_offset, &real_map);
+
+//if (_map_enter_debug)printf("vm_map_lookup_locked(map=0x%x, addr=0x%llx, prot=%d wired=%d) = %d\n", map, vaddr, prot, wired, kr);
- pmap = pmap_map->pmap;
+ pmap = real_map->pmap;
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(map);
while (TRUE) {
m = vm_page_lookup(cur_object, cur_offset);
if (m != VM_PAGE_NULL) {
- if (m->busy) {
+ if (m->busy) {
wait_result_t result;
if (object != cur_object)
vm_object_unlock(object);
vm_map_unlock_read(map);
- if (pmap_map != map)
- vm_map_unlock(pmap_map);
+ if (real_map != map)
+ vm_map_unlock(real_map);
#if !VM_FAULT_STATIC_CONFIG
if (!vm_fault_interruptible)
break;
}
+ if (m->encrypted) {
+ /*
+ * ENCRYPTED SWAP:
+ * We've soft-faulted (because it's not in the page
+ * table) on an encrypted page.
+			 * Keep the page "busy" so that no one messes with
+ * it during the decryption.
+ * Release the extra locks we're holding, keep only
+ * the page's VM object lock.
+ */
+ m->busy = TRUE;
+ if (object != cur_object) {
+ vm_object_unlock(object);
+ }
+ vm_map_unlock_read(map);
+ if (real_map != map)
+ vm_map_unlock(real_map);
+
+ vm_page_decrypt(m, 0);
+
+ assert(m->busy);
+ PAGE_WAKEUP_DONE(m);
+ vm_object_unlock(m->object);
+
+ /*
+ * Retry from the top, in case anything
+ * changed while we were decrypting...
+ */
+ goto RetryFault;
+ }
+ ASSERT_PAGE_DECRYPTED(m);
+
/*
* Two cases of map in faults:
* - At top level w/o copy object.
goto FastMapInFault;
if ((fault_type & VM_PROT_WRITE) == 0) {
+ boolean_t sequential;
prot &= ~VM_PROT_WRITE;
FastMapInFault:
m->busy = TRUE;
- vm_object_paging_begin(object);
-
FastPmapEnter:
/*
* Check a couple of global reasons to
prot &= ~VM_PROT_WRITE;
#endif /* MACH_KDB */
#endif /* STATIC_CONFIG */
+ cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+
+ sequential = FALSE;
+ need_activation = FALSE;
+
if (m->no_isync == TRUE) {
- pmap_sync_caches_phys(m->phys_addr);
m->no_isync = FALSE;
+ pmap_sync_page_data_phys(m->phys_page);
+
+ if ((type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
+ /*
+ * found it in the cache, but this
+ * is the first fault-in of the page (no_isync == TRUE)
+ * so it must have come in as part of
+ * a cluster... account 1 pagein against it
+ */
+ VM_STAT(pageins++);
+ current_task()->pageins++;
+ type_of_fault = DBG_PAGEIN_FAULT;
+ sequential = TRUE;
+ }
+ if (m->clustered)
+ need_activation = TRUE;
+
+ } else if (cache_attr != VM_WIMG_DEFAULT) {
+ pmap_sync_page_attributes_phys(m->phys_page);
}
- cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
if(caller_pmap) {
PMAP_ENTER(caller_pmap,
caller_pmap_addr, m,
}
/*
- * Grab the queues lock to manipulate
+		 * Hold the queues lock to manipulate
* the page queues. Change wiring
* case is obvious. In soft ref bits
* case activate page only if it fell
* move active page to back of active
* queue. This code doesn't.
*/
- vm_page_lock_queues();
-
if (m->clustered) {
vm_pagein_cluster_used++;
m->clustered = FALSE;
}
- m->reference = TRUE;
-
if (change_wiring) {
+ vm_page_lock_queues();
+
if (wired)
vm_page_wire(m);
else
vm_page_unwire(m);
+
+ vm_page_unlock_queues();
}
-#if VM_FAULT_STATIC_CONFIG
else {
- if (!m->active && !m->inactive)
+ if ((!m->active && !m->inactive) || ((need_activation == TRUE) && !m->active)) {
+ vm_page_lock_queues();
vm_page_activate(m);
+ vm_page_unlock_queues();
+ }
}
-#else
- else if (software_reference_bits) {
- if (!m->active && !m->inactive)
- vm_page_activate(m);
- }
- else if (!m->active) {
- vm_page_activate(m);
- }
-#endif
- vm_page_unlock_queues();
/*
* That's it, clean up and return.
*/
PAGE_WAKEUP_DONE(m);
- vm_object_paging_end(object);
- {
- tws_hash_line_t line;
- task_t task;
-
- task = current_task();
- if((map != NULL) &&
- (task->dynamic_working_set != 0) &&
- !(object->private)) {
- kern_return_t kr;
- vm_object_t base_object;
- vm_object_offset_t base_offset;
- base_object = object;
- base_offset = cur_offset;
- while(base_object->shadow) {
- base_offset +=
- base_object->shadow_offset;
- base_object =
- base_object->shadow;
- }
- kr = tws_lookup((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- &line);
- if(kr == KERN_OPERATION_TIMED_OUT){
- write_startup_file = 1;
- } else if (kr != KERN_SUCCESS) {
- kr = tws_insert((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- vaddr, pmap_map);
- if(kr == KERN_NO_SPACE) {
- vm_object_unlock(object);
-
- tws_expand_working_set(
- task->dynamic_working_set,
- TWS_HASH_LINE_COUNT,
- FALSE);
-
- vm_object_lock(object);
- }
- if(kr ==
- KERN_OPERATION_TIMED_OUT) {
- write_startup_file = 1;
- }
- }
- }
+ sequential = (sequential && vm_page_deactivate_behind) ?
+ vm_fault_deactivate_behind(object, cur_offset, behavior) :
+ FALSE;
+
+ /*
+ * Add non-sequential pages to the working set.
+ * The sequential pages will be brought in through
+ * normal clustering behavior.
+ */
+ if (!sequential && !object->private) {
+ vm_object_paging_begin(object);
+
+ write_startup_file =
+ vm_fault_tws_insert(map, real_map, vaddr,
+ object, cur_offset);
+
+ vm_object_paging_end(object);
}
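+			/*
+			 * (The paging_begin/end pair brackets the call
+			 * because vm_fault_tws_insert() may drop the object
+			 * lock; the paging reference keeps the object from
+			 * being collapsed or terminated meanwhile.)
+			 */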
vm_object_unlock(object);
vm_map_unlock_read(map);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
if(write_startup_file)
tws_send_startup_info(current_task());
- if (funnel_set)
- thread_funnel_set( curflock, TRUE);
-
thread_interrupt_level(interruptible_state);
* Now cope with the source page and object
* If the top object has a ref count of 1
* then no other map can access it, and hence
- * it's not necessary to do the pmap_page_protect.
+ * it's not necessary to do the pmap_disconnect.
*/
-
vm_page_lock_queues();
vm_page_deactivate(cur_m);
m->dirty = TRUE;
- pmap_page_protect(cur_m->phys_addr,
- VM_PROT_NONE);
+ pmap_disconnect(cur_m->phys_page);
vm_page_unlock_queues();
PAGE_WAKEUP_DONE(cur_m);
*/
vm_object_paging_end(object);
- vm_object_collapse(object);
- vm_object_paging_begin(object);
+ vm_object_collapse(object, offset, TRUE);
goto FastPmapEnter;
}
vm_object_paging_end(object);
vm_object_unlock(object);
vm_map_unlock_read(map);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
if(write_startup_file)
tws_send_startup_info(
current_task());
- if (funnel_set) {
- thread_funnel_set( curflock, TRUE);
- funnel_set = FALSE;
- }
thread_interrupt_level(interruptible_state);
- return VM_FAULT_MEMORY_ERROR;
+ return KERN_MEMORY_ERROR;
}
/*
* page, then drop any lower lock.
* Give up if no page.
*/
- if ((vm_page_free_target -
- ((vm_page_free_target-vm_page_free_min)>>2))
- > vm_page_free_count) {
+ if (VM_PAGE_THROTTLED()) {
+ break;
+ }
+
+ /*
+		 * Are we protecting the system from
+		 * backing store exhaustion?  If so,
+		 * sleep unless we are privileged.
+ */
+ if(vm_backing_store_low) {
+ if(!(current_task()->priv_flags
+ & VM_BACKING_STORE_PRIV))
break;
}
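+			/*
+			 * No waiting in the fast path: breaking out
+			 * falls through to vm_fault_page(), which does
+			 * the blocking for unprivileged threads when
+			 * backing store is low.
+			 */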
m = vm_page_alloc(object, offset);
if (cur_object != object)
vm_object_unlock(cur_object);
- vm_object_paging_begin(object);
- vm_object_unlock(object);
-
/*
* Now zero fill page and map it.
* the page is probably going to
VM_PAGE_QUEUES_REMOVE(m);
m->page_ticket = vm_page_ticket;
- if(m->object->size > 0x80000) {
+ assert(!m->laundry);
+ assert(m->object != kernel_object);
+ assert(m->pageq.next == NULL &&
+ m->pageq.prev == NULL);
+ if(m->object->size > 0x200000) {
m->zero_fill = TRUE;
/* depends on the queues lock */
vm_zf_count += 1;
m->inactive = TRUE;
vm_page_inactive_count++;
vm_page_unlock_queues();
- vm_object_lock(object);
goto FastPmapEnter;
}
}
vm_map_unlock_read(map);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
/*
* Make a reference to this object to
vm_object_paging_begin(object);
XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0);
- {
- tws_hash_line_t line;
- task_t task;
- kern_return_t kr;
-
- task = current_task();
- if((map != NULL) &&
- (task->dynamic_working_set != 0)
- && !(object->private)) {
- vm_object_t base_object;
- vm_object_offset_t base_offset;
- base_object = object;
- base_offset = offset;
- while(base_object->shadow) {
- base_offset +=
- base_object->shadow_offset;
- base_object =
- base_object->shadow;
- }
- kr = tws_lookup((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- &line);
- if(kr == KERN_OPERATION_TIMED_OUT){
- write_startup_file = 1;
- } else if (kr != KERN_SUCCESS) {
- tws_insert((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- vaddr, pmap_map);
- kr = tws_insert((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- vaddr, pmap_map);
- if(kr == KERN_NO_SPACE) {
- vm_object_unlock(object);
- tws_expand_working_set(
- task->dynamic_working_set,
- TWS_HASH_LINE_COUNT,
- FALSE);
- vm_object_lock(object);
- }
- if(kr == KERN_OPERATION_TIMED_OUT) {
- write_startup_file = 1;
- }
- }
- }
+
+ if (!object->private) {
+ write_startup_file =
+ vm_fault_tws_insert(map, real_map, vaddr, object, offset);
}
+
kr = vm_fault_page(object, offset, fault_type,
(change_wiring && !wired),
interruptible,
fault_type & ~VM_PROT_WRITE, &version,
&retry_object, &retry_offset, &retry_prot,
&wired, &behavior, &lo_offset, &hi_offset,
- &pmap_map);
- pmap = pmap_map->pmap;
+ &real_map);
+ pmap = real_map->pmap;
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(map);
if ((retry_object != object) ||
(retry_offset != offset)) {
vm_map_unlock_read(map);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
if(m != VM_PAGE_NULL) {
RELEASE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
if (wired && (fault_type != (prot|VM_PROT_WRITE))) {
vm_map_verify_done(map, &version);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
if(m != VM_PAGE_NULL) {
RELEASE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
* the pageout queues. If the pageout daemon comes
* across the page, it will remove it from the queues.
*/
+ need_activation = FALSE;
+
if (m != VM_PAGE_NULL) {
if (m->no_isync == TRUE) {
- pmap_sync_caches_phys(m->phys_addr);
-
+ pmap_sync_page_data_phys(m->phys_page);
+
+ if ((type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
+ /*
+ * found it in the cache, but this
+ * is the first fault-in of the page (no_isync == TRUE)
+ * so it must have come in as part of
+ * a cluster... account 1 pagein against it
+ */
+ VM_STAT(pageins++);
+ current_task()->pageins++;
+
+ type_of_fault = DBG_PAGEIN_FAULT;
+ }
+ if (m->clustered) {
+ need_activation = TRUE;
+ }
m->no_isync = FALSE;
}
-
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
if(caller_pmap) {
PMAP_ENTER(pmap, vaddr, m,
prot, cache_attr, wired);
}
- {
- tws_hash_line_t line;
- task_t task;
- kern_return_t kr;
-
- task = current_task();
- if((map != NULL) &&
- (task->dynamic_working_set != 0)
- && (object->private)) {
- vm_object_t base_object;
- vm_object_offset_t base_offset;
- base_object = m->object;
- base_offset = m->offset;
- while(base_object->shadow) {
- base_offset +=
- base_object->shadow_offset;
- base_object =
- base_object->shadow;
- }
- kr = tws_lookup((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object, &line);
- if(kr == KERN_OPERATION_TIMED_OUT){
- write_startup_file = 1;
- } else if (kr != KERN_SUCCESS) {
- tws_insert((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- vaddr, pmap_map);
- kr = tws_insert((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- vaddr, pmap_map);
- if(kr == KERN_NO_SPACE) {
- vm_object_unlock(m->object);
- tws_expand_working_set(
- task->dynamic_working_set,
- TWS_HASH_LINE_COUNT,
- FALSE);
- vm_object_lock(m->object);
- }
- if(kr == KERN_OPERATION_TIMED_OUT) {
- write_startup_file = 1;
- }
- }
- }
+
+ /*
+ * Add working set information for private objects here.
+ */
+ if (m->object->private) {
+ write_startup_file =
+ vm_fault_tws_insert(map, real_map, vaddr,
+ m->object, m->offset);
}
} else {
-#ifndef i386
- int memattr;
- struct phys_entry *pp;
vm_map_entry_t entry;
- vm_offset_t laddr;
- vm_offset_t ldelta, hdelta;
+ vm_map_offset_t laddr;
+ vm_map_offset_t ldelta, hdelta;
/*
* do a pmap block mapping from the physical address
* in the object
*/
- if(pp = pmap_find_physentry(
- (vm_offset_t)object->shadow_offset)) {
- memattr = ((pp->pte1 & 0x00000078) >> 3);
- } else {
- memattr = VM_WIMG_MASK & (int)object->wimg_bits;
- }
+#ifndef i386
+ /* While we do not worry about execution protection in */
+			/* general, certain pages may have instruction execution */
+ /* disallowed. We will check here, and if not allowed */
+ /* to execute, we return with a protection failure. */
- /* While we do not worry about execution protection in */
- /* general, we may be able to read device memory and */
- /* still not be able to execute it. Here we check for */
- /* the guarded bit. If its set and we are attempting */
- /* to execute, we return with a protection failure. */
+ if((fault_type & VM_PROT_EXECUTE) &&
+ (!pmap_eligible_for_execute((ppnum_t)
+ (object->shadow_offset >> 12)))) {
- if((memattr & VM_MEM_GUARDED) &&
- (full_fault_type & VM_PROT_EXECUTE)) {
vm_map_verify_done(map, &version);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
vm_fault_cleanup(object, top_page);
vm_object_deallocate(object);
kr = KERN_PROTECTION_FAILURE;
goto done;
}
+#endif /* !i386 */
-
-
- if(pmap_map != map) {
- vm_map_unlock(pmap_map);
+ if(real_map != map) {
+ vm_map_unlock(real_map);
}
if (original_map != map) {
vm_map_unlock_read(map);
vm_map_lock_read(original_map);
map = original_map;
}
- pmap_map = map;
+ real_map = map;
laddr = vaddr;
hdelta = 0xFFFFF000;
laddr = (laddr - entry->vme_start)
+ entry->offset;
vm_map_lock_read(entry->object.sub_map);
- if(map != pmap_map)
+ if(map != real_map)
vm_map_unlock_read(map);
if(entry->use_pmap) {
- vm_map_unlock_read(pmap_map);
- pmap_map = entry->object.sub_map;
+ vm_map_unlock_read(real_map);
+ real_map = entry->object.sub_map;
}
map = entry->object.sub_map;
}
if(vm_map_lookup_entry(map, laddr, &entry) &&
- (entry->object.vm_object != NULL) &&
- (entry->object.vm_object == object)) {
+ (entry->object.vm_object != NULL) &&
+ (entry->object.vm_object == object)) {
+ vm_map_offset_t phys_offset;
+ phys_offset = (entry->object.vm_object->shadow_offset
+ + entry->offset
+ + laddr
+ - entry->vme_start);
+ phys_offset -= ldelta;
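+				/*
+				 * phys_offset is now the physical base of
+				 * the block mapping: the device offset that
+				 * backs this map entry at (vaddr - ldelta).
+				 * The calls below shift it to a page number
+				 * for pmap_map_block().
+				 */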
if(caller_pmap) {
- pmap_map_block(caller_pmap,
- caller_pmap_addr - ldelta,
- ((vm_offset_t)
- (entry->object.vm_object->shadow_offset))
- + entry->offset +
- (laddr - entry->vme_start) - ldelta,
- ldelta + hdelta, prot,
- memattr, 0); /* Set up a block mapped area */
- } else {
- pmap_map_block(pmap_map->pmap, vaddr - ldelta,
- ((vm_offset_t)
- (entry->object.vm_object->shadow_offset))
- + entry->offset +
- (laddr - entry->vme_start) - ldelta,
- ldelta + hdelta, prot,
- memattr, 0); /* Set up a block mapped area */
+ /* Set up a block mapped area */
+ pmap_map_block(
+ caller_pmap,
+ (addr64_t)(caller_pmap_addr - ldelta),
+ phys_offset >> 12,
+ (ldelta + hdelta) >> 12,
+ prot,
+ (VM_WIMG_MASK & (int)object->wimg_bits),
+ 0);
+ } else {
+ /* Set up a block mapped area */
+ pmap_map_block(
+ real_map->pmap,
+ (addr64_t)(vaddr - ldelta),
+ phys_offset >> 12,
+ (ldelta + hdelta) >> 12,
+ prot,
+ (VM_WIMG_MASK & (int)object->wimg_bits),
+ 0);
}
}
-#else
-#ifdef notyet
- if(caller_pmap) {
- pmap_enter(caller_pmap, caller_pmap_addr,
- object->shadow_offset, prot, 0, TRUE);
- } else {
- pmap_enter(pmap, vaddr,
- object->shadow_offset, prot, 0, TRUE);
- }
- /* Map it in */
-#endif
-#endif
}
if(m != VM_PAGE_NULL) {
vm_page_lock_queues();
+ if (m->clustered) {
+ vm_pagein_cluster_used++;
+ m->clustered = FALSE;
+ }
+ m->reference = TRUE;
+
if (change_wiring) {
if (wired)
vm_page_wire(m);
}
#if VM_FAULT_STATIC_CONFIG
else {
- if (!m->active && !m->inactive)
+ if ((!m->active && !m->inactive) || ((need_activation == TRUE) && !m->active))
vm_page_activate(m);
- m->reference = TRUE;
}
#else
else if (software_reference_bits) {
*/
vm_map_verify_done(map, &version);
- if(pmap_map != map)
- vm_map_unlock(pmap_map);
+ if(real_map != map)
+ vm_map_unlock(real_map);
if(m != VM_PAGE_NULL) {
PAGE_WAKEUP_DONE(m);
UNLOCK_AND_DEALLOCATE;
done:
if(write_startup_file)
tws_send_startup_info(current_task());
- if (funnel_set) {
- thread_funnel_set( curflock, TRUE);
- funnel_set = FALSE;
- }
+
thread_interrupt_level(interruptible_state);
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
vm_map_t map,
vm_map_entry_t entry,
pmap_t pmap,
- vm_offset_t pmap_addr)
+ vm_map_offset_t pmap_addr)
{
- register vm_offset_t va;
- register vm_offset_t end_addr = entry->vme_end;
+ register vm_map_offset_t va;
+ register vm_map_offset_t end_addr = entry->vme_end;
register kern_return_t rc;
assert(entry->in_transition);
vm_map_entry_t entry,
boolean_t deallocate,
pmap_t pmap,
- vm_offset_t pmap_addr)
+ vm_map_offset_t pmap_addr)
{
- register vm_offset_t va;
- register vm_offset_t end_addr = entry->vme_end;
+ register vm_map_offset_t va;
+ register vm_map_offset_t end_addr = entry->vme_end;
vm_object_t object;
object = (entry->is_sub_map)
result_object = result_page->object;
if (deallocate) {
assert(!result_page->fictitious);
- pmap_page_protect(result_page->phys_addr,
- VM_PROT_NONE);
+ pmap_disconnect(result_page->phys_page);
VM_PAGE_FREE(result_page);
} else {
vm_page_lock_queues();
*/
kern_return_t
vm_fault_wire_fast(
- vm_map_t map,
- vm_offset_t va,
+ __unused vm_map_t map,
+ vm_map_offset_t va,
vm_map_entry_t entry,
- pmap_t pmap,
- vm_offset_t pmap_addr)
+ pmap_t pmap,
+ vm_map_offset_t pmap_addr)
{
vm_object_t object;
vm_object_offset_t offset;
register vm_page_t m;
vm_prot_t prot;
- thread_act_t thr_act;
+ thread_t thread = current_thread();
unsigned int cache_attr;
VM_STAT(faults++);
- if((thr_act=current_act()) && (thr_act->task != TASK_NULL))
- thr_act->task->faults++;
+ if (thread != THREAD_NULL && thread->task != TASK_NULL)
+ thread->task->faults++;
/*
* Recovery actions
#undef UNLOCK_THINGS
#define UNLOCK_THINGS { \
- object->paging_in_progress--; \
- vm_object_unlock(object); \
+ vm_object_paging_end(object); \
+ vm_object_unlock(object); \
}
#undef UNLOCK_AND_DEALLOCATE
assert(object->ref_count > 0);
object->ref_count++;
vm_object_res_reference(object);
- object->paging_in_progress++;
+ vm_object_paging_begin(object);
/*
* INVARIANTS (through entire routine):
/*
* Look for page in top-level object. If it's not there or
* there's something going on, give up.
+ * ENCRYPTED SWAP: use the slow fault path, since we'll need to
+ * decrypt the page before wiring it down.
*/
m = vm_page_lookup(object, offset);
- if ((m == VM_PAGE_NULL) || (m->busy) ||
+ if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) ||
(m->unusual && ( m->error || m->restart || m->absent ||
prot & m->page_lock))) {
GIVE_UP;
}
+ ASSERT_PAGE_DECRYPTED(m);
/*
* Wire the page down now. All bail outs beyond this
* may cause other faults.
*/
if (m->no_isync == TRUE) {
- pmap_sync_caches_phys(m->phys_addr);
+ pmap_sync_page_data_phys(m->phys_page);
m->no_isync = FALSE;
}
vm_fault_copy(
vm_object_t src_object,
vm_object_offset_t src_offset,
- vm_size_t *src_size, /* INOUT */
+ vm_map_size_t *copy_size, /* INOUT */
vm_object_t dst_object,
vm_object_offset_t dst_offset,
vm_map_t dst_map,
vm_page_t dst_top_page;
vm_prot_t dst_prot;
- vm_size_t amount_left;
+ vm_map_size_t amount_left;
vm_object_t old_copy_object;
kern_return_t error = 0;
- vm_size_t part_size;
+ vm_map_size_t part_size;
/*
* In order not to confuse the clustered pageins, align
* the different offsets on a page boundary.
*/
- vm_object_offset_t src_lo_offset = trunc_page_64(src_offset);
- vm_object_offset_t dst_lo_offset = trunc_page_64(dst_offset);
- vm_object_offset_t src_hi_offset = round_page_64(src_offset + *src_size);
- vm_object_offset_t dst_hi_offset = round_page_64(dst_offset + *src_size);
+ vm_object_offset_t src_lo_offset = vm_object_trunc_page(src_offset);
+ vm_object_offset_t dst_lo_offset = vm_object_trunc_page(dst_offset);
+ vm_object_offset_t src_hi_offset = vm_object_round_page(src_offset + *copy_size);
+ vm_object_offset_t dst_hi_offset = vm_object_round_page(dst_offset + *copy_size);
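+	/*
+	 * These page-aligned bounds are handed to vm_fault_page() below as
+	 * the lo/hi cluster limits for the source and destination objects.
+	 */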
#define RETURN(x) \
MACRO_BEGIN \
- *src_size -= amount_left; \
+ *copy_size -= amount_left; \
MACRO_RETURN(x); \
MACRO_END
- amount_left = *src_size;
+ amount_left = *copy_size;
do { /* while (amount_left > 0) */
/*
* There may be a deadlock if both source and destination
XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
switch (vm_fault_page(dst_object,
- trunc_page_64(dst_offset),
+ vm_object_trunc_page(dst_offset),
VM_PROT_WRITE|VM_PROT_READ,
FALSE,
interruptible,
} else {
vm_object_lock(src_object);
src_page = vm_page_lookup(src_object,
- trunc_page_64(src_offset));
+ vm_object_trunc_page(src_offset));
if (src_page == dst_page) {
src_prot = dst_prot;
result_page = VM_PAGE_NULL;
"vm_fault_copy(2) -> vm_fault_page\n",
0,0,0,0,0);
switch (vm_fault_page(src_object,
- trunc_page_64(src_offset),
+ vm_object_trunc_page(src_offset),
VM_PROT_READ,
FALSE,
interruptible,
vm_object_offset_t src_po,
dst_po;
- src_po = src_offset - trunc_page_64(src_offset);
- dst_po = dst_offset - trunc_page_64(dst_offset);
+ src_po = src_offset - vm_object_trunc_page(src_offset);
+ dst_po = dst_offset - vm_object_trunc_page(dst_offset);
if (dst_po > src_po) {
part_size = PAGE_SIZE - dst_po;