#include <mach_cluster_stats.h>
#include <mach_pagemap.h>
-#include <mach_kdb.h>
#include <libkern/OSAtomic.h>
#include <mach/mach_types.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
-#include <ppc/proc_reg.h>
-
+#include <vm/vm_compressor.h>
+#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_external.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
+#include <vm/vm_shared_region.h>
+
+#include <sys/codesign.h>
-#include <sys/kdebug.h>
+#include <libsa/sys/timers.h> /* for struct timespec */
#define VM_FAULT_CLASSIFY 0
* delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
*/
-boolean_t thread_is_io_throttled(void);
+extern void throttle_lowpri_io(int);
uint64_t vm_hard_throttle_threshold;
-extern unsigned int dp_pages_free, dp_pages_reserve;
-#define NEED_TO_HARD_THROTTLE_THIS_TASK() (((dp_pages_free + dp_pages_reserve < 2000) && \
- (get_task_resident_size(current_task()) > vm_hard_throttle_threshold) && \
- (current_task() != kernel_task) && IP_VALID(memory_manager_default)) || \
- (vm_page_free_count < vm_page_throttle_limit && thread_is_io_throttled() && \
- (get_task_resident_size(current_task()) > vm_hard_throttle_threshold)))
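+/*
+ * hard throttle a non-kernel task whose resident size exceeds one fifth of the available
+ * non-compressed memory, but only when the system is low on space or the free page count
+ * has dropped below the throttle limit while the current thread is I/O throttled.
+ */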
+#define NEED_TO_HARD_THROTTLE_THIS_TASK() ((current_task() != kernel_task && \
+ get_task_resident_size(current_task()) > (((AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE) / 5)) && \
+ (vm_low_on_space() || (vm_page_free_count < vm_page_throttle_limit && \
+ proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED )))
-#define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
-extern int cs_debug;
-
-#if MACH_KDB
-extern struct db_watchpoint *db_watchpoint_list;
-#endif /* MACH_KDB */
+#define HARD_THROTTLE_DELAY 20000 /* 20000 us == 20 ms */
+#define SOFT_THROTTLE_DELAY 2000 /* 2000 us == 2 ms */
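+/*
+ * vm_page_throttled() returns one of these delays, or 0 if the calling thread isn't being
+ * throttled; the fault paths simply delay() for the returned number of microseconds.
+ */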
boolean_t current_thread_aborted(void);
extern void vm_fault_classify_init(void);
#endif
+unsigned long vm_pmap_enter_blocked = 0;
+unsigned long vm_pmap_enter_retried = 0;
unsigned long vm_cs_validates = 0;
unsigned long vm_cs_revalidates = 0;
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
-
-#if CONFIG_ENFORCE_SIGNED_CODE
-int cs_enforcement_disable=0;
-#else
-static const int cs_enforcement_disable=1;
-#endif
+unsigned long vm_cs_bitmap_validated = 0;
/*
* Routine: vm_fault_init
void
vm_fault_init(void)
{
-#if !SECURE_KERNEL
-#if CONFIG_ENFORCE_SIGNED_CODE
- PE_parse_boot_argn("cs_enforcement_disable", &cs_enforcement_disable,
- sizeof (cs_enforcement_disable));
-#endif
- PE_parse_boot_argn("cs_debug", &cs_debug, sizeof (cs_debug));
-#endif
-
+ int i, vm_compressor_temp;
+ boolean_t need_default_val = TRUE;
/*
* Choose a value for the hard throttle threshold based on the amount of ram. The threshold is
* computed as a percentage of available memory, and the percentage used is scaled inversely with
- * the amount of memory. The pertange runs between 10% and 35%. We use 35% for small memory systems
+ * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems
* and reduce the value down to 10% for very large memory configurations. This helps give us a
* definition of a memory hog that makes more sense relative to the amount of ram in the machine.
* The formula here simply uses the number of gigabytes of ram to adjust the percentage.
*/
vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100;
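+	/*
+	 * e.g. with 16GB of ram the percentage is 35 - MIN(16, 25) == 19, so the threshold is
+	 * 19% of sane_size; at 25GB and beyond MIN() clamps to 25 and the threshold bottoms out at 10%.
+	 */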
+
+ /*
+ * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
+ */
+
+ if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) {
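+		/*
+		 * the boot arg is honored only if it names exactly one of the known pager modes,
+		 * i.e. a single bit below VM_PAGER_MAX_MODES; anything else is ignored and we
+		 * fall back to the device tree value below.
+		 */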
+ for ( i = 0; i < VM_PAGER_MAX_MODES; i++) {
+ if (vm_compressor_temp > 0 &&
+ ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) {
+ need_default_val = FALSE;
+ vm_compressor_mode = vm_compressor_temp;
+ break;
+ }
+ }
+ if (need_default_val)
+ printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
+ }
+ if (need_default_val) {
+ /* If no boot arg or incorrect boot arg, try device tree. */
+ PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
+ }
+ PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count));
+ printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
}
/*
register vm_page_t top_page)
{
vm_object_paging_end(object);
- vm_object_unlock(object);
+ vm_object_unlock(object);
if (top_page != VM_PAGE_NULL) {
object = top_page->object;
for (n = 0; n < max_pages_in_run; n++) {
m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
- if (m && !m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
+ if (m && !m->laundry && !m->busy && !m->no_cache && !m->throttled && !m->fictitious && !m->absent) {
page_run[pages_in_run++] = m;
- pmap_clear_reference(m->phys_page);
+
+ /*
+ * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+ *
+ * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+ * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+			 * new reference happens. If no further references happen on the page after that remote TLB flushes
+ * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+ * by pageout_scan, which is just fine since the last reference would have happened quite far
+ * in the past (TLB caches don't hang around for very long), and of course could just as easily
+ * have happened before we did the deactivate_behind.
+ */
+ pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
}
}
if (pages_in_run) {
}
-static boolean_t
+static int
vm_page_throttled(void)
{
clock_sec_t elapsed_sec;
thread_t thread = current_thread();
if (thread->options & TH_OPT_VMPRIV)
- return (FALSE);
+ return (0);
thread->t_page_creation_count++;
if (NEED_TO_HARD_THROTTLE_THIS_TASK())
- return (TRUE);
+ return (HARD_THROTTLE_DELAY);
- if (vm_page_free_count < vm_page_throttle_limit &&
+ if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
thread->t_page_creation_count > vm_page_creation_throttle) {
-
+
clock_get_system_microtime(&tv_sec, &tv_usec);
elapsed_sec = tv_sec - thread->t_page_creation_time;
}
++vm_page_throttle_count;
- return (TRUE);
+ if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && HARD_THROTTLE_LIMIT_REACHED())
+ return (HARD_THROTTLE_DELAY);
+ else
+ return (SOFT_THROTTLE_DELAY);
}
thread->t_page_creation_time = tv_sec;
thread->t_page_creation_count = 0;
}
- return (FALSE);
+ return (0);
}
static vm_fault_return_t
vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
{
+ int throttle_delay;
+
if (object->shadow_severed ||
VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
/*
return (VM_FAULT_RETRY);
}
}
- if (vm_page_throttled()) {
+ if ((throttle_delay = vm_page_throttled())) {
/*
* we're throttling zero-fills...
* treat this as if we couldn't grab a page
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
- if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
- delay(HARD_THROTTLE_DELAY);
+ VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
- if (current_thread_aborted()) {
- thread_interrupt_level(interruptible_state);
- return VM_FAULT_INTERRUPTED;
- }
- }
+ delay(throttle_delay);
+ if (current_thread_aborted()) {
+ thread_interrupt_level(interruptible_state);
+ return VM_FAULT_INTERRUPTED;
+ }
thread_interrupt_level(interruptible_state);
return (VM_FAULT_MEMORY_SHORTAGE);
m->cs_validated = FALSE;
m->cs_tainted = FALSE;
- if (no_zero_fill == TRUE)
- my_fault = DBG_NZF_PAGE_FAULT;
- else {
+ if (no_zero_fill == TRUE) {
+ my_fault = DBG_NZF_PAGE_FAULT;
+ } else {
vm_page_zero_fill(m);
VM_STAT_INCR(zero_fill_count);
assert(m->object != kernel_object);
//assert(m->pageq.next == NULL && m->pageq.prev == NULL);
- if (!IP_VALID(memory_manager_default) &&
+ if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
(m->object->purgable == VM_PURGABLE_DENY ||
m->object->purgable == VM_PURGABLE_NONVOLATILE ||
m->object->purgable == VM_PURGABLE_VOLATILE )) {
+
vm_page_lockspin_queues();
- queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
- m->throttled = TRUE;
- vm_page_throttled_count++;
+ if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
+ assert(!VM_PAGE_WIRED(m));
- vm_page_unlock_queues();
- } else {
- if (current_thread()->t_page_creation_count > vm_page_creation_throttle) {
- m->zero_fill = TRUE;
- VM_ZF_COUNT_INCR();
+ /*
+ * can't be on the pageout queue since we don't
+ * have a pager to try and clean to
+ */
+ assert(!m->pageout_queue);
+
+ VM_PAGE_QUEUES_REMOVE(m);
+
+ queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
+ m->throttled = TRUE;
+ vm_page_throttled_count++;
}
+ vm_page_unlock_queues();
}
return (my_fault);
}
* paging_in_progress reference.
*/
unsigned int vm_fault_page_blocked_access = 0;
+unsigned int vm_fault_page_forced_retry = 0;
vm_fault_return_t
vm_fault_page(
vm_object_offset_t first_offset, /* Offset into object */
vm_prot_t fault_type, /* What access is requested */
boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t caller_lookup, /* caller looked up page */
/* Modifies in place: */
vm_prot_t *protection, /* Protection for mapping */
- /* Returns: */
vm_page_t *result_page, /* Page found, if successful */
+ /* Returns: */
vm_page_t *top_page, /* Page in top object, if
* not result_page. */
int *type_of_fault, /* if non-null, fill in with type of fault
/* More arguments: */
kern_return_t *error_code, /* code if page is in error */
boolean_t no_zero_fill, /* don't zero fill absent pages */
-#if MACH_PAGEMAP
boolean_t data_supply, /* treat as data_supply if
* it is a write fault and a full
* page is provided */
-#else
- __unused boolean_t data_supply,
-#endif
vm_object_fault_info_t fault_info)
{
vm_page_t m;
vm_object_t next_object;
vm_object_t copy_object;
boolean_t look_for_page;
+ boolean_t force_fault_retry = FALSE;
vm_prot_t access_required = fault_type;
vm_prot_t wants_copy_flag;
CLUSTER_STAT(int pages_at_higher_offsets;)
CLUSTER_STAT(int pages_at_lower_offsets;)
kern_return_t wait_result;
boolean_t interruptible_state;
+ boolean_t data_already_requested = FALSE;
+ vm_behavior_t orig_behavior;
+ vm_size_t orig_cluster_size;
vm_fault_return_t error;
int my_fault;
uint32_t try_failed_count;
int interruptible; /* how may fault be interrupted? */
+ int external_state = VM_EXTERNAL_STATE_UNKNOWN;
memory_object_t pager;
vm_fault_return_t retval;
* into a copy object in order to avoid a redundant page out operation.
*/
#if MACH_PAGEMAP
-#define MUST_ASK_PAGER(o, f) (vm_external_state_get((o)->existence_map, (f)) \
- != VM_EXTERNAL_STATE_ABSENT)
-#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
- == VM_EXTERNAL_STATE_EXISTS)
-#else
-#define MUST_ASK_PAGER(o, f) (TRUE)
-#define PAGED_OUT(o, f) (FALSE)
-#endif
+#define MUST_ASK_PAGER(o, f, s) \
+ ((vm_external_state_get((o)->existence_map, (f)) \
+ != VM_EXTERNAL_STATE_ABSENT) && \
+ (s = (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)))) \
+ != VM_EXTERNAL_STATE_ABSENT)
+#define PAGED_OUT(o, f) \
+ ((vm_external_state_get((o)->existence_map, (f)) \
+ == VM_EXTERNAL_STATE_EXISTS) || \
+ (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) \
+ == VM_EXTERNAL_STATE_EXISTS))
+#else /* MACH_PAGEMAP */
+#define MUST_ASK_PAGER(o, f, s) \
+ ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
+#define PAGED_OUT(o, f) \
+ (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
+#endif /* MACH_PAGEMAP */
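+/*
+ * note that MUST_ASK_PAGER() also hands back the compressor pager's state through its third
+ * argument (when that check is reached), so callers can reuse it without querying a second time.
+ */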
/*
* Recovery actions
PAGE_WAKEUP_DONE(m); \
if (!m->active && !m->inactive && !m->throttled) { \
vm_page_lockspin_queues(); \
- if (!m->active && !m->inactive && !m->throttled) \
- vm_page_activate(m); \
+ if (!m->active && !m->inactive && !m->throttled) { \
+ if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) \
+ vm_page_deactivate(m); \
+ else \
+ vm_page_activate(m); \
+ } \
vm_page_unlock_queues(); \
} \
MACRO_END
dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
#endif
-
-#if MACH_KDB
- /*
- * If there are watchpoints set, then
- * we don't want to give away write permission
- * on a read fault. Make the task write fault,
- * so that the watchpoint code notices the access.
- */
- if (db_watchpoint_list) {
- /*
- * If we aren't asking for write permission,
- * then don't give it away. We're using write
- * faults to set the dirty bit.
- */
- if (!(fault_type & VM_PROT_WRITE))
- *protection &= ~VM_PROT_WRITE;
- }
-#endif /* MACH_KDB */
-
interruptible = fault_info->interruptible;
interruptible_state = thread_interrupt_level(interruptible);
* must be a "large page" object. We do not deal
* with VM pages for this object.
*/
+ caller_lookup = FALSE;
m = VM_PAGE_NULL;
goto phys_contig_object;
}
* a "activity_in_progress" reference and wait for
* access to be unblocked.
*/
+ caller_lookup = FALSE; /* no longer valid after sleep */
vm_object_activity_begin(object);
vm_object_paging_end(object);
while (object->blocked_access) {
/*
* See whether the page at 'offset' is resident
*/
- m = vm_page_lookup(object, offset);
+ if (caller_lookup == TRUE) {
+ /*
+ * The caller has already looked up the page
+		 * and given us the result in "result_page".
+ * We can use this for the first lookup but
+ * it loses its validity as soon as we unlock
+ * the object.
+ */
+ m = *result_page;
+ caller_lookup = FALSE; /* no longer valid after that */
+ } else {
+ m = vm_page_lookup(object, offset);
+ }
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
#endif
/*
* The page is being brought in,
* wait for it and then retry.
- *
- * A possible optimization: if the page
- * is known to be resident, we can ignore
- * pages that are absent (regardless of
- * whether they're busy).
*/
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
#endif
wait_result = PAGE_SLEEP(object, m, interruptible);
+
XPR(XPR_VM_FAULT,
"vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
- object, offset,
- m, 0, 0);
+ object, offset,
+ m, 0, 0);
counter(c_vm_fault_page_block_busy_kernel++);
if (wait_result != THREAD_AWAKENED) {
thread_interrupt_level(interruptible_state);
if (wait_result == THREAD_RESTART)
- return (VM_FAULT_RETRY);
+ return (VM_FAULT_RETRY);
else
return (VM_FAULT_INTERRUPTED);
}
continue;
}
+ if (m->laundry) {
+ m->pageout = FALSE;
+ if (!m->cleaning)
+ vm_pageout_steal_laundry(m, FALSE);
+ }
if (m->phys_page == vm_page_guard_addr) {
/*
* Guard page: off limits !
*top_page = first_m;
if (type_of_fault)
*type_of_fault = DBG_GUARD_FAULT;
+ thread_interrupt_level(interruptible_state);
return VM_FAULT_SUCCESS;
} else {
/*
*/
my_fault = vm_fault_zero_page(m, no_zero_fill);
+ if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
+ m->absent = TRUE;
+
break;
} else {
if (must_be_resident)
m->busy = TRUE;
vm_page_lockspin_queues();
+
+ assert(!m->pageout_queue);
VM_PAGE_QUEUES_REMOVE(m);
+
vm_page_unlock_queues();
}
XPR(XPR_VM_FAULT,
"vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
object, offset,
next_object,
- offset+object->shadow_offset,0);
+ offset+object->vo_shadow_offset,0);
- offset += object->shadow_offset;
- fault_info->lo_offset += object->shadow_offset;
- fault_info->hi_offset += object->shadow_offset;
+ offset += object->vo_shadow_offset;
+ fault_info->lo_offset += object->vo_shadow_offset;
+ fault_info->hi_offset += object->vo_shadow_offset;
access_required = VM_PROT_READ;
vm_object_lock(next_object);
* the page in the speculative queue.
*/
vm_page_lockspin_queues();
- VM_PAGE_QUEUES_REMOVE(m);
+ if (m->speculative)
+ VM_PAGE_QUEUES_REMOVE(m);
vm_page_unlock_queues();
}
* this object can provide the data or we're the top object...
* object is locked; m == NULL
*/
- look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset) == TRUE) && !data_supply);
+ if (must_be_resident) {
+ if (fault_type == VM_PROT_NONE &&
+ object == kernel_object) {
+ /*
+ * We've been called from vm_fault_unwire()
+ * while removing a map entry that was allocated
+ * with KMA_KOBJECT and KMA_VAONLY. This page
+ * is not present and there's nothing more to
+ * do here (nothing to unwire).
+ */
+ vm_fault_cleanup(object, first_m);
+ thread_interrupt_level(interruptible_state);
+
+ return VM_FAULT_MEMORY_ERROR;
+ }
+
+ goto dont_look_for_page;
+ }
+
+#if !MACH_PAGEMAP
+ data_supply = FALSE;
+#endif /* !MACH_PAGEMAP */
+
+ look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
#endif
- if ((look_for_page || (object == first_object)) && !must_be_resident && !object->phys_contiguous) {
+ if (!look_for_page && object == first_object && !object->phys_contiguous) {
/*
- * Allocate a new page for this object/offset pair
+ * Allocate a new page for this object/offset pair as a placeholder
*/
m = vm_page_grab();
#if TRACEFAULTPAGE
return (VM_FAULT_MEMORY_SHORTAGE);
}
- vm_page_insert(m, object, offset);
+
+ if (fault_info && fault_info->batch_pmap_op == TRUE) {
+ vm_page_insert_internal(m, object, offset, FALSE, TRUE, TRUE);
+ } else {
+ vm_page_insert(m, object, offset);
+ }
}
- if (look_for_page && !must_be_resident) {
+ if (look_for_page) {
kern_return_t rc;
+ int my_fault_type;
/*
* If the memory manager is not ready, we
vm_object_lock(object);
assert(object->ref_count > 0);
- if (object->paging_in_progress > vm_object_pagein_throttle) {
- vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS, interruptible);
+ if (object->paging_in_progress >= vm_object_pagein_throttle) {
+ vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
vm_object_unlock(object);
wait_result = thread_block(THREAD_CONTINUE_NULL);
return (VM_FAULT_RETRY);
}
}
- if (m != VM_PAGE_NULL) {
- /*
- * Indicate that the page is waiting for data
- * from the memory manager.
- */
- m->list_req_pending = TRUE;
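+			/*
+			 * for internal objects backed by the compressor, decompress the page directly
+			 * via vm_compressor_pager_get() on the placeholder page (grabbing one if we
+			 * don't already have it) instead of going through memory_object_data_request().
+			 */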
+ if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && object->internal) {
+
+ if (m == VM_PAGE_NULL) {
+ /*
+ * Allocate a new page for this object/offset pair as a placeholder
+ */
+ m = vm_page_grab();
+#if TRACEFAULTPAGE
+ dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
+#endif
+ if (m == VM_PAGE_NULL) {
+
+ vm_fault_cleanup(object, first_m);
+ thread_interrupt_level(interruptible_state);
+
+ return (VM_FAULT_MEMORY_SHORTAGE);
+ }
+
+ m->absent = TRUE;
+ if (fault_info && fault_info->batch_pmap_op == TRUE) {
+ vm_page_insert_internal(m, object, offset, FALSE, TRUE, TRUE);
+ } else {
+ vm_page_insert(m, object, offset);
+ }
+ }
+ assert(m->busy);
+
m->absent = TRUE;
+ pager = object->pager;
+
+ vm_object_unlock(object);
+
+ rc = vm_compressor_pager_get(pager, offset + object->paging_offset, m->phys_page, &my_fault_type, 0);
+
+ vm_object_lock(object);
+
+ switch (rc) {
+ case KERN_SUCCESS:
+ m->absent = FALSE;
+ m->dirty = TRUE;
+ if ((m->object->wimg_bits &
+ VM_WIMG_MASK) !=
+ VM_WIMG_USE_DEFAULT) {
+ /*
+ * If the page is not cacheable,
+ * we can't let its contents
+ * linger in the data cache
+ * after the decompression.
+ */
+ pmap_sync_page_attributes_phys(
+ m->phys_page);
+ } else
+ m->written_by_kernel = TRUE;
+ break;
+ case KERN_MEMORY_FAILURE:
+ m->unusual = TRUE;
+ m->error = TRUE;
+ m->absent = FALSE;
+ break;
+ case KERN_MEMORY_ERROR:
+ assert(m->absent);
+ break;
+ default:
+				panic("vm_fault_page(): unexpected rc %d from vm_compressor_pager_get()\n", rc);
+ }
+ PAGE_WAKEUP_DONE(m);
+
+ rc = KERN_SUCCESS;
+ goto data_requested;
+ }
+ my_fault_type = DBG_PAGEIN_FAULT;
+
+ if (m != VM_PAGE_NULL) {
+ VM_PAGE_FREE(m);
+ m = VM_PAGE_NULL;
}
#if TRACEFAULTPAGE
object, offset, m,
access_required | wants_copy_flag, 0);
+ if (object->copy == first_object) {
+ /*
+ * if we issue the memory_object_data_request in
+ * this state, we are subject to a deadlock with
+ * the underlying filesystem if it is trying to
+ * shrink the file resulting in a push of pages
+ * into the copy object... that push will stall
+ * on the placeholder page, and if the pushing thread
+ * is holding a lock that is required on the pagein
+ * path (such as a truncate lock), we'll deadlock...
+ * to avoid this potential deadlock, we throw away
+ * our placeholder page before calling memory_object_data_request
+ * and force this thread to retry the vm_fault_page after
+ * we have issued the I/O. the second time through this path
+ * we will find the page already in the cache (presumably still
+ * busy waiting for the I/O to complete) and then complete
+ * the fault w/o having to go through memory_object_data_request again
+ */
+ assert(first_m != VM_PAGE_NULL);
+ assert(first_m->object == first_object);
+
+ vm_object_lock(first_object);
+ VM_PAGE_FREE(first_m);
+ vm_object_paging_end(first_object);
+ vm_object_unlock(first_object);
+
+ first_m = VM_PAGE_NULL;
+ force_fault_retry = TRUE;
+
+ vm_fault_page_forced_retry++;
+ }
+
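+			/*
+			 * a data request was already issued on an earlier pass through this loop, so
+			 * narrow this one to a single page with random behavior; the original values
+			 * are restored right after the call below.
+			 */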
+ if (data_already_requested == TRUE) {
+ orig_behavior = fault_info->behavior;
+ orig_cluster_size = fault_info->cluster_size;
+
+ fault_info->behavior = VM_BEHAVIOR_RANDOM;
+ fault_info->cluster_size = PAGE_SIZE;
+ }
/*
* Call the memory manager to retrieve the data.
*/
access_required | wants_copy_flag,
(memory_object_fault_info_t)fault_info);
+ if (data_already_requested == TRUE) {
+ fault_info->behavior = orig_behavior;
+ fault_info->cluster_size = orig_cluster_size;
+ } else
+ data_already_requested = TRUE;
+
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
#endif
vm_object_lock(object);
+ data_requested:
if (rc != KERN_SUCCESS) {
vm_fault_cleanup(object, first_m);
} else {
clock_sec_t tv_sec;
clock_usec_t tv_usec;
-
- clock_get_system_microtime(&tv_sec, &tv_usec);
- current_thread()->t_page_creation_time = tv_sec;
- current_thread()->t_page_creation_count = 0;
+
+ if (my_fault_type == DBG_PAGEIN_FAULT) {
+ clock_get_system_microtime(&tv_sec, &tv_usec);
+ current_thread()->t_page_creation_time = tv_sec;
+ current_thread()->t_page_creation_count = 0;
+ }
}
- if ((interruptible != THREAD_UNINT) && (current_thread()->sched_mode & TH_MODE_ABORT)) {
+ if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
vm_fault_cleanup(object, first_m);
thread_interrupt_level(interruptible_state);
return (VM_FAULT_INTERRUPTED);
}
+ if (force_fault_retry == TRUE) {
+
+ vm_fault_cleanup(object, first_m);
+ thread_interrupt_level(interruptible_state);
+
+ return (VM_FAULT_RETRY);
+ }
if (m == VM_PAGE_NULL && object->phys_contiguous) {
/*
* No page here means that the object we
* if we make it through the state checks
	 * above, then we'll count it as such
*/
- my_fault = DBG_PAGEIN_FAULT;
+ my_fault = my_fault_type;
/*
* Retry with same object/offset, since new data may
*/
continue;
}
-
+dont_look_for_page:
/*
* We get here if the object has no pager, or an existence map
* exists and indicates the page isn't present on the pager
}
my_fault = vm_fault_zero_page(m, no_zero_fill);
+ if (fault_info->mark_zf_absent && no_zero_fill == TRUE)
+ m->absent = TRUE;
break;
} else {
if ((object != first_object) || must_be_resident)
vm_object_paging_end(object);
- offset += object->shadow_offset;
- fault_info->lo_offset += object->shadow_offset;
- fault_info->hi_offset += object->shadow_offset;
+ offset += object->vo_shadow_offset;
+ fault_info->lo_offset += object->vo_shadow_offset;
+ fault_info->hi_offset += object->vo_shadow_offset;
access_required = VM_PROT_READ;
vm_object_lock(next_object);
/*
* We no longer need the old page or object.
*/
- PAGE_WAKEUP_DONE(m);
+ RELEASE_PAGE(m);
+
vm_object_paging_end(object);
vm_object_unlock(object);
*/
assert(copy_m->busy);
vm_page_insert(copy_m, object, offset);
- copy_m->dirty = TRUE;
+ SET_PAGE_DIRTY(copy_m, TRUE);
m = copy_m;
/*
/*
* Does the page exist in the copy?
*/
- copy_offset = first_offset - copy_object->shadow_offset;
+ copy_offset = first_offset - copy_object->vo_shadow_offset;
- if (copy_object->size <= copy_offset)
+ if (copy_object->vo_size <= copy_offset)
/*
* Copy object doesn't cover this page -- do nothing.
*/
#if MACH_PAGEMAP
|| vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT
#endif
+ || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
) {
vm_page_lockspin_queues();
vm_page_activate(copy_m);
vm_page_unlock_queues();
- copy_m->dirty = TRUE;
+ SET_PAGE_DIRTY(copy_m, TRUE);
PAGE_WAKEUP_DONE(copy_m);
- }
- else {
+
+ } else if (copy_object->internal &&
+ (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE)) {
+ /*
+ * For internal objects check with the pager to see
+ * if the page already exists in the backing store.
+ * If yes, then we can drop the copy page. If not,
+ * then we'll activate it, mark it dirty and keep it
+ * around.
+ */
+
+ kern_return_t kr = KERN_SUCCESS;
+
+ memory_object_t copy_pager = copy_object->pager;
+ assert(copy_pager != MEMORY_OBJECT_NULL);
+ vm_object_paging_begin(copy_object);
+
+ vm_object_unlock(copy_object);
+
+ kr = memory_object_data_request(
+ copy_pager,
+ copy_offset + copy_object->paging_offset,
+ 0, /* Only query the pager. */
+ VM_PROT_READ,
+ NULL);
+
+ vm_object_lock(copy_object);
+
+ vm_object_paging_end(copy_object);
+
+ /*
+ * Since we dropped the copy_object's lock,
+ * check whether we'll have to deallocate
+ * the hard way.
+ */
+ if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
+ vm_object_unlock(copy_object);
+ vm_object_deallocate(copy_object);
+ vm_object_lock(object);
+
+ continue;
+ }
+ if (kr == KERN_SUCCESS) {
+ /*
+ * The pager has the page. We don't want to overwrite
+ * that page by sending this one out to the backing store.
+ * So we drop the copy page.
+ */
+ VM_PAGE_FREE(copy_m);
+
+ } else {
+ /*
+ * The pager doesn't have the page. We'll keep this one
+ * around in the copy object. It might get sent out to
+ * the backing store under memory pressure.
+ */
+ vm_page_lockspin_queues();
+ assert(!m->cleaning);
+ vm_page_activate(copy_m);
+ vm_page_unlock_queues();
+
+ SET_PAGE_DIRTY(copy_m, TRUE);
+ PAGE_WAKEUP_DONE(copy_m);
+ }
+ } else {
+
assert(copy_m->busy == TRUE);
assert(!m->cleaning);
/*
* dirty is protected by the object lock
*/
- copy_m->dirty = TRUE;
+ SET_PAGE_DIRTY(copy_m, TRUE);
/*
* The page is already ready for pageout:
*/
vm_object_lock(object);
}
+
/*
* Because we're pushing a page upward
* in the object tree, we must restart
retval = VM_FAULT_SUCCESS;
if (my_fault == DBG_PAGEIN_FAULT) {
- VM_STAT_INCR(pageins);
+ if (!m->object->internal || (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE))
+ VM_STAT_INCR(pageins);
DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);
DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
current_task()->pageins++;
vm_fault_is_sequential(object, offset, fault_info->behavior);
vm_fault_deactivate_behind(object, offset, fault_info->behavior);
+ } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
+
+ VM_STAT_INCR(decompressions);
}
if (type_of_fault)
*type_of_fault = my_fault;
* careful not to modify the VM object in any way that is not
* legal under a shared lock...
*/
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
unsigned long cs_enter_tainted_rejected = 0;
unsigned long cs_enter_tainted_accepted = 0;
kern_return_t
pmap_t pmap,
vm_map_offset_t vaddr,
vm_prot_t prot,
+ vm_prot_t fault_type,
boolean_t wired,
boolean_t change_wiring,
boolean_t no_cache,
+ boolean_t cs_bypass,
+ boolean_t *need_retry,
int *type_of_fault)
{
- unsigned int cache_attr;
- kern_return_t kr;
+ kern_return_t kr, pe_result;
boolean_t previously_pmapped = m->pmapped;
boolean_t must_disconnect = 0;
boolean_t map_is_switched, map_is_switch_protected;
+ int cs_enforcement_enabled;
vm_object_lock_assert_held(m->object);
#if DEBUG
return KERN_SUCCESS;
}
- cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+ if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
- if (m->pmapped == FALSE) {
+ vm_object_lock_assert_exclusive(m->object);
+
+ } else if ((fault_type & VM_PROT_WRITE) == 0) {
/*
- * This is the first time this page is being
- * mapped in an address space (pmapped == FALSE).
- *
- * Part of that page may still be in the data cache
- * and not flushed to memory. In case we end up
- * accessing that page via the instruction cache,
- * we need to ensure that the 2 caches are in sync.
+ * This is not a "write" fault, so we
+ * might not have taken the object lock
+ * exclusively and we might not be able
+ * to update the "wpmapped" bit in
+ * vm_fault_enter().
+ * Let's just grant read access to
+ * the page for now and we'll
+ * soft-fault again if we need write
+ * access later...
*/
- pmap_sync_page_data_phys(m->phys_page);
+ prot &= ~VM_PROT_WRITE;
+ }
+ if (m->pmapped == FALSE) {
if ((*type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
/*
}
VM_PAGE_CONSUME_CLUSTERED(m);
- } else if (cache_attr != VM_WIMG_DEFAULT)
- pmap_sync_page_attributes_phys(m->phys_page);
+ }
if (*type_of_fault != DBG_COW_FAULT) {
DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
* from the current map. We do that below right before we do the
* PMAP_ENTER.
*/
- if(!cs_enforcement_disable && map_is_switched &&
+ cs_enforcement_enabled = cs_enforcement(NULL);
+
+ if(cs_enforcement_enabled && map_is_switched &&
map_is_switch_protected && page_immutable(m, prot) &&
(prot & VM_PROT_WRITE))
{
* code can be created
*/
if (m->cs_tainted ||
- ( !cs_enforcement_disable &&
+ ((cs_enforcement_enabled && !cs_bypass ) &&
(/* The page is unsigned and wants to be executable */
(!m->cs_validated && (prot & VM_PROT_EXECUTE)) ||
/* The page should be immutable, but is in danger of being modified
assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
reject_page = FALSE;
} else {
+ if (cs_debug > 5)
+ printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n",
+ m->object->code_signed ? "yes" : "no",
+ m->cs_validated ? "yes" : "no",
+ m->cs_tainted ? "yes" : "no",
+ m->wpmapped ? "yes" : "no",
+ m->slid ? "yes" : "no",
+ (int)prot);
reject_page = cs_invalid_page((addr64_t) vaddr);
}
if (reject_page) {
/* reject the tainted page: abort the page fault */
+ int pid;
+ const char *procname;
+ task_t task;
+ vm_object_t file_object, shadow;
+ vm_object_offset_t file_offset;
+ char *pathname, *filename;
+ vm_size_t pathname_len, filename_len;
+ boolean_t truncated_path;
+#define __PATH_MAX 1024
+ struct timespec mtime, cs_mtime;
+
kr = KERN_CODESIGN_ERROR;
cs_enter_tainted_rejected++;
+
+ /* get process name and pid */
+ procname = "?";
+ task = current_task();
+ pid = proc_selfpid();
+ if (task->bsd_info != NULL)
+ procname = proc_name_address(task->bsd_info);
+
+ /* get file's VM object */
+ file_object = m->object;
+ file_offset = m->offset;
+ for (shadow = file_object->shadow;
+ shadow != VM_OBJECT_NULL;
+ shadow = file_object->shadow) {
+ vm_object_lock_shared(shadow);
+ if (file_object != m->object) {
+ vm_object_unlock(file_object);
+ }
+ file_offset += file_object->vo_shadow_offset;
+ file_object = shadow;
+ }
+
+ mtime.tv_sec = 0;
+ mtime.tv_nsec = 0;
+ cs_mtime.tv_sec = 0;
+ cs_mtime.tv_nsec = 0;
+
+ /* get file's pathname and/or filename */
+ pathname = NULL;
+ filename = NULL;
+ pathname_len = 0;
+ filename_len = 0;
+ truncated_path = FALSE;
+ if (file_object->pager == NULL) {
+ /* no pager -> no file -> no pathname */
+ pathname = (char *) "<nil>";
+ } else {
+ pathname = (char *)kalloc(__PATH_MAX * 2);
+ if (pathname) {
+ pathname_len = __PATH_MAX;
+ filename = pathname + pathname_len;
+ filename_len = __PATH_MAX;
+ }
+ vnode_pager_get_object_name(file_object->pager,
+ pathname,
+ pathname_len,
+ filename,
+ filename_len,
+ &truncated_path);
+ vnode_pager_get_object_mtime(file_object->pager,
+ &mtime,
+ &cs_mtime);
+ }
+ printf("CODE SIGNING: process %d[%s]: "
+ "rejecting invalid page at address 0x%llx "
+ "from offset 0x%llx in file \"%s%s%s\" "
+ "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+ "(signed:%d validated:%d tainted:%d "
+ "wpmapped:%d slid:%d)\n",
+ pid, procname, (addr64_t) vaddr,
+ file_offset,
+ pathname,
+ (truncated_path ? "/.../" : ""),
+ (truncated_path ? filename : ""),
+ cs_mtime.tv_sec, cs_mtime.tv_nsec,
+ ((cs_mtime.tv_sec == mtime.tv_sec &&
+ cs_mtime.tv_nsec == mtime.tv_nsec)
+ ? "=="
+ : "!="),
+ mtime.tv_sec, mtime.tv_nsec,
+ m->object->code_signed,
+ m->cs_validated,
+ m->cs_tainted,
+ m->wpmapped,
+ m->slid);
+ if (file_object != m->object) {
+ vm_object_unlock(file_object);
+ }
+ if (pathname_len != 0) {
+ kfree(pathname, __PATH_MAX * 2);
+ pathname = NULL;
+ filename = NULL;
+ }
} else {
/* proceed with the tainted page */
kr = KERN_SUCCESS;
m->cs_tainted = TRUE;
cs_enter_tainted_accepted++;
}
- if (cs_debug || kr != KERN_SUCCESS) {
- printf("CODESIGNING: vm_fault_enter(0x%llx): "
- "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
- (long long)vaddr, m, m->object, m->offset);
+ if (kr != KERN_SUCCESS) {
+ if (cs_debug) {
+ printf("CODESIGNING: vm_fault_enter(0x%llx): "
+ "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
+ (long long)vaddr, m, m->object, m->offset);
+ }
+#if !SECURE_KERNEL
+ if (cs_enforcement_panic) {
+ panic("CODESIGNING: panicking on invalid page\n");
+ }
+#endif
}
} else {
kr = KERN_SUCCESS;
}
- /* If we have a KERN_SUCCESS from the previous checks, we either have
- * a good page, or a tainted page that has been accepted by the process.
- * In both cases the page will be entered into the pmap.
- * If the page is writeable, we need to disconnect it from other pmaps
- * now so those processes can take note.
- */
- if (kr == KERN_SUCCESS) {
- /*
- * NOTE: we may only hold the vm_object lock SHARED
- * at this point, but the update of pmapped is ok
- * since this is the ONLY bit updated behind the SHARED
- * lock... however, we need to figure out how to do an atomic
- * update on a bit field to make this less fragile... right
- * now I don't know how to coerce 'C' to give me the offset info
- * that's needed for an AtomicCompareAndSwap
- */
- m->pmapped = TRUE;
- if (prot & VM_PROT_WRITE) {
- vm_object_lock_assert_exclusive(m->object);
- m->wpmapped = TRUE;
- if(must_disconnect) {
- /* We can only get here
- * because of the CSE logic */
- assert(cs_enforcement_disable == FALSE);
- pmap_disconnect(m->phys_page);
- /* If we are faulting for a write, we can clear
- * the execute bit - that will ensure the page is
- * checked again before being executable, which
- * protects against a map switch.
- * This only happens the first time the page
- * gets tainted, so we won't get stuck here
- * to make an already writeable page executable. */
- prot &= ~VM_PROT_EXECUTE;
- }
- }
- PMAP_ENTER(pmap, vaddr, m, prot, cache_attr, wired);
- }
+ boolean_t page_queues_locked = FALSE;
+#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
+MACRO_BEGIN \
+ if (! page_queues_locked) { \
+ page_queues_locked = TRUE; \
+ vm_page_lockspin_queues(); \
+ } \
+MACRO_END
+#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
+MACRO_BEGIN \
+ if (page_queues_locked) { \
+ page_queues_locked = FALSE; \
+ vm_page_unlock_queues(); \
+ } \
+MACRO_END
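+/*
+ * these helpers let the code below take the page queues lock lazily, at most once, and drop
+ * it on the way out if it was ever taken.
+ */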
/*
* Hold queues lock to manipulate
* the page queues. Change wiring
* case is obvious.
*/
- if (change_wiring) {
- vm_page_lockspin_queues();
+ assert(m->compressor || m->object != compressor_object);
+ if (m->compressor) {
+ /*
+ * Compressor pages are neither wired
+ * nor pageable and should never change.
+ */
+ assert(m->object == compressor_object);
+ } else if (change_wiring) {
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
if (wired) {
if (kr == KERN_SUCCESS) {
vm_page_wire(m);
}
} else {
- vm_page_unwire(m);
+ vm_page_unwire(m, TRUE);
}
- vm_page_unlock_queues();
+ /* we keep the page queues lock, if we need it later */
} else {
if (kr != KERN_SUCCESS) {
- vm_page_lockspin_queues();
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
vm_page_deactivate(m);
- vm_page_unlock_queues();
- } else {
- if (((!m->active && !m->inactive) || no_cache) && !VM_PAGE_WIRED(m) && !m->throttled) {
+ /* we keep the page queues lock, if we need it later */
+ } else if (((!m->active && !m->inactive) ||
+ m->clean_queue ||
+ no_cache) &&
+ !VM_PAGE_WIRED(m) && !m->throttled) {
+
+ if (vm_page_local_q &&
+ !no_cache &&
+ (*type_of_fault == DBG_COW_FAULT ||
+ *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
+ struct vpl *lq;
+ uint32_t lid;
+
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+ vm_object_lock_assert_exclusive(m->object);
- if ( vm_page_local_q && !no_cache && (*type_of_fault == DBG_COW_FAULT || *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
- struct vpl *lq;
- uint32_t lid;
+ /*
+ * we got a local queue to stuff this
+ * new page on...
+				 * it's safe to manipulate local and
+ * local_id at this point since we're
+ * behind an exclusive object lock and
+ * the page is not on any global queue.
+ *
+ * we'll use the current cpu number to
+				 * select the queue... note that we don't
+				 * need to disable preemption since we're
+				 * going to be behind the local queue's
+ * lock to do the real work
+ */
+ lid = cpu_number();
+
+ lq = &vm_page_local_q[lid].vpl_un.vpl;
+
+ VPL_LOCK(&lq->vpl_lock);
+
+ queue_enter(&lq->vpl_queue, m,
+ vm_page_t, pageq);
+ m->local = TRUE;
+ m->local_id = lid;
+ lq->vpl_count++;
+
+ if (m->object->internal)
+ lq->vpl_internal_count++;
+ else
+ lq->vpl_external_count++;
+
+ VPL_UNLOCK(&lq->vpl_lock);
+ if (lq->vpl_count > vm_page_local_q_soft_limit)
+ {
/*
- * we got a local queue to stuff this new page on...
- * its safe to manipulate local and local_id at this point
- * since we're behind an exclusive object lock and the
- * page is not on any global queue.
- *
- * we'll use the current cpu number to select the queue
- * note that we don't need to disable preemption... we're
- * going to behind the local queue's lock to do the real
- * work
+ * we're beyond the soft limit
+ * for the local queue
+ * vm_page_reactivate_local will
+ * 'try' to take the global page
+ * queue lock... if it can't
+ * that's ok... we'll let the
+ * queue continue to grow up
+ * to the hard limit... at that
+ * point we'll wait for the
+ * lock... once we've got the
+ * lock, we'll transfer all of
+ * the pages from the local
+ * queue to the global active
+ * queue
*/
- lid = cpu_number();
+ vm_page_reactivate_local(lid, FALSE, FALSE);
+ }
+ } else {
- lq = &vm_page_local_q[lid].vpl_un.vpl;
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
- VPL_LOCK(&lq->vpl_lock);
+ /*
+ * test again now that we hold the
+ * page queue lock
+ */
+ if (!VM_PAGE_WIRED(m)) {
+ if (m->clean_queue) {
+ VM_PAGE_QUEUES_REMOVE(m);
- queue_enter(&lq->vpl_queue, m, vm_page_t, pageq);
- m->local = TRUE;
- m->local_id = lid;
- lq->vpl_count++;
-
- VPL_UNLOCK(&lq->vpl_lock);
+ vm_pageout_cleaned_reactivated++;
+ vm_pageout_cleaned_fault_reactivated++;
+ }
- if (lq->vpl_count > vm_page_local_q_soft_limit) {
+ if ((!m->active &&
+ !m->inactive) ||
+ no_cache) {
/*
- * we're beyond the soft limit for the local queue
- * vm_page_reactivate_local will 'try' to take
- * the global page queue lock... if it can't that's
- * ok... we'll let the queue continue to grow up
- * to the hard limit... at that point we'll wait
- * for the lock... once we've got the lock, we'll
- * transfer all of the pages from the local queue
- * to the global active queue
+ * If this is a no_cache mapping
+ * and the page has never been
+ * mapped before or was
+ * previously a no_cache page,
+ * then we want to leave pages
+ * in the speculative state so
+ * that they can be readily
+ * recycled if free memory runs
+ * low. Otherwise the page is
+ * activated as normal.
*/
- vm_page_reactivate_local(lid, FALSE, FALSE);
+
+ if (no_cache &&
+ (!previously_pmapped ||
+ m->no_cache)) {
+ m->no_cache = TRUE;
+
+ if (!m->speculative)
+ vm_page_speculate(m, FALSE);
+
+ } else if (!m->active &&
+ !m->inactive) {
+
+ vm_page_activate(m);
+ }
}
- return kr;
}
+ /* we keep the page queues lock, if we need it later */
+ }
+ }
+ }
- vm_page_lockspin_queues();
+ if ((prot & VM_PROT_EXECUTE) &&
+ ! m->xpmapped) {
+
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+ /*
+ * xpmapped is protected by the page queues lock
+ * so it matters not that we might only hold the
+ * object lock in the shared state
+ */
+
+ if (! m->xpmapped) {
+
+ m->xpmapped = TRUE;
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+
+ if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
+ m->object->internal &&
+ m->object->pager != NULL) {
/*
- * test again now that we hold the page queue lock
+ * This page could have been
+ * uncompressed by the
+ * compressor pager and its
+ * contents might be only in
+ * the data cache.
+ * Since it's being mapped for
+					 * "execute" for the first time,
+ * make sure the icache is in
+ * sync.
*/
- if (((!m->active && !m->inactive) || no_cache) && !VM_PAGE_WIRED(m)) {
+ pmap_sync_page_data_phys(m->phys_page);
+ }
- /*
- * If this is a no_cache mapping and the page has never been
- * mapped before or was previously a no_cache page, then we
- * want to leave pages in the speculative state so that they
- * can be readily recycled if free memory runs low. Otherwise
- * the page is activated as normal.
- */
+ }
+ }
+ /* we're done with the page queues lock, if we ever took it */
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+
+
+ /* If we have a KERN_SUCCESS from the previous checks, we either have
+ * a good page, or a tainted page that has been accepted by the process.
+ * In both cases the page will be entered into the pmap.
+ * If the page is writeable, we need to disconnect it from other pmaps
+ * now so those processes can take note.
+ */
+ if (kr == KERN_SUCCESS) {
+ /*
+ * NOTE: we may only hold the vm_object lock SHARED
+ * at this point, but the update of pmapped is ok
+ * since this is the ONLY bit updated behind the SHARED
+ * lock... however, we need to figure out how to do an atomic
+ * update on a bit field to make this less fragile... right
+ * now I don't know how to coerce 'C' to give me the offset info
+ * that's needed for an AtomicCompareAndSwap
+ */
+ m->pmapped = TRUE;
+ if(vm_page_is_slideable(m)) {
+ boolean_t was_busy = m->busy;
- if (no_cache && (!previously_pmapped || m->no_cache)) {
- m->no_cache = TRUE;
+ vm_object_lock_assert_exclusive(m->object);
- if (m->active || m->inactive)
- VM_PAGE_QUEUES_REMOVE(m);
+ m->busy = TRUE;
+ kr = vm_page_slide(m, 0);
+ assert(m->busy);
+ if(!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ if (kr != KERN_SUCCESS) {
+ /*
+ * This page has not been slid correctly,
+ * do not do the pmap_enter() !
+ * Let vm_fault_enter() return the error
+ * so the caller can fail the fault.
+ */
+ goto after_the_pmap_enter;
+ }
+ }
- if (!m->speculative)
- vm_page_speculate(m, TRUE);
+ if (fault_type & VM_PROT_WRITE) {
- } else if (!m->active && !m->inactive)
- vm_page_activate(m);
+ if (m->wpmapped == FALSE) {
+ vm_object_lock_assert_exclusive(m->object);
+ m->wpmapped = TRUE;
+ }
+ if (must_disconnect) {
+ /*
+ * We can only get here
+ * because of the CSE logic
+ */
+ assert(cs_enforcement_enabled);
+ pmap_disconnect(m->phys_page);
+ /*
+ * If we are faulting for a write, we can clear
+ * the execute bit - that will ensure the page is
+ * checked again before being executable, which
+ * protects against a map switch.
+ * This only happens the first time the page
+ * gets tainted, so we won't get stuck here
+ * to make an already writeable page executable.
+ */
+ if (!cs_bypass){
+ prot &= ~VM_PROT_EXECUTE;
}
+ }
+ }
- vm_page_unlock_queues();
+ /* Prevent a deadlock by not
+ * holding the object lock if we need to wait for a page in
+ * pmap_enter() - <rdar://problem/7138958> */
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
+ wired, PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if(pe_result == KERN_RESOURCE_SHORTAGE) {
+
+ if (need_retry) {
+ /*
+ * this will be non-null in the case where we hold the lock
+ * on the top-object in this chain... we can't just drop
+ * the lock on the object we're inserting the page into
+ * and recall the PMAP_ENTER since we can still cause
+ * a deadlock if one of the critical paths tries to
+ * acquire the lock on the top-object and we're blocked
+ * in PMAP_ENTER waiting for memory... our only recourse
+ * is to deal with it at a higher level where we can
+ * drop both locks.
+ */
+ *need_retry = TRUE;
+ vm_pmap_enter_retried++;
+ goto after_the_pmap_enter;
+ }
+			/* The nonblocking version of pmap_enter did not succeed,
+ * and we don't need to drop other locks and retry
+ * at the level above us, so
+ * use the blocking version instead. Requires marking
+ * the page busy and unlocking the object */
+ boolean_t was_busy = m->busy;
+
+ vm_object_lock_assert_exclusive(m->object);
+
+ m->busy = TRUE;
+ vm_object_unlock(m->object);
+
+ PMAP_ENTER(pmap, vaddr, m, prot, fault_type, 0, wired);
+
+ /* Take the object lock again. */
+ vm_object_lock(m->object);
+
+ /* If the page was busy, someone else will wake it up.
+ * Otherwise, we have to do it now. */
+ assert(m->busy);
+ if(!was_busy) {
+ PAGE_WAKEUP_DONE(m);
}
+ vm_pmap_enter_blocked++;
}
}
+
+after_the_pmap_enter:
return kr;
}
unsigned long vm_fault_collapse_total = 0;
unsigned long vm_fault_collapse_skipped = 0;
+
kern_return_t
vm_fault(
vm_map_t map,
vm_prot_t original_fault_type;
struct vm_object_fault_info fault_info;
boolean_t need_collapse = FALSE;
+ boolean_t need_retry = FALSE;
+ boolean_t *need_retry_ptr = NULL;
int object_lock_type = 0;
int cur_object_lock_type;
vm_object_t top_object = VM_OBJECT_NULL;
+ int throttle_delay;
- KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
- (int)((uint64_t)vaddr >> 32),
- (int)vaddr,
- 0,
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
+ ((uint64_t)vaddr >> 32),
+ vaddr,
+ (map == kernel_map),
0,
0);
if (get_preemption_level() != 0) {
- KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
- (int)((uint64_t)vaddr >> 32),
- (int)vaddr,
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
+ ((uint64_t)vaddr >> 32),
+ vaddr,
KERN_FAILURE,
0,
0);
pmap = real_map->pmap;
fault_info.interruptible = interruptible;
fault_info.stealth = FALSE;
+ fault_info.io_sync = FALSE;
+ fault_info.mark_zf_absent = FALSE;
+ fault_info.batch_pmap_op = FALSE;
/*
* If the page is wired, we must fault for the current protection
* have object that 'm' belongs to locked exclusively
*/
if (object != cur_object) {
- vm_object_unlock(object);
if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
if (vm_object_lock_upgrade(cur_object) == FALSE) {
/*
* couldn't upgrade so go do a full retry
- * immediately since we've already dropped
- * the top object lock associated with this page
- * and the current one got dropped due to the
- * failed upgrade... the state is no longer valid
+ * immediately since we can no longer be
+ * certain about cur_object (since we
+ * don't hold a reference on it)...
+ * first drop the top object lock
*/
+ vm_object_unlock(object);
+
vm_map_unlock_read(map);
if (real_map != map)
vm_map_unlock(real_map);
continue;
}
}
+ if (m->pageout_queue && m->object->internal && COMPRESSED_PAGER_IS_ACTIVE) {
+ /*
+			 * m->busy == TRUE and the object is locked exclusively;
+ * if m->pageout_queue == TRUE after we acquire the
+ * queues lock, we are guaranteed that it is stable on
+ * the pageout queue and therefore reclaimable
+ *
+ * NOTE: this is only true for the internal pageout queue
+ * in the compressor world
+ */
+ vm_page_lock_queues();
+
+ if (m->pageout_queue) {
+ vm_pageout_throttle_up(m);
+ vm_page_unlock_queues();
+
+ PAGE_WAKEUP_DONE(m);
+ goto reclaimed_from_pageout;
+ }
+ vm_page_unlock_queues();
+ }
+ if (object != cur_object)
+ vm_object_unlock(object);
+
vm_map_unlock_read(map);
if (real_map != map)
vm_map_unlock(real_map);
kr = KERN_ABORTED;
goto done;
}
+reclaimed_from_pageout:
+ if (m->laundry) {
+ if (object != cur_object) {
+ if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+ cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+ vm_object_unlock(object);
+ vm_object_unlock(cur_object);
+
+ vm_map_unlock_read(map);
+ if (real_map != map)
+ vm_map_unlock(real_map);
+
+ goto RetryFault;
+ }
+
+ } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+ if (vm_object_lock_upgrade(object) == FALSE) {
+ /*
+					 * couldn't upgrade, so explicitly take the lock
+ * exclusively and go relookup the page since we
+ * will have dropped the object lock and
+ * a different thread could have inserted
+ * a page at this offset
+ * no need for a full retry since we're
+ * at the top level of the object chain
+ */
+ vm_object_lock(object);
+
+ continue;
+ }
+ }
+ m->pageout = FALSE;
+
+ vm_pageout_steal_laundry(m, FALSE);
+ }
+
if (m->phys_page == vm_page_guard_addr) {
/*
* Guard page: let the slow path deal with it
}
ASSERT_PAGE_DECRYPTED(m);
+ if(vm_page_is_slideable(m)) {
+ /*
+ * We might need to slide this page, and so,
+ * we want to hold the VM object exclusively.
+ */
+ if (object != cur_object) {
+ if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+ vm_object_unlock(object);
+ vm_object_unlock(cur_object);
+
+ cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+ vm_map_unlock_read(map);
+ if (real_map != map)
+ vm_map_unlock(real_map);
+
+ goto RetryFault;
+ }
+ } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ vm_map_unlock_read(map);
+ goto RetryFault;
+ }
+ }
+
if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m)) {
+upgrade_for_validation:
/*
* We might need to validate this page
* against its code signature, so we
*/
if (object == cur_object && object->copy == VM_OBJECT_NULL) {
- if ((fault_type & VM_PROT_WRITE) == 0) {
- /*
- * This is not a "write" fault, so we
- * might not have taken the object lock
- * exclusively and we might not be able
- * to update the "wpmapped" bit in
- * vm_fault_enter().
- * Let's just grant read access to
- * the page for now and we'll
- * soft-fault again if we need write
- * access later...
- */
- prot &= ~VM_PROT_WRITE;
- }
+
goto FastPmapEnter;
}
if ((fault_type & VM_PROT_WRITE) == 0) {
- prot &= ~VM_PROT_WRITE;
-
if (object != cur_object) {
/*
* We still need to hold the top object
* cur_object == NULL or it's been unlocked
* no paging references on either object or cur_object
*/
-#if MACH_KDB
- if (db_watchpoint_list && (fault_type & VM_PROT_WRITE) == 0)
- prot &= ~VM_PROT_WRITE;
-#endif
+ if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE)
+ need_retry_ptr = &need_retry;
+ else
+ need_retry_ptr = NULL;
+
if (caller_pmap) {
kr = vm_fault_enter(m,
caller_pmap,
caller_pmap_addr,
prot,
+ fault_type,
wired,
change_wiring,
fault_info.no_cache,
+ fault_info.cs_bypass,
+ need_retry_ptr,
&type_of_fault);
} else {
kr = vm_fault_enter(m,
pmap,
vaddr,
prot,
+ fault_type,
wired,
change_wiring,
fault_info.no_cache,
+ fault_info.cs_bypass,
+ need_retry_ptr,
&type_of_fault);
}
if (need_collapse == TRUE)
vm_object_collapse(object, offset, TRUE);
-
- if (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT) {
+
+ if (need_retry == FALSE &&
+ (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
/*
* evaluate access pattern and update state
* vm_fault_deactivate_behind depends on the
if (real_map != map)
vm_map_unlock(real_map);
+ if (need_retry == TRUE) {
+ /*
+ * vm_fault_enter couldn't complete the PMAP_ENTER...
+ * at this point we don't hold any locks so it's safe
+ * to ask the pmap layer to expand the page table to
+ * accommodate this mapping... once expanded, we'll
+ * re-drive the fault which should result in vm_fault_enter
+ * being able to successfully enter the mapping this time around
+ */
+ (void)pmap_enter_options(pmap, vaddr, 0, 0, 0, 0, 0, PMAP_OPTIONS_NOENTER, NULL);
+
+ need_retry = FALSE;
+ goto RetryFault;
+ }
goto done;
}
/*
*/
assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
- if (vm_page_throttled()) {
+ if ((throttle_delay = vm_page_throttled())) {
/*
* drop all of our locks...
* wait until the free queue is
if (real_map != map)
vm_map_unlock(real_map);
- if (NEED_TO_HARD_THROTTLE_THIS_TASK())
- delay(HARD_THROTTLE_DELAY);
+ VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+
+ delay(throttle_delay);
if (!current_thread_aborted() && vm_page_wait((change_wiring) ?
THREAD_UNINT :
*/
break;
}
+
/*
* This is now a shadow based copy on write
* fault -- it requires a copy up the shadow
* chain.
- *
+ */
+
+ if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
+ VM_FAULT_NEED_CS_VALIDATION(NULL, m)) {
+ goto upgrade_for_validation;
+ }
+
+ /*
* Allocate a page in the original top level
* object. Give up if allocate fails. Also
* need to remember current page, as it's the
*/
vm_page_copy(cur_m, m);
vm_page_insert(m, object, offset);
- m->dirty = TRUE;
+ SET_PAGE_DIRTY(m, FALSE);
/*
* Now cope with the source page and object
* No page at cur_object, cur_offset... m == NULL
*/
if (cur_object->pager_created) {
- if (MUST_ASK_PAGER(cur_object, cur_offset) == TRUE) {
+ int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+
+ if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
+ int my_fault_type;
+ int c_flags = C_DONT_BLOCK;
+ boolean_t insert_cur_object = FALSE;
+
/*
* May have to talk to a pager...
- * take the slow path.
+ * if so, take the slow path by
+ * doing a 'break' from the while (TRUE) loop
+ *
+ * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
+ * if the compressor is active and the page exists there
*/
- break;
+ if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS)
+ break;
+
+ if (map == kernel_map || real_map == kernel_map) {
+ /*
+ * can't call into the compressor with the kernel_map
+ * lock held, since the compressor may try to operate
+ * on the kernel map in order to return an empty c_segment
+ */
+ break;
+ }
+ if (object != cur_object) {
+ if (fault_type & VM_PROT_WRITE)
+ c_flags |= C_KEEP;
+ else
+ insert_cur_object = TRUE;
+ }
+ if (insert_cur_object == TRUE) {
+
+ if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
+
+ cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+ if (vm_object_lock_upgrade(cur_object) == FALSE) {
+ /*
+ * couldn't upgrade so go do a full retry
+ * immediately since we can no longer be
+ * certain about cur_object (since we
+ * don't hold a reference on it)...
+ * first drop the top object lock
+ */
+ vm_object_unlock(object);
+
+ vm_map_unlock_read(map);
+ if (real_map != map)
+ vm_map_unlock(real_map);
+
+ goto RetryFault;
+ }
+ }
+ } else if (object_lock_type == OBJECT_LOCK_SHARED) {
+
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+
+ if (object != cur_object) {
+ /*
+ * we can't go for the upgrade on the top
+ * lock since the upgrade may block waiting
+ * for readers to drain... since we hold
+ * cur_object locked at this point, waiting
+ * for the readers to drain would represent
+ * a lock order inversion since the lock order
+ * for objects is the reference order in the
+					 * shadow chain
+ */
+ vm_object_unlock(object);
+ vm_object_unlock(cur_object);
+
+ vm_map_unlock_read(map);
+ if (real_map != map)
+ vm_map_unlock(real_map);
+
+ goto RetryFault;
+ }
+ if (vm_object_lock_upgrade(object) == FALSE) {
+ /*
+					 * couldn't upgrade, so explicitly take the lock
+ * exclusively and go relookup the page since we
+ * will have dropped the object lock and
+ * a different thread could have inserted
+ * a page at this offset
+ * no need for a full retry since we're
+ * at the top level of the object chain
+ */
+ vm_object_lock(object);
+
+ continue;
+ }
+ }
+ m = vm_page_grab();
+
+ if (m == VM_PAGE_NULL) {
+ /*
+ * no free page currently available...
+ * must take the slow path
+ */
+ break;
+ }
+ if (vm_compressor_pager_get(cur_object->pager, cur_offset + cur_object->paging_offset,
+ m->phys_page, &my_fault_type, c_flags) != KERN_SUCCESS) {
+ vm_page_release(m);
+ break;
+ }
+ m->dirty = TRUE;
+
+ if (insert_cur_object)
+ vm_page_insert(m, cur_object, cur_offset);
+ else
+ vm_page_insert(m, object, offset);
+
+ if ((m->object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
+ /*
+ * If the page is not cacheable,
+ * we can't let its contents
+ * linger in the data cache
+ * after the decompression.
+ */
+ pmap_sync_page_attributes_phys(m->phys_page);
+ }
+ type_of_fault = my_fault_type;
+
+ VM_STAT_INCR(decompressions);
+
+ if (cur_object != object) {
+ if (insert_cur_object) {
+ top_object = object;
+ /*
+ * switch to the object that has the new page
+ */
+ object = cur_object;
+ object_lock_type = cur_object_lock_type;
+ } else {
+ vm_object_unlock(cur_object);
+ cur_object = object;
+ }
+ }
+ goto FastPmapEnter;
}
/*
* existence map present and indicates
kr = KERN_MEMORY_ERROR;
goto done;
}
- if (vm_page_throttled()) {
+ if ((throttle_delay = vm_page_throttled())) {
/*
* drop all of our locks...
* wait until the free queue is
if (real_map != map)
vm_map_unlock(real_map);
- if (NEED_TO_HARD_THROTTLE_THIS_TASK())
- delay(HARD_THROTTLE_DELAY);
+ VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
+
+ delay(throttle_delay);
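+ /*
+ * vm_page_throttled() now returns the delay to apply rather than a
+ * simple boolean; the value is traced via VM_DEBUG_EVENT and passed
+ * straight to delay(), so the pageout subsystem decides how long a
+ * throttled fault should stall before retrying.
+ */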
if (!current_thread_aborted() && vm_page_wait((change_wiring) ?
THREAD_UNINT :
/*
* On to the next level in the shadow chain
*/
- cur_offset += cur_object->shadow_offset;
+ cur_offset += cur_object->vo_shadow_offset;
new_object = cur_object->shadow;
/*
error_code = 0;
+ result_page = VM_PAGE_NULL;
kr = vm_fault_page(object, offset, fault_type,
(change_wiring && !wired),
+ FALSE, /* page not looked up */
&prot, &result_page, &top_page,
&type_of_fault,
&error_code, map->no_zero_fill,
caller_pmap,
caller_pmap_addr,
prot,
+ fault_type,
wired,
change_wiring,
fault_info.no_cache,
+ fault_info.cs_bypass,
+ NULL,
&type_of_fault);
} else {
kr = vm_fault_enter(m,
pmap,
vaddr,
prot,
+ fault_type,
wired,
change_wiring,
fault_info.no_cache,
+ fault_info.cs_bypass,
+ NULL,
&type_of_fault);
}
if (kr != KERN_SUCCESS) {
/* to execute, we return with a protection failure. */
if ((fault_type & VM_PROT_EXECUTE) &&
- (!pmap_eligible_for_execute((ppnum_t)(object->shadow_offset >> 12)))) {
+ (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) {
vm_map_verify_done(map, &version);
assert((uint32_t)((ldelta + hdelta) >> 12) == ((ldelta + hdelta) >> 12));
pmap_map_block(caller_pmap,
(addr64_t)(caller_pmap_addr - ldelta),
- (ppnum_t)((((vm_map_offset_t) (entry->object.vm_object->shadow_offset)) +
+ (ppnum_t)((((vm_map_offset_t) (entry->object.vm_object->vo_shadow_offset)) +
entry->offset + (laddr - entry->vme_start) - ldelta) >> 12),
(uint32_t)((ldelta + hdelta) >> 12), prot,
(VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
assert((uint32_t)((ldelta + hdelta) >> 12) == ((ldelta + hdelta) >> 12));
pmap_map_block(real_map->pmap,
(addr64_t)(vaddr - ldelta),
- (ppnum_t)((((vm_map_offset_t)(entry->object.vm_object->shadow_offset)) +
+ (ppnum_t)((((vm_map_offset_t)(entry->object.vm_object->vo_shadow_offset)) +
entry->offset + (laddr - entry->vme_start) - ldelta) >> 12),
(uint32_t)((ldelta + hdelta) >> 12), prot,
(VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
done:
thread_interrupt_level(interruptible_state);
- KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
- (int)((uint64_t)vaddr >> 32),
- (int)vaddr,
+ /*
+ * Only throttle on faults which cause a pagein.
+ */
+ if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
+ throttle_lowpri_io(1);
+ }
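+ /*
+ * The low-priority I/O throttle is only charged for faults that
+ * actually did paging I/O (the pagein and compressor-swapin fault
+ * types checked above); zero-fill, copy-on-write and cache-hit
+ * faults return without calling throttle_lowpri_io().
+ */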
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
+ ((uint64_t)vaddr >> 32),
+ vaddr,
kr,
type_of_fault,
0);
fault_info.hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
fault_info.no_cache = entry->no_cache;
fault_info.stealth = TRUE;
+ fault_info.io_sync = FALSE;
+ fault_info.cs_bypass = FALSE;
+ fault_info.mark_zf_absent = FALSE;
+ fault_info.batch_pmap_op = FALSE;
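+ /*
+ * The newer vm_object_fault_info_t fields (io_sync, cs_bypass,
+ * mark_zf_absent, batch_pmap_op) have to be initialized explicitly
+ * wherever a fault_info is built up on the stack, since the
+ * structure is filled in field by field rather than zeroed; the
+ * same additions appear below for vm_fault_copy()'s source and
+ * destination fault_info.
+ */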
/*
* Since the pages are wired down, we must be able to
XPR(XPR_VM_FAULT,
"vm_fault_unwire -> vm_fault_page\n",
0,0,0,0,0);
+ result_page = VM_PAGE_NULL;
result = vm_fault_page(
object,
entry->offset + (va - entry->vme_start),
VM_PROT_NONE, TRUE,
+ FALSE, /* page not looked up */
&prot, &result_page, &top_page,
(int *)0,
NULL, map->no_zero_fill,
if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
continue;
+ if (result == VM_FAULT_MEMORY_ERROR &&
+ object == kernel_object) {
+ /*
+ * This must have been allocated with
+ * KMA_KOBJECT and KMA_VAONLY and there's
+ * no physical page at this offset.
+ * We're done (no page to free).
+ */
+ assert(deallocate);
+ continue;
+ }
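+ /*
+ * Ranges in kernel_object created with KMA_KOBJECT | KMA_VAONLY
+ * reserve virtual space only, so a missing physical page on the
+ * deallocate path is expected here and is skipped rather than
+ * hitting the "vm_fault_unwire: failure" panic below.
+ */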
+
if (result != VM_FAULT_SUCCESS)
panic("vm_fault_unwire: failure");
result_object = result_page->object;
- if ((pmap) && (result_page->phys_page != vm_page_guard_addr)) {
- pmap_change_wiring(pmap,
- pmap_addr + (va - entry->vme_start), FALSE);
- }
if (deallocate) {
assert(result_page->phys_page !=
vm_page_fictitious_addr);
pmap_disconnect(result_page->phys_page);
VM_PAGE_FREE(result_page);
} else {
+ if ((pmap) && (result_page->phys_page != vm_page_guard_addr))
+ pmap_change_wiring(pmap,
+ pmap_addr + (va - entry->vme_start), FALSE);
+
if (VM_PAGE_WIRED(result_page)) {
vm_page_lockspin_queues();
- vm_page_unwire(result_page);
+ vm_page_unwire(result_page, TRUE);
vm_page_unlock_queues();
}
if(entry->zero_wired_pages) {
#define RELEASE_PAGE(m) { \
PAGE_WAKEUP_DONE(m); \
vm_page_lockspin_queues(); \
- vm_page_unwire(m); \
+ vm_page_unwire(m, TRUE); \
vm_page_unlock_queues(); \
}
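/*
 * vm_page_unwire() now takes a second argument; the TRUE passed in
 * RELEASE_PAGE() above and at the other unwire sites appears to
 * request that the page be put back on the appropriate paging queue
 * as it is unwired.
 */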
pmap,
pmap_addr,
prot,
+ prot,
TRUE,
FALSE,
FALSE,
+ FALSE,
+ NULL,
&type_of_fault);
done:
object = page->object;
vm_object_lock(object);
vm_page_lockspin_queues();
- vm_page_unwire(page);
+ vm_page_unwire(page, TRUE);
vm_page_unlock_queues();
vm_object_paging_end(object);
vm_object_unlock(object);
fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
fault_info_src.no_cache = FALSE;
fault_info_src.stealth = TRUE;
+ fault_info_src.io_sync = FALSE;
+ fault_info_src.cs_bypass = FALSE;
+ fault_info_src.mark_zf_absent = FALSE;
+ fault_info_src.batch_pmap_op = FALSE;
fault_info_dst.interruptible = interruptible;
fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
fault_info_dst.no_cache = FALSE;
fault_info_dst.stealth = TRUE;
+ fault_info_dst.io_sync = FALSE;
+ fault_info_dst.cs_bypass = FALSE;
+ fault_info_dst.mark_zf_absent = FALSE;
+ fault_info_dst.batch_pmap_op = FALSE;
do { /* while (amount_left > 0) */
/*
}
XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0);
+ dst_page = VM_PAGE_NULL;
result = vm_fault_page(dst_object,
vm_object_trunc_page(dst_offset),
VM_PROT_WRITE|VM_PROT_READ,
FALSE,
+ FALSE, /* page not looked up */
&dst_prot, &dst_page, &dst_top_page,
(int *)0,
&error,
XPR(XPR_VM_FAULT,
"vm_fault_copy(2) -> vm_fault_page\n",
0,0,0,0,0);
+ result_page = VM_PAGE_NULL;
result = vm_fault_page(
src_object,
vm_object_trunc_page(src_offset),
VM_PROT_READ, FALSE,
+ FALSE, /* page not looked up */
&src_prot,
&result_page, &src_top_page,
(int *)0, &error, FALSE,
(vm_size_t)part_size);
if(!dst_page->dirty){
vm_object_lock(dst_object);
- dst_page->dirty = TRUE;
+ SET_PAGE_DIRTY(dst_page, TRUE);
vm_object_unlock(dst_page->object);
}
if (result_page == VM_PAGE_NULL)
vm_page_zero_fill(dst_page);
else{
+ vm_object_lock(result_page->object);
vm_page_copy(result_page, dst_page);
+ vm_object_unlock(result_page->object);
+
if(!dst_page->dirty){
vm_object_lock(dst_object);
- dst_page->dirty = TRUE;
+ SET_PAGE_DIRTY(dst_page, TRUE);
vm_object_unlock(dst_page->object);
}
}
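/*
 * Two related adjustments above: the source page's object is now
 * locked around vm_page_copy(), since the page's contents should
 * not be copied while its object is unlocked, and the destination
 * page is dirtied through SET_PAGE_DIRTY() so the dirty-state
 * bookkeeping stays in one place.
 */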
break;
}
- offset += object->shadow_offset;
+ offset += object->vo_shadow_offset;
object = object->shadow;
level++;
continue;
#endif /* VM_FAULT_CLASSIFY */
-extern int cs_validation;
-
void
vm_page_validate_cs_mapped(
vm_page_t page,
/* verify the SHA1 hash for this page */
validated = cs_validate_page(blobs,
+ pager,
offset + object->paging_offset,
(const void *)kaddr,
&tainted);
}
}
+extern int panic_on_cs_killed;
void
vm_page_validate_cs(
vm_page_t page)
vm_offset_t kaddr;
kern_return_t kr;
boolean_t busy_page;
+ boolean_t need_unmap;
vm_object_lock_assert_held(page->object);
return;
}
+ if (panic_on_cs_killed &&
+ page->slid) {
+ panic("vm_page_validate_cs(%p): page is slid\n", page);
+ }
+ assert(!page->slid);
+
+#if CHECK_CS_VALIDATION_BITMAP
+ if ( vnode_pager_cs_check_validation_bitmap( page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
+ page->cs_validated = TRUE;
+ page->cs_tainted = FALSE;
+ vm_cs_bitmap_validated++;
+ return;
+ }
+#endif
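+ /*
+ * Fast path: when the pager's code-signing validation bitmap already
+ * records this page as validated, the hash check further down is
+ * skipped and the page is marked validated and untainted, with
+ * vm_cs_bitmap_validated counting how often the shortcut fires.
+ * The bitmap itself is only set after a successful full validation
+ * below.
+ */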
vm_object_lock_assert_exclusive(page->object);
object = page->object;
vm_object_paging_begin(object);
/* map the page in the kernel address space */
- koffset = 0;
ksize = PAGE_SIZE_64;
- kr = vm_paging_map_object(&koffset,
- page,
+ koffset = 0;
+ need_unmap = FALSE;
+ kr = vm_paging_map_object(page,
object,
offset,
- &ksize,
VM_PROT_READ,
- FALSE); /* can't unlock object ! */
+ FALSE, /* can't unlock object ! */
+ &ksize,
+ &koffset,
+ &need_unmap);
if (kr != KERN_SUCCESS) {
panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
}
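/*
 * vm_paging_map_object() now reports through need_unmap whether it
 * had to create a temporary kernel mapping; the cleanup at the end
 * of this routine keys off that flag instead of the old
 * "koffset != 0" convention.
 */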
/* validate the mapped page */
vm_page_validate_cs_mapped(page, (const void *) kaddr);
+#if CHECK_CS_VALIDATION_BITMAP
+ if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) {
+ vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET );
+ }
+#endif
assert(page->busy);
assert(object == page->object);
vm_object_lock_assert_exclusive(object);
if (!busy_page) {
PAGE_WAKEUP_DONE(page);
}
- if (koffset != 0) {
+ if (need_unmap) {
/* unmap the map from the kernel address space */
vm_paging_unmap_object(object, koffset, koffset + ksize);
koffset = 0;