/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <debug.h>
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>
-#include <advisory_pageout.h>
#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <kern/thread.h>
#include <kern/xpr.h>
#include <kern/kalloc.h>
+#include <kern/policy_internal.h>
#include <machine/vm_tuning.h>
#include <machine/commpage.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
/*
* ENCRYPTED SWAP:
*/
#include <libkern/OSDebug.h>
#endif
+extern void m_drain(void);
+
+#if VM_PRESSURE_EVENTS
+extern unsigned int memorystatus_available_pages;
+extern unsigned int memorystatus_available_pages_pressure;
+extern unsigned int memorystatus_available_pages_critical;
+extern unsigned int memorystatus_frozen_count;
+extern unsigned int memorystatus_suspended_count;
+
extern vm_pressure_level_t memorystatus_vm_pressure_level;
int memorystatus_purge_on_warning = 2;
int memorystatus_purge_on_urgent = 5;
int memorystatus_purge_on_critical = 8;
-#if VM_PRESSURE_EVENTS
void vm_pressure_response(void);
boolean_t vm_pressure_thread_running = FALSE;
extern void consider_vm_pressure_events(void);
-#endif
+
+#define MEMORYSTATUS_SUSPENDED_THRESHOLD 4
+#endif /* VM_PRESSURE_EVENTS */
+
boolean_t vm_pressure_changed = FALSE;
#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE /* maximum iterations of the active queue to move pages to inactive */
#endif /* VM_PAGEOUT_LAUNDRY_MAX */
#ifndef VM_PAGEOUT_BURST_WAIT
-#define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds */
+#define VM_PAGEOUT_BURST_WAIT 10 /* milliseconds */
#endif /* VM_PAGEOUT_BURST_WAIT */
#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
#endif /* VM_PAGE_FREE_MIN */
-#define VM_PAGE_FREE_RESERVED_LIMIT 100
-#define VM_PAGE_FREE_MIN_LIMIT 1500
-#define VM_PAGE_FREE_TARGET_LIMIT 2000
-
+#define VM_PAGE_FREE_RESERVED_LIMIT 1700
+#define VM_PAGE_FREE_MIN_LIMIT 3500
+#define VM_PAGE_FREE_TARGET_LIMIT 4000
/*
* When vm_page_free_count falls below vm_page_free_reserved,
#ifndef VM_PAGE_REACTIVATE_LIMIT
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif /* VM_PAGE_REACTIVATE_LIMIT */
-#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 100
+#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
extern boolean_t hibernate_cleaning_in_progress;
struct vm_pageout_queue *q;
void *current_chead;
char *scratch_buf;
+ int id;
};
+#define MAX_COMPRESSOR_THREAD_COUNT 8
+
+struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
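/*
 * One struct cq per compressor thread: each entry carries the thread's
 * pageout queue pointer, per-thread compressor chead and scratch buffer,
 * plus the id field added above (presumably used to index ciq[] when the
 * internal iothreads are created).
 */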
+
+void *vm_pageout_immediate_chead;
+char *vm_pageout_immediate_scratch_buf;
#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);
+
+boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
+boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);
+
+boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
+boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif
static void vm_pageout_garbage_collect(int);
-static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_io_throttles(struct vm_pageout_queue *, struct vm_pageout_queue *, boolean_t);
extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);
+static void vm_pageout_immediate(vm_page_t, boolean_t);
+boolean_t vm_compressor_immediate_preferred = FALSE;
+boolean_t vm_compressor_immediate_preferred_override = FALSE;
+boolean_t vm_restricted_to_single_processor = FALSE;
+static boolean_t vm_pageout_waiter = FALSE;
+static boolean_t vm_pageout_running = FALSE;
+
+
static thread_t vm_pageout_external_iothread = THREAD_NULL;
static thread_t vm_pageout_internal_iothread = THREAD_NULL;
*/
unsigned int vm_pageout_active = 0; /* debugging */
-unsigned int vm_pageout_active_busy = 0; /* debugging */
unsigned int vm_pageout_inactive = 0; /* debugging */
unsigned int vm_pageout_inactive_throttled = 0; /* debugging */
unsigned int vm_pageout_inactive_forced = 0; /* debugging */
unsigned int vm_pageout_inactive_deactivated = 0; /* debugging */
unsigned int vm_pageout_inactive_anonymous = 0; /* debugging */
unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
-unsigned int vm_pageout_purged_objects = 0; /* debugging */
+unsigned int vm_pageout_purged_objects = 0; /* used for sysctl vm stats */
unsigned int vm_stat_discard = 0; /* debugging */
unsigned int vm_stat_discard_sent = 0; /* debugging */
unsigned int vm_stat_discard_failure = 0; /* debugging */
unsigned int vm_pageout_scan_active_throttle_success = 0; /* debugging */
unsigned int vm_pageout_scan_inactive_throttle_success = 0; /* debugging */
unsigned int vm_pageout_inactive_external_forced_jetsam_count = 0; /* debugging */
+unsigned int vm_pageout_scan_throttle_deferred = 0; /* debugging */
+unsigned int vm_pageout_scan_yield_unthrottled = 0; /* debugging */
unsigned int vm_page_speculative_count_drifts = 0;
unsigned int vm_page_speculative_count_drift_max = 0;
unsigned int vm_page_steal_pageout_page = 0;
+struct vm_config vm_config;
+
/*
* ENCRYPTED SWAP:
* counters and statistics...
unsigned long vm_page_encrypt_already_encrypted_counter = 0;
boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
-struct vm_pageout_queue vm_pageout_queue_internal;
-struct vm_pageout_queue vm_pageout_queue_external;
+struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
+struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
unsigned int vm_page_speculative_target = 0;
#endif
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
extern void memorystatus_on_pageout_scan_end(void);
+
+uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
+uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
+#if DEVELOPMENT || DEBUG
+uint32_t vm_grab_anon_overrides = 0;
+uint32_t vm_grab_anon_nops = 0;
#endif
-boolean_t vm_page_compressions_failing = FALSE;
+#endif
/*
* Routine: vm_backing_store_disable
shadow_object = object->shadow;
vm_object_lock(shadow_object);
- while (!queue_empty(&object->memq)) {
+ while (!vm_page_queue_empty(&object->memq)) {
vm_page_t p, m;
vm_object_offset_t offset;
- p = (vm_page_t) queue_first(&object->memq);
+ p = (vm_page_t) vm_page_queue_first(&object->memq);
assert(p->private);
- assert(p->pageout);
- p->pageout = FALSE;
+ assert(p->free_when_done);
+ p->free_when_done = FALSE;
assert(!p->cleaning);
assert(!p->laundry);
* Also decrement the burst throttle (if external).
*/
vm_page_lock_queues();
- if (m->pageout_queue)
+ if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)
vm_pageout_throttle_up(m);
/*
* pages may have been modified between the selection as an
* adjacent page and conversion to a target.
*/
- if (m->pageout) {
+ if (m->free_when_done) {
assert(m->busy);
+ assert(m->vm_page_q_state == VM_PAGE_IS_WIRED);
assert(m->wire_count == 1);
m->cleaning = FALSE;
m->encrypted_cleaning = FALSE;
- m->pageout = FALSE;
+ m->free_when_done = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
#endif
* can detect whether the page was redirtied during
* pageout by checking the modify state.
*/
- if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
+ if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
} else {
m->dirty = FALSE;
* If prep_pin_count is nonzero, then someone is using the
* page, so make it active.
*/
- if (!m->active && !m->inactive && !m->throttled && !m->private) {
+ if ((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) && !m->private) {
if (m->reference)
vm_page_activate(m);
else
* will take care of resetting dirty. We clear the
* modify however for the Programmed I/O case.
*/
- pmap_clear_modify(m->phys_page);
+ pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
m->busy = FALSE;
m->absent = FALSE;
* consulted if m->dirty is false.
*/
#if MACH_CLUSTER_STATS
- m->dirty = pmap_is_modified(m->phys_page);
+ m->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty) vm_pageout_cluster_dirtied++;
else vm_pageout_cluster_cleaned++;
* must be locked.
*
*/
-void
+static void
vm_pageclean_setup(
vm_page_t m,
vm_page_t new_m,
#endif
XPR(XPR_VM_PAGEOUT,
- "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
- m->object, m->offset, m,
+ "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
+ VM_PAGE_OBJECT(m), m->offset, m,
new_m, new_offset);
- pmap_clear_modify(m->phys_page);
+ pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
/*
* Mark original page as cleaning in place.
* the real page.
*/
assert(new_m->fictitious);
- assert(new_m->phys_page == vm_page_fictitious_addr);
+ assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
new_m->fictitious = FALSE;
new_m->private = TRUE;
- new_m->pageout = TRUE;
- new_m->phys_page = m->phys_page;
+ new_m->free_when_done = TRUE;
+ VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));
vm_page_lockspin_queues();
- vm_page_wire(new_m);
+ vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
vm_page_unlock_queues();
- vm_page_insert(new_m, new_object, new_offset);
+ vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
assert(!new_m->wanted);
new_m->busy = FALSE;
}
XPR(XPR_VM_PAGEOUT,
"vm_pageout_initialize_page, page 0x%X\n",
m, 0, 0, 0, 0);
+
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ object = VM_PAGE_OBJECT(m);
+
assert(m->busy);
+ assert(object->internal);
/*
* Verify that we really want to clean this page
/*
* Create a paging reference to let us play with the object.
*/
- object = m->object;
paging_offset = m->offset + object->paging_offset;
if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
- VM_PAGE_FREE(m);
panic("reservation without pageout?"); /* alan */
+
+ VM_PAGE_FREE(m);
vm_object_unlock(object);
return;
pager = object->pager;
if (pager == MEMORY_OBJECT_NULL) {
- VM_PAGE_FREE(m);
panic("missing pager for copy object");
+
+ VM_PAGE_FREE(m);
return;
}
/*
* set the page for future call to vm_fault_list_request
*/
- pmap_clear_modify(m->phys_page);
+ pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
SET_PAGE_DIRTY(m, FALSE);
- m->pageout = TRUE;
/*
* keep the object from collapsing or terminating
* The page must not be on any pageout queue.
*/
-void
-vm_pageout_cluster(vm_page_t m, boolean_t pageout)
+int
+vm_pageout_cluster(vm_page_t m, boolean_t immediate_ok, boolean_t keep_object_locked)
{
- vm_object_t object = m->object;
+ vm_object_t object = VM_PAGE_OBJECT(m);
struct vm_pageout_queue *q;
object, m->offset, m, 0, 0);
VM_PAGE_CHECK(m);
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
vm_object_lock_assert_exclusive(object);
/*
* Only a certain kind of page is appreciated here.
*/
assert((m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
- assert(!m->cleaning && !m->pageout && !m->laundry);
-#ifndef CONFIG_FREEZE
- assert(!m->inactive && !m->active);
- assert(!m->throttled);
-#endif
+ assert(!m->cleaning && !m->laundry);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
/*
* protect the object from collapse or termination
*/
vm_object_activity_begin(object);
- m->pageout = pageout;
-
if (object->internal == TRUE) {
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
- m->busy = TRUE;
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ m->busy = TRUE;
+
+ if (vm_compressor_immediate_preferred == TRUE && immediate_ok == TRUE) {
+ panic("immediate compressor mode no longer supported\n");
+
+ if (keep_object_locked == FALSE)
+ vm_object_unlock(object);
+ vm_page_unlock_queues();
+ vm_pageout_immediate(m, keep_object_locked);
+
+ return (1);
+ }
q = &vm_pageout_queue_internal;
} else
q = &vm_pageout_queue_external;
m->laundry = TRUE;
q->pgo_laundry++;
- m->pageout_queue = TRUE;
- queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_PAGEOUT_Q;
+ vm_page_queue_enter(&q->pgo_pending, m, vm_page_t, pageq);
if (q->pgo_idle == TRUE) {
q->pgo_idle = FALSE;
thread_wakeup((event_t) &q->pgo_pending);
}
VM_PAGE_CHECK(m);
+
+ return (0);
}
vm_page_t m)
{
struct vm_pageout_queue *q;
+ vm_object_t m_object;
- assert(m->object != VM_OBJECT_NULL);
- assert(m->object != kernel_object);
+ m_object = VM_PAGE_OBJECT(m);
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
- vm_object_lock_assert_exclusive(m->object);
-#endif
+ assert(m_object != VM_OBJECT_NULL);
+ assert(m_object != kernel_object);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ vm_object_lock_assert_exclusive(m_object);
vm_pageout_throttle_up_count++;
- if (m->object->internal == TRUE)
+ if (m_object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
- if (m->pageout_queue == TRUE) {
+ if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
- queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
- m->pageout_queue = FALSE;
+ vm_page_queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
- m->pageq.next = NULL;
- m->pageq.prev = NULL;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
- vm_object_activity_end(m->object);
+ vm_object_activity_end(m_object);
}
if (m->laundry == TRUE) {
struct vm_pageout_queue *q,
int batch_cnt)
{
-#if DEBUG
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
vm_pageout_throttle_up_count += batch_cnt;
#define VM_PAGEOUT_STAT_AFTER(i) \
(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
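/*
 * Illustrative expansion (assuming the usual companion macro
 *   #define VM_PAGEOUT_STAT_BEFORE(i) \
 *	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
 * which is used below): with VM_PAGEOUT_STAT_SIZE == 4,
 *   VM_PAGEOUT_STAT_AFTER(3)  == 0	(wraps forward)
 *   VM_PAGEOUT_STAT_AFTER(1)  == 2
 *   VM_PAGEOUT_STAT_BEFORE(0) == 3	(wraps backward)
 * so vm_pageout_stats[] behaves as a circular buffer without taking a
 * modulo on every index computation.
 */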
+#if VM_PAGE_BUCKETS_CHECK
+int vm_page_buckets_check_interval = 10; /* in seconds */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
/*
* Called from compute_averages().
*/
{
unsigned int vm_pageout_next;
+#if VM_PAGE_BUCKETS_CHECK
+ /* check the consistency of VM page buckets at regular interval */
+ static int counter = 0;
+ if ((++counter % vm_page_buckets_check_interval) == 0) {
+ vm_page_buckets_check();
+ }
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
vm_memory_pressure =
vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
}
/* provide number of pages reclaimed in the last "nsecs_monitored" */
- do {
- vm_pageout_now = vm_pageout_stat_now;
- pages_reclaimed = 0;
- for (vm_pageout_then =
- VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
- vm_pageout_then != vm_pageout_now &&
- nsecs_monitored-- != 0;
- vm_pageout_then =
- VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
- pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
- }
- } while (vm_pageout_now != vm_pageout_stat_now);
+ vm_pageout_now = vm_pageout_stat_now;
+ pages_reclaimed = 0;
+ for (vm_pageout_then =
+ VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
+ vm_pageout_then != vm_pageout_now &&
+ nsecs_monitored-- != 0;
+ vm_pageout_then =
+ VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
+ pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
+ }
*pages_reclaimed_p = pages_reclaimed;
return KERN_SUCCESS;
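/*
 * Worked example of the loop above, with hypothetical values: if
 * vm_pageout_stat_now == 5 and nsecs_monitored == 3, the walk starts at
 * VM_PAGEOUT_STAT_BEFORE(5) == 4 and sums buckets 4, 3 and 2 before
 * nsecs_monitored is exhausted, i.e. the three most recently completed
 * stat intervals, never including the in-progress bucket.
 */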
+#if DEVELOPMENT || DEBUG
+
+static void
+vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);
+
/*
- * function in BSD to apply I/O throttle to the pageout thread
+ * condition variable used to make sure there is
+ * only a single sweep going on at a time
*/
-extern void vm_pageout_io_throttle(void);
+boolean_t vm_pageout_disconnect_all_pages_active = FALSE;
+
+
+void
+vm_pageout_disconnect_all_pages()
+{
+ vm_page_lock_queues();
+
+ if (vm_pageout_disconnect_all_pages_active == TRUE) {
+ vm_page_unlock_queues();
+ return;
+ }
+ vm_pageout_disconnect_all_pages_active = TRUE;
+ vm_page_unlock_queues();
+
+ vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
+ vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
+ vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);
+
+ vm_pageout_disconnect_all_pages_active = FALSE;
+}
+
+
+void
+vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
+{
+ vm_page_t m;
+ vm_object_t t_object = NULL;
+ vm_object_t l_object = NULL;
+ vm_object_t m_object = NULL;
+ int delayed_unlock = 0;
+ int try_failed_count = 0;
+ int disconnected_count = 0;
+ int paused_count = 0;
+ int object_locked_count = 0;
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
+ q, qcount, 0, 0, 0);
+
+ vm_page_lock_queues();
+
+ while (qcount && !vm_page_queue_empty(q)) {
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ m = (vm_page_t) vm_page_queue_first(q);
+ m_object = VM_PAGE_OBJECT(m);
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != l_object) {
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ if (m_object != t_object)
+ try_failed_count = 0;
+
+ /*
+			 * Try to lock object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run...
+ */
+ if ( !vm_object_lock_try_scan(m_object)) {
+
+ if (try_failed_count > 20) {
+ goto reenter_pg_on_q;
+ }
+ vm_page_unlock_queues();
+ mutex_pause(try_failed_count++);
+ vm_page_lock_queues();
+ delayed_unlock = 0;
+
+ paused_count++;
+
+ t_object = m_object;
+ continue;
+ }
+ object_locked_count++;
+
+ l_object = m_object;
+ }
+ if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error || m->free_when_done) {
+ /*
+ * put it back on the head of its queue
+ */
+ goto reenter_pg_on_q;
+ }
+ if (m->pmapped == TRUE) {
+
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+
+ disconnected_count++;
+ }
+reenter_pg_on_q:
+ vm_page_queue_remove(q, m, vm_page_t, pageq);
+ vm_page_queue_enter(q, m, vm_page_t, pageq);
+
+ qcount--;
+ try_failed_count = 0;
+
+ if (delayed_unlock++ > 128) {
+
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ lck_mtx_yield(&vm_page_queue_lock);
+ delayed_unlock = 0;
+ }
+ }
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ vm_page_unlock_queues();
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
+ q, disconnected_count, object_locked_count, paused_count, 0);
+}
+
+#endif
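/*
 * A minimal userspace analogue of the try-lock/backoff pattern used in
 * the queue sweep above (illustrative sketch only: pthread mutexes and
 * usleep() stand in for the kernel's vm_object_lock_try_scan() and
 * mutex_pause(); the function and its names are invented for the example).
 * With the queue lock held we may only *try* the object lock; on failure
 * we drop the queue lock, pause so the lock owner can run, retake the
 * queue lock and retry, giving up after a fixed number of failures.
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static bool
lock_object_with_backoff(pthread_mutex_t *queue_lock,
			 pthread_mutex_t *object_lock)
{
	int try_failed_count = 0;

	/* queue_lock is held on entry, and re-held on every retry */
	for (;;) {
		if (pthread_mutex_trylock(object_lock) == 0)
			return true;	/* success: both locks now held */

		if (try_failed_count > 20)
			return false;	/* give up; caller requeues the page */

		/*
		 * Drop the queue lock and pause briefly so the thread
		 * holding the object lock gets a chance to make progress.
		 */
		pthread_mutex_unlock(queue_lock);
		usleep(10 * (unsigned)++try_failed_count);
		pthread_mutex_lock(queue_lock);
	}
}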
-#if LATENCY_JETSAM
-boolean_t jlp_init = FALSE;
-uint64_t jlp_time = 0, jlp_current = 0;
-struct vm_page jetsam_latency_page[NUM_OF_JETSAM_LATENCY_TOKENS];
-unsigned int latency_jetsam_wakeup = 0;
-#endif /* LATENCY_JETSAM */
+static void
+vm_pageout_page_queue(vm_page_queue_head_t *, int);
/*
- * Page States: Used below to maintain the page state
- * before it's removed from it's Q. This saved state
- * helps us do the right accounting in certain cases
+ * condition variable used to make sure there is
+ * only a single sweep going on at a time
*/
-#define PAGE_STATE_SPECULATIVE 1
-#define PAGE_STATE_ANONYMOUS 2
-#define PAGE_STATE_INACTIVE 3
-#define PAGE_STATE_INACTIVE_FIRST 4
-#define PAGE_STATE_CLEAN 5
+boolean_t vm_pageout_anonymous_pages_active = FALSE;
+
+
+void
+vm_pageout_anonymous_pages()
+{
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+
+ vm_page_lock_queues();
+
+ if (vm_pageout_anonymous_pages_active == TRUE) {
+ vm_page_unlock_queues();
+ return;
+ }
+ vm_pageout_anonymous_pages_active = TRUE;
+ vm_page_unlock_queues();
+
+ vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
+ vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
+ vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);
+
+ if (VM_CONFIG_SWAP_IS_PRESENT)
+ vm_consider_swapping();
+
+ vm_page_lock_queues();
+ vm_pageout_anonymous_pages_active = FALSE;
+ vm_page_unlock_queues();
+ }
+}
+
+
+void
+vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
+{
+ vm_page_t m;
+ vm_object_t t_object = NULL;
+ vm_object_t l_object = NULL;
+ vm_object_t m_object = NULL;
+ int delayed_unlock = 0;
+ int try_failed_count = 0;
+ int refmod_state;
+ int pmap_options;
+ struct vm_pageout_queue *iq;
+ ppnum_t phys_page;
+
+
+ iq = &vm_pageout_queue_internal;
+
+ vm_page_lock_queues();
+
+ while (qcount && !vm_page_queue_empty(q)) {
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (VM_PAGE_Q_THROTTLED(iq)) {
+
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ iq->pgo_draining = TRUE;
+
+ assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
+ vm_page_unlock_queues();
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ vm_page_lock_queues();
+ delayed_unlock = 0;
+ continue;
+ }
+ m = (vm_page_t) vm_page_queue_first(q);
+ m_object = VM_PAGE_OBJECT(m);
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != l_object) {
+ if ( !m_object->internal)
+ goto reenter_pg_on_q;
+
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ if (m_object != t_object)
+ try_failed_count = 0;
+
+ /*
+			 * Try to lock object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run...
+ */
+ if ( !vm_object_lock_try_scan(m_object)) {
+ if (try_failed_count > 20) {
+ goto reenter_pg_on_q;
+ }
+ vm_page_unlock_queues();
+ mutex_pause(try_failed_count++);
+ vm_page_lock_queues();
+ delayed_unlock = 0;
+
+ t_object = m_object;
+ continue;
+ }
+ l_object = m_object;
+ }
+ if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error || m->free_when_done) {
+ /*
+ * page is not to be cleaned
+ * put it back on the head of its queue
+ */
+ goto reenter_pg_on_q;
+ }
+ phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+ if (m->reference == FALSE && m->pmapped == TRUE) {
+ refmod_state = pmap_get_refmod(phys_page);
+
+ if (refmod_state & VM_MEM_REFERENCED)
+ m->reference = TRUE;
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ if (m->reference == TRUE) {
+ m->reference = FALSE;
+ pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+ goto reenter_pg_on_q;
+ }
+ if (m->pmapped == TRUE) {
+ if (m->dirty || m->precious) {
+ pmap_options = PMAP_OPTIONS_COMPRESSOR;
+ } else {
+ pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+ }
+ refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ if ( !m->dirty && !m->precious) {
+ vm_page_unlock_queues();
+ VM_PAGE_FREE(m);
+ vm_page_lock_queues();
+ delayed_unlock = 0;
+
+ goto next_pg;
+ }
+ if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
+
+ if (!m_object->pager_initialized) {
+
+ vm_page_unlock_queues();
-#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m) \
+ vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);
+
+ if (!m_object->pager_initialized)
+ vm_object_compressor_pager_create(m_object);
+
+ vm_page_lock_queues();
+ delayed_unlock = 0;
+ }
+ if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL)
+ goto reenter_pg_on_q;
+ /*
+ * vm_object_compressor_pager_create will drop the object lock
+ * which means 'm' may no longer be valid to use
+ */
+ continue;
+ }
+ /*
+ * we've already factored out pages in the laundry which
+ * means this page can't be on the pageout queue so it's
+ * safe to do the vm_page_queues_remove
+ */
+ vm_page_queues_remove(m, TRUE);
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ vm_pageout_cluster(m, FALSE, FALSE);
+
+ goto next_pg;
+
+reenter_pg_on_q:
+ vm_page_queue_remove(q, m, vm_page_t, pageq);
+ vm_page_queue_enter(q, m, vm_page_t, pageq);
+next_pg:
+ qcount--;
+ try_failed_count = 0;
+
+ if (delayed_unlock++ > 128) {
+
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ lck_mtx_yield(&vm_page_queue_lock);
+ delayed_unlock = 0;
+ }
+ }
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ vm_page_unlock_queues();
+}
+
+
+
+/*
+ * function in BSD to apply I/O throttle to the pageout thread
+ */
+extern void vm_pageout_io_throttle(void);
+
+#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \
MACRO_BEGIN \
/* \
* If a "reusable" page somehow made it back into \
* as "all re-used" instead of converting it to \
* "partially re-used", which could be expensive. \
*/ \
+ assert(VM_PAGE_OBJECT((m)) == (obj)); \
if ((m)->reusable || \
- (m)->object->all_reusable) { \
- vm_object_reuse_pages((m)->object, \
+ (obj)->all_reusable) { \
+ vm_object_reuse_pages((obj), \
(m)->offset, \
(m)->offset + PAGE_SIZE_64, \
FALSE); \
mach_timespec_t ts;
};
+#if CONFIG_BACKGROUND_QUEUE
+uint64_t vm_pageout_considered_bq_internal = 0;
+uint64_t vm_pageout_considered_bq_external = 0;
+uint64_t vm_pageout_rejected_bq_internal = 0;
+uint64_t vm_pageout_rejected_bq_external = 0;
+#endif
uint32_t vm_pageout_considered_page = 0;
uint32_t vm_page_filecache_min = 0;
-#define VM_PAGE_FILECACHE_MIN 50000
#define ANONS_GRABBED_LIMIT 2
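/*
 * ANONS_GRABBED_LIMIT bounds how many anonymous pages vm_pageout_scan
 * will pick in a row: anons_grabbed is bumped for each anonymous victim
 * and reset to 0 whenever a file-backed page is chosen, so at most two
 * consecutive anonymous pages are reclaimed before the external
 * (file-backed) inactive queue is considered again.
 */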
+#if CONFIG_SECLUDED_MEMORY
+extern vm_page_t vm_page_grab_secluded(void);
+uint64_t vm_pageout_freed_from_secluded = 0;
+uint64_t vm_pageout_secluded_reactivated = 0; /* debugging; how many secluded pages are found to be referenced on pageout (and are therefore reactivated) */
+uint64_t vm_pageout_secluded_burst_count = 0;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
* It returns with both vm_page_queue_free_lock and vm_page_queue_lock
vm_object_t last_object_tried;
uint32_t catch_up_count = 0;
uint32_t inactive_reclaim_run;
- boolean_t forced_reclaim;
boolean_t exceeded_burst_throttle;
boolean_t grab_anonymous = FALSE;
boolean_t force_anonymous = FALSE;
int anons_grabbed = 0;
- int page_prev_state = 0;
+ int page_prev_q_state = 0;
+ boolean_t requeue_insert_first = FALSE;
+#if CONFIG_BACKGROUND_QUEUE
+ boolean_t ignore_reference = FALSE;
+#endif
+#if CONFIG_SECLUDED_MEMORY
+ boolean_t ignore_reference_secluded;
+#endif /* CONFIG_SECLUDED_MEMORY */
int cache_evict_throttle = 0;
uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
+ int force_purge = 0;
+#define DELAY_SPECULATIVE_AGE 1000
+ int delay_speculative_age = 0;
+ vm_object_t m_object = VM_OBJECT_NULL;
+
+#if VM_PRESSURE_EVENTS
vm_pressure_level_t pressure_level;
+#endif /* VM_PRESSURE_EVENTS */
- VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
vm_pageout_speculative_clean, vm_pageout_inactive_clean,
vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
-#if LATENCY_JETSAM
- if (jlp_init == FALSE) {
- int i=0;
- vm_page_t jlp;
- for(; i < NUM_OF_JETSAM_LATENCY_TOKENS; i++) {
- jlp = &jetsam_latency_page[i];
- jlp->fictitious = TRUE;
- jlp->offset = 0;
-
- }
- jlp = &jetsam_latency_page[0];
- queue_enter(&vm_page_queue_active, jlp, vm_page_t, pageq);
- jlp->active = TRUE;
-
- jlp->offset = mach_absolute_time();
- jlp_time = jlp->offset;
- jlp_current++;
- jlp_init = TRUE;
- }
-#endif /* LATENCY_JETSAM */
-
flow_control.state = FCS_IDLE;
iq = &vm_pageout_queue_internal;
eq = &vm_pageout_queue_external;
Restart:
+
+
assert(delayed_unlock!=0);
/*
DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
- if (delayed_unlock == 0) {
- vm_page_lock_queues();
+#if CONFIG_SECLUDED_MEMORY
+ if (vm_page_secluded_count > vm_page_secluded_target &&
+ object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+
+ /*
+ * Deal with secluded_q overflow.
+ */
+ if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_FIFO) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_FIFO:
+ * No aging, just reclaim the excess pages
+ * at the tail of the secluded queue.
+ * We're reclaiming pages and we're not hogging
+ * any global lock, so no need for throttling.
+ */
+
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ /* transfer to free queue */
+ vm_page_unlock_queues();
+ while (secluded_overflow--) {
+ secluded_page = vm_page_grab_secluded();
+ if (secluded_page == VM_PAGE_NULL) {
+ break;
+ }
+ assert(secluded_page->busy);
+ assert(secluded_page->pageq.next == 0 &&
+ secluded_page->pageq.prev == 0);
+
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ secluded_page = VM_PAGE_NULL;
+ }
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_ALONG_ACTIVE) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_ALONG_ACTIVE:
+ * There might be free pages at the tail of the
+ * secluded queue:
+ * just move them to the free queue (in batches).
+ * There can also be an excessive number of "inuse"
+ * pages:
+ * we age them by resetting their "referenced" bit and
+ * moving them to the inactive queue. Their trip
+ * through the secluded queue was equivalent to a trip
+ * through the active queue.
+ *
+ * We're holding the page queue lock, so we need
+ * to throttle and give someone else a chance to
+ * grab that lock if needed.
+ *
+ * We're also limiting the number of secluded "inuse"
+ * pages that get moved to the inactive queue, using
+	 * the same "active_burst_count" method we use when
+ * balancing the active and inactive queues, because
+ * there can be a large number
+ * of extra "inuse" pages and handling them gets in the
+ * way of actually reclaiming memory.
+ */
+
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_secluded_count_inuse);
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT;
+ delayed_unlock = 1;
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ while (secluded_overflow-- > 0 &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
+ assert(secluded_page->vm_page_q_state ==
+ VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(secluded_page, FALSE);
+ assert(!secluded_page->fictitious);
+ assert(!VM_PAGE_WIRED(secluded_page));
+ if (secluded_page->vm_page_object == 0) {
+ /* transfer to free queue */
+ assert(secluded_page->busy);
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ } else {
+ /* transfer to head of inactive queue */
+ pmap_clear_refmod_options(
+ VM_PAGE_GET_PHYS_PAGE(secluded_page),
+ VM_MEM_REFERENCED,
+ PMAP_OPTIONS_NOFLUSH,
+ (void *)NULL);
+ vm_page_enqueue_inactive(secluded_page,
+ FALSE);
+ if (active_burst_count-- == 0) {
+ vm_pageout_secluded_burst_count++;
+ break;
+ }
+ }
+ secluded_page = VM_PAGE_NULL;
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ delayed_unlock_limit,
+ 1);
+ vm_page_free_list(local_freeq,
+ TRUE);
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count,
+ 0, 0, 1);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ delayed_unlock = 1;
+ }
+ }
+ delayed_unlock = 1;
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_AFTER_INACTIVE) {
+ /*
+ * SECLUDED_AGING_AFTER_INACTIVE:
+ * No balancing needed at this point: when we get to
+ * the "choose a victim" part below, we'll consider the
+ * extra secluded pages before any inactive page.
+ */
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_BEFORE_ACTIVE) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_BEFORE_ACTIVE:
+ * Excess secluded pages go to the active queue and
+ * will later go to the inactive queue.
+ */
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_secluded_count_inuse);
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT;
+ delayed_unlock = 1;
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ while (secluded_overflow-- > 0 &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
+ assert(secluded_page->vm_page_q_state ==
+ VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(secluded_page, FALSE);
+ assert(!secluded_page->fictitious);
+ assert(!VM_PAGE_WIRED(secluded_page));
+ if (secluded_page->vm_page_object == 0) {
+ /* transfer to free queue */
+ assert(secluded_page->busy);
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ } else {
+ /* transfer to head of active queue */
+ vm_page_enqueue_active(secluded_page,
+ FALSE);
+ if (active_burst_count-- == 0) {
+ vm_pageout_secluded_burst_count++;
+ break;
+ }
+ }
+ secluded_page = VM_PAGE_NULL;
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ delayed_unlock_limit,
+ 1);
+ vm_page_free_list(local_freeq,
+ TRUE);
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count,
+ 0, 0, 1);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ delayed_unlock = 1;
+ }
+ }
delayed_unlock = 1;
+ } else if (vm_page_secluded_count > vm_page_secluded_target) {
+ panic("unsupported secluded_aging_policy %d\n",
+ secluded_aging_policy);
+ }
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ 0,
+ 0);
+ vm_page_free_list(local_freeq, TRUE);
+ VM_DEBUG_EVENT(vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count, 0, 0, 0);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
}
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ assert(delayed_unlock);
+
if (vm_upl_wait_for_pages < 0)
vm_upl_wait_for_pages = 0;
memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_START);
- while (!queue_empty(&vm_page_queue_active) && active_burst_count--) {
+ while (!vm_page_queue_empty(&vm_page_queue_active) && active_burst_count--) {
vm_pageout_active++;
- m = (vm_page_t) queue_first(&vm_page_queue_active);
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
- assert(m->active && !m->inactive);
+ assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
assert(!m->laundry);
- assert(m->object != kernel_object);
- assert(m->phys_page != vm_page_guard_addr);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
-#if LATENCY_JETSAM
- if (m->fictitious) {
- const uint32_t FREE_TARGET_MULTIPLIER = 2;
-
- uint64_t now = mach_absolute_time();
- uint64_t delta = now - m->offset;
- clock_sec_t jl_secs = 0;
- clock_usec_t jl_usecs = 0;
- boolean_t issue_jetsam = FALSE;
-
- absolutetime_to_microtime(delta, &jl_secs, &jl_usecs);
- jl_usecs += jl_secs * USEC_PER_SEC;
+ /*
+ * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+ *
+ * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+ * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+	 * new reference happens. If no further references happen on the page after that remote TLB flush,
+ * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+ * by pageout_scan, which is just fine since the last reference would have happened quite far
+ * in the past (TLB caches don't hang around for very long), and of course could just as easily
+ * have happened before we moved the page
+ */
+ pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
- /* Jetsam only if the token hasn't aged sufficiently and the free count is close to the target (avoiding spurious triggers) */
- if ((jl_usecs <= JETSAM_AGE_NOTIFY_CRITICAL) && (vm_page_free_count < (FREE_TARGET_MULTIPLIER * vm_page_free_target))) {
- issue_jetsam = TRUE;
- }
-
- VM_DEBUG_EVENT(vm_pageout_page_token, VM_PAGEOUT_PAGE_TOKEN, DBG_FUNC_NONE,
- vm_page_active_count, vm_page_inactive_count, vm_page_free_count, jl_usecs);
-
- m->offset = 0;
- queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
- queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ /*
+ * The page might be absent or busy,
+ * but vm_page_deactivate can handle that.
+ * FALSE indicates that we don't want a H/W clear reference
+ */
+ vm_page_deactivate_internal(m, FALSE);
- m->offset = now;
- jlp_time = now;
-
- if (issue_jetsam) {
- vm_page_unlock_queues();
+ if (delayed_unlock++ > delayed_unlock_limit) {
- if (local_freeq) {
- vm_page_free_list(local_freeq, TRUE);
- local_freeq = NULL;
- local_freed = 0;
- }
-
- VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
- vm_page_active_count, vm_page_inactive_count, vm_page_free_count, 0);
-
- assert_wait_timeout(&latency_jetsam_wakeup, THREAD_INTERRUPTIBLE, 10 /* msecs */, 1000*NSEC_PER_USEC);
- /* Kill the top process asynchronously */
- memorystatus_kill_on_VM_page_shortage(TRUE);
- thread_block(THREAD_CONTINUE_NULL);
+ if (local_freeq) {
+ vm_page_unlock_queues();
- VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 1);
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, 0, 0, 1);
+
+ local_freeq = NULL;
+ local_freed = 0;
vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
}
- } else {
-#endif /* LATENCY_JETSAM */
- /*
- * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
- *
- * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
- * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
- * new reference happens. If no futher references happen on the page after that remote TLB flushes
- * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
- * by pageout_scan, which is just fine since the last reference would have happened quite far
- * in the past (TLB caches don't hang around for very long), and of course could just as easily
- * have happened before we moved the page
- */
- pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+
+ delayed_unlock = 1;
/*
- * The page might be absent or busy,
- * but vm_page_deactivate can handle that.
- * FALSE indicates that we don't want a H/W clear reference
+ * continue the while loop processing
+ * the active queue... need to hold
+ * the page queues lock
*/
- vm_page_deactivate_internal(m, FALSE);
-
- if (delayed_unlock++ > delayed_unlock_limit) {
-
- if (local_freeq) {
- vm_page_unlock_queues();
-
- VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
- vm_page_free_count, local_freed, delayed_unlock_limit, 1);
-
- vm_page_free_list(local_freeq, TRUE);
-
- VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
- vm_page_free_count, 0, 0, 1);
-
- local_freeq = NULL;
- local_freed = 0;
- vm_page_lock_queues();
- } else
- lck_mtx_yield(&vm_page_queue_lock);
-
- delayed_unlock = 1;
-
- /*
- * continue the while loop processing
- * the active queue... need to hold
- * the page queues lock
- */
- }
-#if LATENCY_JETSAM
}
-#endif /* LATENCY_JETSAM */
}
VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
done_moving_active_pages:
- if (vm_page_free_count + local_freed >= vm_page_free_target) {
+#if CONFIG_BACKGROUND_QUEUE
+ if ((vm_page_free_count + local_freed >= vm_page_free_target) &&
+ ((vm_page_background_mode < VM_PAGE_BG_LEVEL_2) || (vm_page_background_count <= vm_page_background_target)))
+#else
+ if (vm_page_free_count + local_freed >= vm_page_free_target)
+#endif
+ {
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
}
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ vm_page_unlock_queues();
+
if (local_freeq) {
- vm_page_unlock_queues();
VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
vm_page_free_count, local_freed, delayed_unlock_limit, 2);
local_freeq = NULL;
local_freed = 0;
- vm_page_lock_queues();
}
+ vm_consider_waking_compactor_swapper();
+
+ vm_page_lock_queues();
+
/*
* make sure the pageout I/O threads are running
* throttled in case there are still requests
vm_page_inactive_count +
vm_page_speculative_count);
if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
- !queue_empty(&vm_page_queue_active)) {
+ !vm_page_queue_empty(&vm_page_queue_active)) {
/*
* inactive target still not met... keep going
* until we get the queues balanced...
return_from_scan:
assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
- VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
- VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
vm_pageout_speculative_clean, vm_pageout_inactive_clean,
vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
 * If the purge succeeds, go back to the top and reevaluate
* the new memory situation.
*/
- pressure_level = memorystatus_vm_pressure_level;
+
assert (available_for_purge>=0);
+ force_purge = 0; /* no force-purging */
- if (available_for_purge
- || pressure_level > kVMPressureNormal
- ) {
- int force_purge;
-
- if (object != NULL) {
- vm_object_unlock(object);
- object = NULL;
- }
+#if VM_PRESSURE_EVENTS
+ pressure_level = memorystatus_vm_pressure_level;
- VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
- memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
+ if (pressure_level > kVMPressureNormal) {
- force_purge = 0; /* no force-purging */
if (pressure_level >= kVMPressureCritical) {
force_purge = memorystatus_purge_on_critical;
} else if (pressure_level >= kVMPressureUrgent) {
force_purge = memorystatus_purge_on_urgent;
} else if (pressure_level >= kVMPressureWarning) {
force_purge = memorystatus_purge_on_warning;
- } else {
- force_purge = 0;
}
- if (vm_purgeable_object_purge_one(force_purge)) {
+ }
+#endif /* VM_PRESSURE_EVENTS */
+
+ if (available_for_purge || force_purge) {
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+
+ memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
+ VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
+ if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
+ vm_pageout_purged_objects++;
VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
continue;
VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
}
- if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
+
+ if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
/*
* try to pull pages from the aging bins...
* see vm_page.h for an explanation of how
* this mechanism works
*/
struct vm_speculative_age_q *aq;
- mach_timespec_t ts_fully_aged;
boolean_t can_steal = FALSE;
int num_scanned_queues;
aq = &vm_page_queue_speculative[speculative_steal_index];
num_scanned_queues = 0;
- while (queue_empty(&aq->age_q) &&
+ while (vm_page_queue_empty(&aq->age_q) &&
num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
speculative_steal_index++;
if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
vm_page_speculative_count_drift_max = vm_page_speculative_count;
vm_page_speculative_count_drifts++;
-#if 6553678
- Debugger("vm_pageout_scan: no speculative pages");
-#endif
+#if DEVELOPMENT || DEBUG
+ panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
+#endif /* DEVELOPMENT || DEBUG */
/* readjust... */
vm_page_speculative_count = 0;
/* ... and continue */
if (vm_page_speculative_count > vm_page_speculative_target)
can_steal = TRUE;
else {
- ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
- ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
- * 1000 * NSEC_PER_USEC;
+ if (!delay_speculative_age) {
+ mach_timespec_t ts_fully_aged;
- ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
+ ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
+ ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
+ * 1000 * NSEC_PER_USEC;
- clock_sec_t sec;
- clock_nsec_t nsec;
- clock_get_system_nanotime(&sec, &nsec);
- ts.tv_sec = (unsigned int) sec;
- ts.tv_nsec = nsec;
+ ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
- if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
- can_steal = TRUE;
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
+ can_steal = TRUE;
+ else
+ delay_speculative_age++;
+ } else {
+ delay_speculative_age++;
+ if (delay_speculative_age == DELAY_SPECULATIVE_AGE)
+ delay_speculative_age = 0;
+ }
}
if (can_steal == TRUE)
- vm_page_speculate_ageit(aq);
+ vm_page_speculate_ageit(aq);
}
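/*
 * The millisecond-to-mach_timespec split above, worked through with a
 * hypothetical product: if VM_PAGE_MAX_SPECULATIVE_AGE_Q *
 * vm_page_speculative_q_age_ms == 2500 (ms), then
 *	tv_sec  = 2500 / 1000 = 2
 *	tv_nsec = (2500 % 1000) * 1000 * NSEC_PER_USEC = 500ms in ns
 * and ADD_MACH_TIMESPEC folds in aq->age_ts to give the absolute time
 * at which the oldest speculative bin counts as fully aged.
 */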
- if (queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
+#if CONFIG_BACKGROUND_QUEUE
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0 &&
+ ((vm_page_background_mode == VM_PAGE_BG_DISABLED) || (vm_page_background_count <= vm_page_background_target)))
+#else
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0)
+#endif
+ {
int pages_evicted;
if (object != NULL) {
if (cache_evict_throttle)
cache_evict_throttle--;
+#if CONFIG_JETSAM
+ /*
+	 * don't let the filecache_min fall below ~15% (1/7) of available memory
+ * on systems with an active compressor that isn't nearing its
+ * limits w/r to accepting new data
+ *
+ * on systems w/o the compressor/swapper, the filecache is always
+ * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
+ * since most (if not all) of the anonymous pages are in the
+ * throttled queue (which isn't counted as available) which
+ * effectively disables this filter
+ */
+ if (vm_compressor_low_on_space())
+ vm_page_filecache_min = 0;
+ else
+ vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 7);
+#else
+ /*
+ * don't let the filecache_min fall below 33% of available memory...
+ */
+ vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 3);
+#endif
+ if (vm_page_free_count < (vm_page_free_reserved / 4))
+ vm_page_filecache_min = 0;
exceeded_burst_throttle = FALSE;
/*
* within the last vm_pageout_burst_inactive_throttle iterations
* 3) Flow control - default pageout queue is full
*/
- if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_anonymous) && queue_empty(&sq->age_q)) {
+ if (vm_page_queue_empty(&vm_page_queue_inactive) &&
+ vm_page_queue_empty(&vm_page_queue_anonymous) &&
+ vm_page_queue_empty(&sq->age_q)) {
vm_pageout_scan_empty_throttle++;
msecs = vm_pageout_empty_wait;
goto vm_pageout_scan_delay;
goto vm_pageout_scan_delay;
} else if (VM_PAGE_Q_THROTTLED(iq) &&
- VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
+ VM_DYNAMIC_PAGING_ENABLED()) {
clock_sec_t sec;
clock_nsec_t nsec;
case FCS_IDLE:
if ((vm_page_free_count + local_freed) < vm_page_free_target) {
- if (vm_page_pageable_external_count > vm_page_filecache_min && !queue_empty(&vm_page_queue_inactive)) {
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ if (local_freeq) {
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 3);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, local_freed, 0, 3);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ thread_yield_internal(1);
+
+ vm_page_lock_queues();
+
+ if (!VM_PAGE_Q_THROTTLED(iq)) {
+ vm_pageout_scan_yield_unthrottled++;
+ continue;
+ }
+ if (vm_page_pageable_external_count > vm_page_filecache_min &&
+ !vm_page_queue_empty(&vm_page_queue_inactive)) {
anons_grabbed = ANONS_GRABBED_LIMIT;
+ vm_pageout_scan_throttle_deferred++;
goto consider_inactive;
}
if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
}
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ vm_page_unlock_queues();
+
if (local_freeq) {
- vm_page_unlock_queues();
VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
vm_page_free_count, local_freed, delayed_unlock_limit, 3);
local_freeq = NULL;
local_freed = 0;
- vm_page_lock_queues();
+ }
+ vm_consider_waking_compactor_swapper();
- if (flow_control.state == FCS_DELAYED &&
- !VM_PAGE_Q_THROTTLED(iq)) {
- flow_control.state = FCS_IDLE;
- goto consider_inactive;
- }
+ vm_page_lock_queues();
+
+ if (flow_control.state == FCS_DELAYED &&
+ !VM_PAGE_Q_THROTTLED(iq)) {
+ flow_control.state = FCS_IDLE;
+ goto consider_inactive;
}
if (vm_page_free_count >= vm_page_free_target) {
vm_pageout_scan_throttle++;
iq->pgo_throttled = TRUE;
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
- vm_consider_waking_compactor_swapper();
-
assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
counter(c_vm_pageout_scan_block++);
* Choose a victim.
*/
while (1) {
+ uint32_t inactive_external_count;
+
+#if CONFIG_BACKGROUND_QUEUE
+ ignore_reference = FALSE;
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
m = NULL;
+ m_object = VM_OBJECT_NULL;
- if (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
+ if (VM_DYNAMIC_PAGING_ENABLED()) {
assert(vm_page_throttled_count == 0);
- assert(queue_empty(&vm_page_queue_throttled));
+ assert(vm_page_queue_empty(&vm_page_queue_throttled));
+ }
+
+
+#if CONFIG_SECLUDED_MEMORY
+ if ((secluded_aging_policy ==
+ SECLUDED_AGING_AFTER_INACTIVE) &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ /*
+ * SECLUDED_AGING_AFTER_INACTIVE:
+ * Secluded pages have already been aged
+ * through the active and inactive queues, and
+ * we now have too many of them, so let's
+ * balance that queue by considering reclaiming
+ * the oldest page in the secluded queue.
+ */
+ assert(!vm_page_queue_empty(&vm_page_queue_secluded));
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_secluded);
+ if (m->vm_page_object == 0) {
+ /*
+ * It's already a free page:
+ * just move it to a free queue.
+ */
+ vm_page_queues_remove(m, TRUE);
+ assert(m->busy);
+ assert(m->pageq.next == 0);
+ assert(m->pageq.prev == 0);
+ m->snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+ goto done_with_inactivepage;
+ }
+ /*
+ * Not a free page: we've found our next
+ * "victim".
+ */
+ break;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
+ vm_object_t bg_m_object = NULL;
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
+
+ bg_m_object = VM_PAGE_OBJECT(m);
+
+ if (!VM_PAGE_PAGEABLE(m)) {
+ /*
+ * This page is on the background queue
+ * but not on a pageable queue. This is
+ * likely a transient state and whoever
+ * took it out of its pageable queue
+ * will likely put it back on a pageable
+ * queue soon but we can't deal with it
+ * at this point, so let's ignore this
+ * page.
+ */
+ } else if (force_anonymous == FALSE || bg_m_object->internal) {
+ ignore_reference = TRUE;
+
+ if (bg_m_object->internal)
+ vm_pageout_considered_bq_internal++;
+ else
+ vm_pageout_considered_bq_external++;
+
+ break;
+ }
}
+#endif
+
/*
* The most eligible pages are ones we paged in speculatively,
* but which have not yet been touched.
*/
- if (!queue_empty(&sq->age_q) ) {
- m = (vm_page_t) queue_first(&sq->age_q);
+ if (!vm_page_queue_empty(&sq->age_q) && force_anonymous == FALSE) {
+ m = (vm_page_t) vm_page_queue_first(&sq->age_q);
- page_prev_state = PAGE_STATE_SPECULATIVE;
+ assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
break;
}
/*
* Try a clean-queue inactive page.
*/
- if (!queue_empty(&vm_page_queue_cleaned)) {
- m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+ if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
- page_prev_state = PAGE_STATE_CLEAN;
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
break;
}
grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
+ inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
- if (vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) {
+ if ((vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) ||
+ ((inactive_external_count < vm_page_anonymous_count) && (inactive_external_count < (vm_page_pageable_external_count / 3)))) {
grab_anonymous = TRUE;
anons_grabbed = 0;
}
+#if CONFIG_JETSAM
+ /* If the file-backed pool has accumulated
+ * significantly more pages than the jetsam
+ * threshold, prefer to reclaim those
+ * inline to minimise compute overhead of reclaiming
+ * anonymous pages.
+ * This calculation does not account for the CPU local
+ * external page queues, as those are expected to be
+ * much smaller relative to the global pools.
+ */
+ if (grab_anonymous) {
+ if (vm_page_pageable_external_count >
+ vm_page_filecache_min) {
+ if ((vm_page_pageable_external_count *
+ vm_pageout_memorystatus_fb_factor_dr) >
+ (memorystatus_available_pages_critical *
+ vm_pageout_memorystatus_fb_factor_nr)) {
+ grab_anonymous = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_grab_anon_overrides++;
+#endif
+ }
+ }
+#if DEVELOPMENT || DEBUG
+ if (grab_anonymous) {
+ vm_grab_anon_nops++;
- if (grab_anonymous == TRUE && vm_compression_available() == FALSE)
- grab_anonymous = FALSE;
-
- if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous)) {
+ }
+#endif
+ }
+#endif /* CONFIG_JETSAM */
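/*
 * Worked example of the cross-multiplication above, using the defaults
 * declared earlier (vm_pageout_memorystatus_fb_factor_nr == 5,
 * vm_pageout_memorystatus_fb_factor_dr == 2): grab_anonymous is
 * overridden whenever
 *	vm_page_pageable_external_count * 2 >
 *		memorystatus_available_pages_critical * 5
 * i.e. once the file-backed pool exceeds 2.5x the jetsam-critical
 * threshold, file pages are reclaimed inline rather than paying the
 * compressor cost for anonymous pages.
 */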
- if ( !queue_empty(&vm_page_queue_inactive) ) {
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
+
+ if ( !vm_page_queue_empty(&vm_page_queue_inactive) ) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
- page_prev_state = PAGE_STATE_INACTIVE;
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
anons_grabbed = 0;
+ if (vm_page_pageable_external_count < vm_page_filecache_min) {
+ if ((++reactivated_this_call % 100))
+ goto must_activate_page;
+ /*
+ * steal 1% of the file backed pages even if
+ * we are under the limit that has been set
+ * for a healthy filecache
+ */
+ }
break;
}
}
- if ( !queue_empty(&vm_page_queue_anonymous) ) {
- m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+ if ( !vm_page_queue_empty(&vm_page_queue_anonymous) ) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
- page_prev_state = PAGE_STATE_ANONYMOUS;
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
anons_grabbed++;
break;
vm_page_lock_queues();
delayed_unlock = 1;
+ force_anonymous = FALSE;
+
if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
goto Restart;
+ if (!vm_page_queue_empty(&sq->age_q))
+ goto Restart;
+
panic("vm_pageout: no victim");
/* NOTREACHED */
}
+ assert(VM_PAGE_PAGEABLE(m));
+ m_object = VM_PAGE_OBJECT(m);
force_anonymous = FALSE;
+ page_prev_q_state = m->vm_page_q_state;
+ requeue_insert_first = FALSE;
/*
* we just found this page on one of our queues...
* it can't also be on the pageout queue, so safe
- * to call VM_PAGE_QUEUES_REMOVE
+ * to call vm_page_queues_remove
*/
- assert(!m->pageout_queue);
-
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m, TRUE);
assert(!m->laundry);
assert(!m->private);
assert(!m->fictitious);
- assert(m->object != kernel_object);
- assert(m->phys_page != vm_page_guard_addr);
+ assert(m_object != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
- if (page_prev_state != PAGE_STATE_SPECULATIVE)
+ if (page_prev_q_state != VM_PAGE_ON_SPECULATIVE_Q &&
+ page_prev_q_state != VM_PAGE_ON_SECLUDED_Q)
vm_pageout_stats[vm_pageout_stat_now].considered++;
DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
* with the same object... if so, we've
* already got the lock
*/
- if (m->object != object) {
+ if (m_object != object) {
/*
* the object associated with candidate page is
* different from the one we were just working
* the queue... clumps of pages associated with the same
* object are fairly typical on the inactive and active queues
*/
- if (!vm_object_lock_try_scan(m->object)) {
+ if (!vm_object_lock_try_scan(m_object)) {
vm_page_t m_want = NULL;
vm_pageout_inactive_nolock++;
- if (page_prev_state == PAGE_STATE_CLEAN)
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_nolock++;
- if (page_prev_state == PAGE_STATE_SPECULATIVE)
- page_prev_state = PAGE_STATE_INACTIVE_FIRST;
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ requeue_insert_first = TRUE;
- pmap_clear_reference(m->phys_page);
+ pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
m->reference = FALSE;
/*
* is possible for the value to be a bit non-deterministic, but that's ok
* since it's only used as a hint
*/
- m->object->scan_collisions++;
-
- if ( !queue_empty(&sq->age_q) )
- m_want = (vm_page_t) queue_first(&sq->age_q);
- else if ( !queue_empty(&vm_page_queue_cleaned))
- m_want = (vm_page_t) queue_first(&vm_page_queue_cleaned);
- else if (anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous))
- m_want = (vm_page_t) queue_first(&vm_page_queue_inactive);
- else if ( !queue_empty(&vm_page_queue_anonymous))
- m_want = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+ m_object->scan_collisions = 1;
+
+ if ( !vm_page_queue_empty(&sq->age_q) )
+ m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
+ else if ( !vm_page_queue_empty(&vm_page_queue_cleaned))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ else if ( !vm_page_queue_empty(&vm_page_queue_inactive) &&
+ (anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ else if ( !vm_page_queue_empty(&vm_page_queue_anonymous))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
/*
* this is the next object we're going to be interested in
* returns control
*/
if (m_want)
- vm_pageout_scan_wants_object = m_want->object;
+ vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
/*
* force us to dump any collected free pages
goto requeue_page;
}
- object = m->object;
+ object = m_object;
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
try_failed = FALSE;
}
+ assert(m_object == object);
+ assert(VM_PAGE_OBJECT(m) == m_object);
+
if (catch_up_count)
catch_up_count--;
*/
vm_pageout_inactive_busy++;
- if (page_prev_state == PAGE_STATE_CLEAN)
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_busy++;
requeue_page:
- switch (page_prev_state) {
-
- case PAGE_STATE_SPECULATIVE:
- vm_page_speculate(m, FALSE);
- break;
-
- case PAGE_STATE_ANONYMOUS:
- case PAGE_STATE_CLEAN:
- case PAGE_STATE_INACTIVE:
- VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
- break;
-
- case PAGE_STATE_INACTIVE_FIRST:
- VM_PAGE_ENQUEUE_INACTIVE(m, TRUE);
- break;
+ if (requeue_insert_first)
+ vm_page_enqueue_inactive(m, TRUE);
+ else
+ vm_page_enqueue_inactive(m, FALSE);
+#if CONFIG_BACKGROUND_QUEUE
+ if (ignore_reference == TRUE) {
+ if (m_object->internal)
+ vm_pageout_rejected_bq_internal++;
+ else
+ vm_pageout_rejected_bq_external++;
}
+#endif
goto done_with_inactivepage;
}
if (m->tabled)
vm_page_remove(m, TRUE);
- assert(m->pageq.next == NULL &&
- m->pageq.prev == NULL);
- m->pageq.next = (queue_entry_t)local_freeq;
+ assert(m->pageq.next == 0 && m->pageq.prev == 0);
+ m->snext = local_freeq;
local_freeq = m;
local_freed++;
- if (page_prev_state == PAGE_STATE_SPECULATIVE)
+#if CONFIG_SECLUDED_MEMORY
+ if (page_prev_q_state == VM_PAGE_ON_SECLUDED_Q)
+ vm_pageout_freed_from_secluded++;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
vm_pageout_freed_from_speculative++;
- else if (page_prev_state == PAGE_STATE_CLEAN)
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_freed_from_cleaned++;
else
vm_pageout_freed_from_inactive_clean++;
- if (page_prev_state != PAGE_STATE_SPECULATIVE)
+ if (page_prev_q_state != VM_PAGE_ON_SPECULATIVE_Q &&
+ page_prev_q_state != VM_PAGE_ON_SECLUDED_Q)
vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
+ inactive_burst_count = 0;
goto done_with_inactivepage;
}
/*
if (object->purgable == VM_PURGABLE_EMPTY) {
if (m->pmapped == TRUE) {
/* unmap the page */
- refmod_state = pmap_disconnect(m->phys_page);
+ refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
goto reclaim_page;
}
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
/*
* With the VM compressor, the cost of
* reclaiming a page is much lower (no I/O),
/* just stick it back on! */
reactivated_this_call++;
- if (page_prev_state == PAGE_STATE_CLEAN)
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_volatile_reactivated++;
goto reactivate_page;
refmod_state = -1;
if (m->reference == FALSE && m->pmapped == TRUE) {
- refmod_state = pmap_get_refmod(m->phys_page);
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_REFERENCED)
m->reference = TRUE;
}
/*
- * if (m->cleaning && !m->pageout)
+ * if (m->cleaning && !m->free_when_done)
* If already cleaning this page in place and it hasn't
* been recently referenced, just pull off the queue.
* We can leave the page mapped, and upl_commit_range
* m->cleaning == TRUE
* and we'll handle it here
*
- * if (m->pageout && !m->cleaning)
+ * if (m->free_when_done && !m->cleaning)
* an msync INVALIDATE is in progress...
* this page has been marked for destruction
* after it has been cleaned,
* where 'cleaning' will be set...
* just leave it off the paging queues
*
- * if (m->pageout && m->clenaing)
+ * if (m->free_when_done && m->cleaning)
* an msync INVALIDATE is in progress
* and the UPL has already gathered this page...
* just leave it off the paging queues
*/
/*
- * page with m->pageout and still on the queues means that an
+ * page with m->free_when_done and still on the queues means that an
* MS_INVALIDATE is in progress on this page... leave it alone
*/
- if (m->pageout) {
+ if (m->free_when_done) {
goto done_with_inactivepage;
}
if (m->reference || m->dirty) {
/* deal with a rogue "reusable" page */
- VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
+ VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
}
- if (m->reference && !m->no_cache) {
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ vm_page_secluded_target > 0 &&
+ m_object->eligible_for_secluded &&
+ secluded_aging_policy == SECLUDED_AGING_FIFO) {
+ /*
+ * SECLUDED_AGING_FIFO:
+ * This victim page is eligible for the secluded pool
+ * and we're not aging secluded pages, so let's not
+ * reactivate it if it's been re-referenced.
+ * Later on, we'll move it to the secluded queue
+ * instead of freeing it.
+ */
+ ignore_reference_secluded = TRUE;
+ } else {
+ ignore_reference_secluded = FALSE;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ if (!m->no_cache &&
+#if CONFIG_BACKGROUND_QUEUE
+ ignore_reference == FALSE &&
+#endif
+#if CONFIG_SECLUDED_MEMORY
+ ignore_reference_secluded == FALSE &&
+#endif /* CONFIG_SECLUDED_MEMORY */
+ (m->reference ||
+ (m->xpmapped && !object->internal && (vm_page_xpmapped_external_count < (vm_page_external_count / 4))))) {
/*
* The page we pulled off the inactive list has
* been referenced. It is possible for other
} else {
uint32_t isinuse;
- if (page_prev_state == PAGE_STATE_CLEAN)
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_reference_reactivated++;
reactivate_page:
vm_page_deactivate(m);
vm_pageout_inactive_deactivated++;
} else {
+must_activate_page:
/*
* The page was/is being used, so put back on active list.
*/
vm_page_activate(m);
VM_STAT_INCR(reactivations);
+ inactive_burst_count = 0;
}
-
- if (page_prev_state == PAGE_STATE_CLEAN)
+#if CONFIG_BACKGROUND_QUEUE
+ if (ignore_reference == TRUE) {
+ if (m_object->internal)
+ vm_pageout_rejected_bq_internal++;
+ else
+ vm_pageout_rejected_bq_external++;
+ }
+#endif
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_reactivated++;
+#if CONFIG_SECLUDED_MEMORY
+ if (page_prev_q_state == VM_PAGE_ON_SECLUDED_Q)
+ vm_pageout_secluded_reactivated++;
+#endif /* CONFIG_SECLUDED_MEMORY */
vm_pageout_inactive_used++;
* the dirty bit.
*/
if ((refmod_state == -1) && !m->dirty && m->pmapped) {
- refmod_state = pmap_get_refmod(m->phys_page);
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
}
- forced_reclaim = TRUE;
- } else {
- forced_reclaim = FALSE;
}
XPR(XPR_VM_PAGEOUT,
}
}
throttle_inactive:
- if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
object->internal && m->dirty &&
(object->purgable == VM_PURGABLE_DENY ||
object->purgable == VM_PURGABLE_NONVOLATILE ||
object->purgable == VM_PURGABLE_VOLATILE)) {
- queue_enter(&vm_page_queue_throttled, m,
- vm_page_t, pageq);
- m->throttled = TRUE;
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_throttled, m,
+ vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
vm_page_throttled_count++;
vm_pageout_scan_reclaimed_throttled++;
+ inactive_burst_count = 0;
goto done_with_inactivepage;
}
if (inactive_throttled == TRUE) {
* that we can try to find clean pages in the active/inactive queues before
* deciding to jetsam a process
*/
- vm_pageout_scan_inactive_throttled_external++;
+ vm_pageout_scan_inactive_throttled_external++;
- queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
- m->active = TRUE;
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
vm_page_active_count++;
- if (m->object->internal) {
- vm_page_pageable_internal_count++;
- } else {
- vm_page_pageable_external_count++;
- }
+ vm_page_pageable_external_count++;
vm_pageout_adjust_io_throttles(iq, eq, FALSE);
object = VM_OBJECT_NULL;
vm_page_unlock_queues();
- VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
/* Kill first suitable process */
panic("vm_pageout_scan: Jetsam request failed\n");
}
- VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
vm_pageout_inactive_external_forced_jetsam_count++;
vm_page_lock_queues();
#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
force_anonymous = TRUE;
#endif
+ inactive_burst_count = 0;
goto done_with_inactivepage;
} else {
- if (page_prev_state == PAGE_STATE_SPECULATIVE)
- page_prev_state = PAGE_STATE_INACTIVE;
-
vm_pageout_scan_inactive_throttled_internal++;
- goto requeue_page;
+ goto must_activate_page;
}
}
* of likely usage of the page.
*/
if (m->pmapped == TRUE) {
+ int pmap_options;
- if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || object->internal == FALSE) {
- refmod_state = pmap_disconnect_options(m->phys_page, 0, NULL);
+ /*
+ * Don't count this page as going into the compressor
+ * if any of these are true:
+ * 1) compressed pager isn't enabled
+ * 2) Freezer enabled device with compressed pager
+ * backend (exclusive use) i.e. most of the VM system
+ * (including vm_pageout_scan) has no knowledge of
+ * the compressor
+ * 3) This page belongs to a file and hence will not be
+ * sent into the compressor
+ */
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
+ object->internal == FALSE) {
+ pmap_options = 0;
+ } else if (m->dirty || m->precious) {
+ /*
+ * VM knows that this page is dirty (or
+ * precious) and needs to be compressed
+ * rather than freed.
+ * Tell the pmap layer to count this page
+ * as "compressed".
+ */
+ pmap_options = PMAP_OPTIONS_COMPRESSOR;
} else {
- refmod_state = pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
+ /*
+ * VM does not know if the page needs to
+ * be preserved but the pmap layer might tell
+ * us if any mapping has "modified" it.
+ * Let the pmap layer count this page
+ * as compressed if and only if it has been
+ * modified.
+ */
+ pmap_options =
+ PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
}
-
+ refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
+ pmap_options,
+ NULL);
if (refmod_state & VM_MEM_MODIFIED) {
SET_PAGE_DIRTY(m, FALSE);
}
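+			/*
+			 * Net effect of the pmap_options selection above:
+			 * compressor inactive or file-backed page -> 0 (not
+			 * counted), dirty/precious internal page ->
+			 * PMAP_OPTIONS_COMPRESSOR (always counted), otherwise
+			 * PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED (counted only
+			 * if some mapping dirtied it).
+			 */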
*/
if (!m->dirty && !m->precious) {
- if (page_prev_state == PAGE_STATE_SPECULATIVE)
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
vm_pageout_speculative_clean++;
else {
- if (page_prev_state == PAGE_STATE_ANONYMOUS)
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
vm_pageout_inactive_anonymous++;
- else if (page_prev_state == PAGE_STATE_CLEAN)
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
vm_pageout_cleaned_reclaimed++;
- if (m->was_dirty) {
- /* page on clean queue used to be dirty; we should increment the vm_stat pageout count here */
- VM_STAT_INCR(pageouts);
- DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
- }
vm_pageout_inactive_clean++;
}
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ vm_page_secluded_target > 0 &&
+ !m->fictitious &&
+ m_object->eligible_for_secluded &&
+ num_tasks_can_use_secluded_mem == 0 &&
+ (secluded_aging_policy == SECLUDED_AGING_FIFO ||
+ ((secluded_aging_policy ==
+ SECLUDED_AGING_AFTER_INACTIVE) &&
+ (page_prev_q_state != VM_PAGE_ON_SECLUDED_Q)))) {
+ assert(page_prev_q_state != VM_PAGE_ON_SECLUDED_Q);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ LCK_MTX_ASSERT(&vm_page_queue_lock,
+ LCK_MTX_ASSERT_OWNED);
+ vm_page_queue_enter(&vm_page_queue_secluded,
+ m,
+ vm_page_t,
+ pageq);
+ m->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+ vm_object_unlock(m_object);
+ object = VM_OBJECT_NULL;
+ vm_page_secluded_count++;
+ vm_page_secluded_count_inuse++;
+ assert(!m_object->internal);
+// vm_page_pageable_external_count++;
+ m = VM_PAGE_NULL;
+ goto done_with_inactivepage;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
/*
* OK, at this point we have found a page we are going to free.
*/
+#if CONFIG_PHANTOM_CACHE
+ if (!object->internal)
+ vm_phantom_cache_add_ghost(m);
+#endif
goto reclaim_page;
}
if (inactive_throttled == TRUE)
goto throttle_inactive;
-#if VM_PRESSURE_EVENTS
- vm_pressure_response();
-#endif /* VM_PRESSURE_EVENTS */
-
+#if VM_PRESSURE_EVENTS
+#if CONFIG_JETSAM
+
/*
- * do NOT set the pageout bit!
- * sure, we might need free pages, but this page is going to take time to become free
- * anyway, so we may as well put it on the clean queue first and take it from there later
- * if necessary. that way, we'll ensure we don't free up too much. -mj
+ * If Jetsam is enabled, then the sending
+ * of memory pressure notifications is handled
+ * from the same thread that takes care of high-water
+ * and other jetsams i.e. the memorystatus_thread.
*/
- vm_pageout_cluster(m, FALSE);
- if (page_prev_state == PAGE_STATE_ANONYMOUS)
+#else /* CONFIG_JETSAM */
+
+ vm_pressure_response();
+
+#endif /* CONFIG_JETSAM */
+#endif /* VM_PRESSURE_EVENTS */
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
vm_pageout_inactive_anonymous++;
if (object->internal)
vm_pageout_inactive_dirty_internal++;
else
vm_pageout_inactive_dirty_external++;
+ /*
+ * do NOT set the pageout bit!
+ * sure, we might need free pages, but this page is going to take time to become free
+ * anyway, so we may as well put it on the clean queue first and take it from there later
+ * if necessary. that way, we'll ensure we don't free up too much. -mj
+ */
+ vm_pageout_cluster(m, FALSE, FALSE);
done_with_inactivepage:
- inactive_burst_count = 0;
if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {
+ boolean_t need_delay = TRUE;
if (object != NULL) {
vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_object_unlock(object);
object = NULL;
}
+ vm_page_unlock_queues();
+
if (local_freeq) {
- vm_page_unlock_queues();
VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
vm_page_free_count, local_freed, delayed_unlock_limit, 4);
local_freeq = NULL;
local_freed = 0;
- vm_page_lock_queues();
- } else
+ need_delay = FALSE;
+ }
+ vm_consider_waking_compactor_swapper();
+
+ vm_page_lock_queues();
+
+ if (need_delay == TRUE)
lck_mtx_yield(&vm_page_queue_lock);
delayed_unlock = 1;
}
vm_pageout_considered_page++;
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
- vm_consider_waking_compactor_swapper();
-
/*
* back to top of pageout scan loop
*/
{
int free_after_reserve;
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
if (vm_page_free_target < vm_page_free_min + 5)
vm_page_free_target = vm_page_free_min + 5;
- vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
- vm_page_creation_throttle = vm_page_free_target * 3;
+ vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
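+	/*
+	 * i.e. with a free target of 4000 pages the throttle limit is
+	 * now 2000 (half the target), down from the previous two-thirds
+	 * (about 2666), so throttling engages later.
+	 */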
}
/*
DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
vm_pageout_scan_event_counter++;
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ vm_pageout_running = TRUE;
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
vm_pageout_scan();
/*
* we hold both the vm_page_queue_free_lock
assert(vm_page_free_wanted_privileged == 0);
assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
+ vm_pageout_running = FALSE;
+ if (vm_pageout_waiter) {
+ vm_pageout_waiter = FALSE;
+ thread_wakeup((event_t)&vm_pageout_waiter);
+ }
+
lck_mtx_unlock(&vm_page_queue_free_lock);
vm_page_unlock_queues();
/*NOTREACHED*/
}
-
-#ifdef FAKE_DEADLOCK
-
-#define FAKE_COUNT 5000
-
-int internal_count = 0;
-int fake_deadlock = 0;
-
-#endif
-
-static void
-vm_pageout_iothread_continue(struct vm_pageout_queue *q)
+kern_return_t
+vm_pageout_wait(uint64_t deadline)
{
- vm_page_t m = NULL;
- vm_object_t object;
- vm_object_offset_t offset;
- memory_object_t pager;
- thread_t self = current_thread();
-
- if ((vm_pageout_internal_iothread != THREAD_NULL)
- && (self == vm_pageout_external_iothread )
- && (self->options & TH_OPT_VMPRIV))
- self->options &= ~TH_OPT_VMPRIV;
-
- vm_page_lockspin_queues();
-
- while ( !queue_empty(&q->pgo_pending) ) {
-
- q->pgo_busy = TRUE;
- queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
- if (m->object->object_slid) {
- panic("slid page %p not allowed on this path\n", m);
- }
- VM_PAGE_CHECK(m);
- m->pageout_queue = FALSE;
- m->pageq.next = NULL;
- m->pageq.prev = NULL;
-
- /*
- * grab a snapshot of the object and offset this
- * page is tabled in so that we can relookup this
- * page after we've taken the object lock - these
- * fields are stable while we hold the page queues lock
- * but as soon as we drop it, there is nothing to keep
- * this page in this object... we hold an activity_in_progress
- * on this object which will keep it from terminating
- */
- object = m->object;
- offset = m->offset;
-
- vm_page_unlock_queues();
-
-#ifdef FAKE_DEADLOCK
- if (q == &vm_pageout_queue_internal) {
- vm_offset_t addr;
- int pg_count;
-
- internal_count++;
-
- if ((internal_count == FAKE_COUNT)) {
-
- pg_count = vm_page_free_count + vm_page_free_reserved;
-
- if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
- kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
- }
- internal_count = 0;
- fake_deadlock++;
- }
- }
-#endif
- vm_object_lock(object);
-
- m = vm_page_lookup(object, offset);
-
- if (m == NULL ||
- m->busy || m->cleaning || m->pageout_queue || !m->laundry) {
- /*
- * it's either the same page that someone else has
- * started cleaning (or it's finished cleaning or
- * been put back on the pageout queue), or
- * the page has been freed or we have found a
- * new page at this offset... in all of these cases
- * we merely need to release the activity_in_progress
- * we took when we put the page on the pageout queue
- */
- vm_object_activity_end(object);
- vm_object_unlock(object);
-
- vm_page_lockspin_queues();
- continue;
- }
- if (!object->pager_initialized) {
-
- /*
- * If there is no memory object for the page, create
- * one and hand it to the default pager.
- */
-
- if (!object->pager_initialized)
- vm_object_collapse(object,
- (vm_object_offset_t) 0,
- TRUE);
- if (!object->pager_initialized)
- vm_object_pager_create(object);
- if (!object->pager_initialized) {
- /*
- * Still no pager for the object.
- * Reactivate the page.
- *
- * Should only happen if there is no
- * default pager.
- */
- m->pageout = FALSE;
-
- vm_page_lockspin_queues();
-
- vm_pageout_throttle_up(m);
- vm_page_activate(m);
- vm_pageout_dirty_no_pager++;
-
- vm_page_unlock_queues();
-
- /*
- * And we are done with it.
- */
- vm_object_activity_end(object);
- vm_object_unlock(object);
-
- vm_page_lockspin_queues();
- continue;
- }
- }
- pager = object->pager;
-
- if (pager == MEMORY_OBJECT_NULL) {
- /*
- * This pager has been destroyed by either
- * memory_object_destroy or vm_object_destroy, and
- * so there is nowhere for the page to go.
- */
- if (m->pageout) {
- /*
- * Just free the page... VM_PAGE_FREE takes
- * care of cleaning up all the state...
- * including doing the vm_pageout_throttle_up
- */
- VM_PAGE_FREE(m);
- } else {
- vm_page_lockspin_queues();
-
- vm_pageout_throttle_up(m);
- vm_page_activate(m);
-
- vm_page_unlock_queues();
-
- /*
- * And we are done with it.
- */
- }
- vm_object_activity_end(object);
- vm_object_unlock(object);
-
- vm_page_lockspin_queues();
- continue;
- }
-#if 0
- /*
- * we don't hold the page queue lock
- * so this check isn't safe to make
- */
- VM_PAGE_CHECK(m);
-#endif
- /*
- * give back the activity_in_progress reference we
- * took when we queued up this page and replace it
- * it with a paging_in_progress reference that will
- * also hold the paging offset from changing and
- * prevent the object from terminating
- */
- vm_object_activity_end(object);
- vm_object_paging_begin(object);
- vm_object_unlock(object);
-
- /*
- * Send the data to the pager.
- * any pageout clustering happens there
- */
- memory_object_data_return(pager,
- m->offset + object->paging_offset,
- PAGE_SIZE,
- NULL,
- NULL,
- FALSE,
- FALSE,
- 0);
-
- vm_object_lock(object);
- vm_object_paging_end(object);
- vm_object_unlock(object);
-
- vm_pageout_io_throttle();
+ kern_return_t kr;
- vm_page_lockspin_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr); ) {
+ vm_pageout_waiter = TRUE;
+ if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
+ &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
+ (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
+ kr = KERN_OPERATION_TIMED_OUT;
+ }
}
- q->pgo_busy = FALSE;
- q->pgo_idle = TRUE;
-
- assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
- vm_page_unlock_queues();
+ lck_mtx_unlock(&vm_page_queue_free_lock);
- thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) q);
- /*NOTREACHED*/
+ return (kr);
}
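+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this change):
+ * bound the wait on the in-flight pageout pass with an absolute
+ * deadline, e.g. 100ms out:
+ *
+ *	uint64_t deadline;
+ *	kern_return_t kr;
+ *
+ *	clock_interval_to_deadline(100, 1000 * 1000, &deadline);
+ *	kr = vm_pageout_wait(deadline);
+ *	// KERN_OPERATION_TIMED_OUT => pageout still running at deadline
+ */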
vm_page_lockspin_queues();
- while ( !queue_empty(&q->pgo_pending) ) {
+ while ( !vm_page_queue_empty(&q->pgo_pending) ) {
q->pgo_busy = TRUE;
- queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
- if (m->object->object_slid) {
- panic("slid page %p not allowed on this path\n", m);
- }
- VM_PAGE_CHECK(m);
- m->pageout_queue = FALSE;
- m->pageq.next = NULL;
- m->pageq.prev = NULL;
+ vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+ assert(m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q);
+ VM_PAGE_CHECK(m);
/*
* grab a snapshot of the object and offset this
* page is tabled in so that we can relookup this
* this page in this object... we hold an activity_in_progress
* on this object which will keep it from terminating
*/
- object = m->object;
+ object = VM_PAGE_OBJECT(m);
offset = m->offset;
+ if (object->object_slid) {
+ panic("slid page %p not allowed on this path\n", m);
+ }
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+
vm_page_unlock_queues();
vm_object_lock(object);
m = vm_page_lookup(object, offset);
if (m == NULL ||
- m->busy || m->cleaning || m->pageout_queue || !m->laundry) {
+ m->busy || m->cleaning || !m->laundry || (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
/*
* it's either the same page that someone else has
* started cleaning (or it's finished cleaning or
* memory_object_destroy or vm_object_destroy, and
* so there is nowhere for the page to go.
*/
- if (m->pageout) {
+ if (m->free_when_done) {
/*
* Just free the page... VM_PAGE_FREE takes
* care of cleaning up all the state...
uint32_t vm_compressor_failed;
+#define MAX_FREE_BATCH 32
+uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
+ * this thread.
+ */
+uint64_t vm_compressor_thread_runtime;
+
static void
vm_pageout_iothread_internal_continue(struct cq *cq)
{
struct vm_pageout_queue *q;
vm_page_t m = NULL;
- vm_object_t object;
- memory_object_t pager;
boolean_t pgo_draining;
vm_page_t local_q;
int local_cnt;
vm_page_t local_freeq = NULL;
int local_freed = 0;
int local_batch_size;
- kern_return_t retval;
KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
q = cq->q;
- local_batch_size = q->pgo_maxlaundry / (vm_compressor_thread_count * 4);
+ local_batch_size = q->pgo_maxlaundry / (vm_compressor_thread_count * 2);
+#if RECORD_THE_COMPRESSED_DATA
+ if (q->pgo_laundry)
+ c_compressed_record_init();
+#endif
while (TRUE) {
+ int pages_left_on_q = 0;
local_cnt = 0;
local_q = NULL;
KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
- KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, 0, 0, 0, 0, 0);
-
- while ( !queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
- queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+ while ( !vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
+ vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+ assert(m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q);
VM_PAGE_CHECK(m);
+
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+ m->laundry = FALSE;
- m->pageout_queue = FALSE;
- m->pageq.prev = NULL;
-
- m->pageq.next = (queue_entry_t)local_q;
+ m->snext = local_q;
local_q = m;
local_cnt++;
}
q->pgo_busy = TRUE;
- if ((pgo_draining = q->pgo_draining) == FALSE)
+ if ((pgo_draining = q->pgo_draining) == FALSE) {
vm_pageout_throttle_up_batch(q, local_cnt);
+ pages_left_on_q = q->pgo_laundry;
+ } else
+ pages_left_on_q = q->pgo_laundry - local_cnt;
vm_page_unlock_queues();
- KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+#if !RECORD_THE_COMPRESSED_DATA
+ if (pages_left_on_q >= local_batch_size && cq->id < (vm_compressor_thread_count - 1))
+ thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
+#endif
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
while (local_q) {
-
- m = local_q;
- local_q = (vm_page_t)m->pageq.next;
- m->pageq.next = NULL;
-
- if (m->object->object_slid) {
- panic("slid page %p not allowed on this path\n", m);
- }
-
- object = m->object;
- pager = object->pager;
-
- if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
-
- KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
-
- vm_object_lock(object);
- /*
- * If there is no memory object for the page, create
- * one and hand it to the compression pager.
- */
-
- if (!object->pager_initialized)
- vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
- if (!object->pager_initialized)
- vm_object_compressor_pager_create(object);
-
- if (!object->pager_initialized) {
- /*
- * Still no pager for the object.
- * Reactivate the page.
- *
- * Should only happen if there is no
- * compression pager
- */
- m->pageout = FALSE;
- m->laundry = FALSE;
- PAGE_WAKEUP_DONE(m);
-
- vm_page_lockspin_queues();
- vm_page_activate(m);
- vm_pageout_dirty_no_pager++;
- vm_page_unlock_queues();
-
- /*
- * And we are done with it.
- */
- vm_object_activity_end(object);
- vm_object_unlock(object);
+ KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
- continue;
- }
- pager = object->pager;
+ m = local_q;
+ local_q = m->snext;
+ m->snext = NULL;
- if (pager == MEMORY_OBJECT_NULL) {
- /*
- * This pager has been destroyed by either
- * memory_object_destroy or vm_object_destroy, and
- * so there is nowhere for the page to go.
- */
- if (m->pageout) {
- /*
- * Just free the page... VM_PAGE_FREE takes
- * care of cleaning up all the state...
- * including doing the vm_pageout_throttle_up
- */
- VM_PAGE_FREE(m);
- } else {
- m->laundry = FALSE;
- PAGE_WAKEUP_DONE(m);
+ if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m, FALSE) == KERN_SUCCESS) {
- vm_page_lockspin_queues();
- vm_page_activate(m);
- vm_page_unlock_queues();
+ m->snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
- /*
- * And we are done with it.
- */
- }
- vm_object_activity_end(object);
- vm_object_unlock(object);
+ if (local_freed >= MAX_FREE_BATCH) {
- continue;
+ vm_page_free_list(local_freeq, TRUE);
+ local_freeq = NULL;
+ local_freed = 0;
}
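+			/*
+			 * freshly compressed pages are batched onto
+			 * local_freeq and returned to the free list
+			 * MAX_FREE_BATCH (32) at a time, so the free-list
+			 * lock is taken once per batch instead of once
+			 * per page.
+			 */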
- vm_object_unlock(object);
-
- KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
}
- while (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT)) {
+#if !CONFIG_JETSAM
+ while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
kern_return_t wait_result;
int need_wakeup = 0;
}
lck_mtx_lock_spin(&vm_page_queue_free_lock);
- if (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT)) {
-
+ if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+
if (vm_page_free_wanted_privileged++ == 0)
need_wakeup = 1;
wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
if (need_wakeup)
thread_wakeup((event_t)&vm_page_free_wanted);
- if (wait_result == THREAD_WAITING)
- thread_block(THREAD_CONTINUE_NULL);
- } else
- lck_mtx_unlock(&vm_page_queue_free_lock);
- }
- retval = vm_compressor_pager_put(pager, m->offset + object->paging_offset, m->phys_page, &cq->current_chead, cq->scratch_buf);
+ if (wait_result == THREAD_WAITING)
+ thread_block(THREAD_CONTINUE_NULL);
+ } else
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+#endif
+ }
+ if (local_freeq) {
+ vm_page_free_list(local_freeq, TRUE);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ if (pgo_draining == TRUE) {
+ vm_page_lockspin_queues();
+ vm_pageout_throttle_up_batch(q, local_cnt);
+ vm_page_unlock_queues();
+ }
+ }
+ KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ /*
+ * queue lock is held and our q is empty
+ */
+ q->pgo_busy = FALSE;
+ q->pgo_idle = TRUE;
+
+ assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
+ vm_page_unlock_queues();
+
+ if (__improbable(vm_compressor_time_thread)) {
+ vm_compressor_thread_runtime = thread_get_runtime_self();
+ }
+
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
+ /*NOTREACHED*/
+}
+
+
+
+static void
+vm_pageout_immediate(vm_page_t m, boolean_t object_locked_by_caller)
+{
+ assert(vm_pageout_immediate_scratch_buf);
+
+ if (vm_pageout_compress_page(&vm_pageout_immediate_chead, vm_pageout_immediate_scratch_buf, m, object_locked_by_caller) == KERN_SUCCESS) {
+
+ vm_page_free_prepare_object(m, TRUE);
+ vm_page_release(m, TRUE);
+ }
+}
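+
+/*
+ * Sketch of the intended use (assuming the single-compressor-thread
+ * configuration that sets vm_compressor_immediate_preferred): the
+ * caller hands us one page, we compress it synchronously on the
+ * caller's thread via vm_pageout_compress_page(), and on success the
+ * page is freed without ever visiting the internal pageout queue.
+ */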
+
+
+kern_return_t
+vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m, boolean_t object_locked_by_caller)
+{
+ vm_object_t object;
+ memory_object_t pager;
+ int compressed_count_delta;
+ kern_return_t retval;
+
+ object = VM_PAGE_OBJECT(m);
+
+ if (object->object_slid) {
+ panic("slid page %p not allowed on this path\n", m);
+ }
+ assert(!m->free_when_done);
+ assert(!m->laundry);
+
+ pager = object->pager;
+
+ if (object_locked_by_caller == FALSE && (!object->pager_initialized || pager == MEMORY_OBJECT_NULL)) {
+
+ KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
+
+ vm_object_lock(object);
+
+ /*
+ * If there is no memory object for the page, create
+ * one and hand it to the compression pager.
+ */
+
+ if (!object->pager_initialized)
+ vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
+ if (!object->pager_initialized)
+ vm_object_compressor_pager_create(object);
+
+ pager = object->pager;
- vm_object_lock(object);
- m->laundry = FALSE;
- m->pageout = FALSE;
+ if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
+ /*
+ * Still no pager for the object,
+ * or the pager has been destroyed.
+ * Reactivate the page.
+ *
+ * Should only happen if there is no
+ * compression pager
+ */
+ PAGE_WAKEUP_DONE(m);
- if (retval == KERN_SUCCESS) {
+ vm_page_lockspin_queues();
+ vm_page_activate(m);
+ vm_pageout_dirty_no_pager++;
+ vm_page_unlock_queues();
+
+ /*
+ * And we are done with it.
+ */
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
- vm_page_compressions_failing = FALSE;
+ return KERN_FAILURE;
+ }
+ vm_object_unlock(object);
- VM_STAT_INCR(compressions);
-
- if (m->tabled)
- vm_page_remove(m, TRUE);
- vm_object_activity_end(object);
- vm_object_unlock(object);
+ KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
+ }
+ assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
- m->pageq.next = (queue_entry_t)local_freeq;
- local_freeq = m;
- local_freed++;
+ if (object_locked_by_caller == FALSE)
+ assert(object->activity_in_progress > 0);
- } else {
- PAGE_WAKEUP_DONE(m);
+ retval = vm_compressor_pager_put(
+ pager,
+ m->offset + object->paging_offset,
+ VM_PAGE_GET_PHYS_PAGE(m),
+ current_chead,
+ scratch_buf,
+ &compressed_count_delta);
- vm_page_lockspin_queues();
+ if (object_locked_by_caller == FALSE) {
+ vm_object_lock(object);
- vm_page_activate(m);
- vm_compressor_failed++;
+ assert(object->activity_in_progress > 0);
+ assert(VM_PAGE_OBJECT(m) == object);
+ }
- vm_page_compressions_failing = TRUE;
+ vm_compressor_pager_count(pager,
+ compressed_count_delta,
+ FALSE, /* shared_lock */
+ object);
- vm_page_unlock_queues();
+ assert( !VM_PAGE_WIRED(m));
- vm_object_activity_end(object);
- vm_object_unlock(object);
- }
- }
- if (local_freeq) {
- vm_page_free_list(local_freeq, TRUE);
-
- local_freeq = NULL;
- local_freed = 0;
- }
- if (pgo_draining == TRUE) {
- vm_page_lockspin_queues();
- vm_pageout_throttle_up_batch(q, local_cnt);
- vm_page_unlock_queues();
+ if (retval == KERN_SUCCESS) {
+ /*
+ * If the object is purgeable, its owner's
+ * purgeable ledgers will be updated in
+ * vm_page_remove() but the page still
+ * contributes to the owner's memory footprint,
+ * so account for it as such.
+ */
+ if (object->purgable != VM_PURGABLE_DENY &&
+ object->vo_purgeable_owner != NULL) {
+ /* one more compressed purgeable page */
+ vm_purgeable_compressed_update(object,
+ +1);
}
- }
- KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ VM_STAT_INCR(compressions);
+
+ if (m->tabled)
+ vm_page_remove(m, TRUE);
- /*
- * queue lock is held and our q is empty
- */
- q->pgo_busy = FALSE;
- q->pgo_idle = TRUE;
+ } else {
+ PAGE_WAKEUP_DONE(m);
- assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
- vm_page_unlock_queues();
+ vm_page_lockspin_queues();
- KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+ vm_page_activate(m);
+ vm_compressor_failed++;
- thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
- /*NOTREACHED*/
+ vm_page_unlock_queues();
+ }
+ if (object_locked_by_caller == FALSE) {
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+ }
+ return retval;
}
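+
+/*
+ * Return-value contract, as implemented above: KERN_SUCCESS means the
+ * page's contents now live in the compressor and the page has been
+ * removed from its object, so the caller may free it; on any failure
+ * the page has been reactivated and must not be freed by the caller.
+ */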
-
static void
vm_pageout_adjust_io_throttles(struct vm_pageout_queue *iq, struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
if (hibernate_cleaning_in_progress == TRUE)
req_lowpriority = FALSE;
- if ((DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) && iq->pgo_inited == TRUE && iq->pgo_lowpriority != req_lowpriority)
- set_iq = TRUE;
-
if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority)
set_eq = TRUE;
DTRACE_VM(laundryunthrottle);
}
if (set_iq == TRUE) {
- proc_set_task_policy_thread(kernel_task, iq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
+ proc_set_thread_policy_with_tid(kernel_task, iq->pgo_tid,
+ TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
iq->pgo_lowpriority = req_lowpriority;
}
if (set_eq == TRUE) {
- proc_set_task_policy_thread(kernel_task, eq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
+ proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
+ TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
eq->pgo_lowpriority = req_lowpriority;
}
self->options |= TH_OPT_VMPRIV;
- DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
- proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
- TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
+ proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
+ TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
vm_page_lock_queues();
vm_page_unlock_queues();
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
- vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
- else
- vm_pageout_iothread_continue(&vm_pageout_queue_external);
+ vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
/*NOTREACHED*/
}
self->options |= TH_OPT_VMPRIV;
- if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) {
- DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
-
- proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
- TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
- }
vm_page_lock_queues();
vm_pageout_queue_internal.pgo_tid = self->thread_id;
vm_page_unlock_queues();
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
- cq->q = &vm_pageout_queue_internal;
- cq->current_chead = NULL;
- cq->scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
+ if (vm_restricted_to_single_processor == TRUE)
+ thread_vm_bind_group_add();
- vm_pageout_iothread_internal_continue(cq);
- } else
- vm_pageout_iothread_continue(&vm_pageout_queue_internal);
+ vm_pageout_iothread_internal_continue(cq);
/*NOTREACHED*/
}
}
}
-
extern boolean_t memorystatus_manual_testing_on;
extern unsigned int memorystatus_level;
-
#if VM_PRESSURE_EVENTS
+boolean_t vm_pressure_events_enabled = FALSE;
+
void
vm_pressure_response(void)
{
-
vm_pressure_level_t old_level = kVMPressureNormal;
int new_level = -1;
+ unsigned int total_pages;
+ uint64_t available_memory = 0;
+
+ if (vm_pressure_events_enabled == FALSE)
+ return;
- uint64_t available_memory = (((uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY) * 100);
- memorystatus_level = (unsigned int) (available_memory / atop_64(max_mem));
+ available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
+
+ total_pages = (unsigned int) atop_64(max_mem);
+#if CONFIG_SECLUDED_MEMORY
+ total_pages -= vm_page_secluded_count;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
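+	/*
+	 * e.g. (illustrative numbers): on a device where max_mem works
+	 * out to 262144 pages, 131072 available non-compressed pages
+	 * yields memorystatus_level = 131072 * 100 / 262144 = 50.
+	 */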
if (memorystatus_manual_testing_on) {
return;
if (new_level != -1) {
memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
- if (old_level != new_level) {
+ if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != new_level)) {
if (vm_pressure_thread_running == FALSE) {
thread_wakeup(&vm_pressure_thread);
}
- thread_wakeup(&vm_pressure_changed);
+
+ if (old_level != new_level) {
+ thread_wakeup(&vm_pressure_changed);
+ }
}
}
mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) {
#if !VM_PRESSURE_EVENTS
-
+
return KERN_FAILURE;
#else /* VM_PRESSURE_EVENTS */
#if VM_PRESSURE_EVENTS
void
vm_pressure_thread(void) {
- static boolean_t set_up_thread = FALSE;
+ static boolean_t thread_initialized = FALSE;
- if (set_up_thread) {
+ if (thread_initialized == TRUE) {
vm_pressure_thread_running = TRUE;
consider_vm_pressure_events();
vm_pressure_thread_running = FALSE;
}
- set_up_thread = TRUE;
+ thread_initialized = TRUE;
assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
thread_block((thread_continue_t)vm_pressure_thread);
}
* called once per-second via "compute_averages"
*/
void
-compute_pageout_gc_throttle()
+compute_pageout_gc_throttle(__unused void *arg)
{
if (vm_pageout_considered_page != vm_pageout_considered_page_last) {
stack_collect();
consider_machine_collect();
+ m_drain();
do {
if (consider_buffer_cache_collect != NULL) {
* consider_zone_gc should be last, because the other operations
* might return memory to zones.
*/
- consider_zone_gc(buf_large_zfree);
+ consider_zone_gc();
}
first_try = FALSE;
}
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+
+#define FBDP_TEST_COLLAPSE_COMPRESSOR 0
+#define FBDP_TEST_WIRE_AND_EXTRACT 0
+#define FBDP_TEST_PAGE_WIRE_OVERFLOW 0
+
+#if FBDP_TEST_COLLAPSE_COMPRESSOR
+extern boolean_t vm_object_collapse_compressor_allowed;
+#include <IOKit/IOLib.h>
+#endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
+
+#if FBDP_TEST_WIRE_AND_EXTRACT
+extern ledger_template_t task_ledger_template;
+#include <mach/mach_vm.h>
+extern ppnum_t vm_map_get_phys_page(vm_map_t map,
+ vm_offset_t offset);
+#endif /* FBDP_TEST_WIRE_AND_EXTRACT */
+
+
+void
+vm_set_restrictions()
+{
+ host_basic_info_data_t hinfo;
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+
+#define BSD_HOST 1
+ host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+
+ assert(hinfo.max_cpus > 0);
+
+ if (hinfo.max_cpus <= 3) {
+ /*
+ * on systems with a limited number of CPUS, bind the
+ * 4 major threads that can free memory and that tend to use
+ * a fair bit of CPU under pressured conditions to a single processor.
+	 * This ensures that these threads don't hog all of the available CPUs
+	 * (important for camera launch), while allowing them to run independently
+	 * with respect to locks... the 4 threads are
+ * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
+ * vm_compressor_swap_trigger_thread (minor and major compactions),
+ * memorystatus_thread (jetsams).
+ *
+ * the first time the thread is run, it is responsible for checking the
+ * state of vm_restricted_to_single_processor, and if TRUE it calls
+ * thread_bind_master... someday this should be replaced with a group
+ * scheduling mechanism and KPI.
+ */
+ vm_restricted_to_single_processor = TRUE;
+ }
+}
+
void
vm_pageout(void)
* Set thread privileges.
*/
s = splsched();
+
thread_lock(self);
- self->priority = BASEPRI_PREEMPT - 1;
- set_sched_pri(self, self->priority);
+ self->options |= TH_OPT_VMPRIV;
+ sched_set_thread_base_priority(self, BASEPRI_PREEMPT - 1);
thread_unlock(self);
if (!self->reserved_stack)
self->reserved_stack = self->kernel_stack;
+ if (vm_restricted_to_single_processor == TRUE)
+ thread_vm_bind_group_add();
+
splx(s);
/*
if (vm_pageout_burst_inactive_throttle == 0)
vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
-#if !CONFIG_JETSAM
- vm_page_filecache_min = (uint32_t) (max_mem / PAGE_SIZE) / 20;
- if (vm_page_filecache_min < VM_PAGE_FILECACHE_MIN)
- vm_page_filecache_min = VM_PAGE_FILECACHE_MIN;
-#endif
-
/*
* Set kernel task to low backing store privileged
* status
vm_page_free_reserve(0);
- queue_init(&vm_pageout_queue_external.pgo_pending);
+ vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
vm_pageout_queue_external.pgo_laundry = 0;
vm_pageout_queue_external.pgo_idle = FALSE;
vm_pageout_queue_external.pgo_tid = -1;
vm_pageout_queue_external.pgo_inited = FALSE;
-
- queue_init(&vm_pageout_queue_internal.pgo_pending);
+ vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
vm_pageout_queue_internal.pgo_maxlaundry = 0;
vm_pageout_queue_internal.pgo_laundry = 0;
vm_pageout_queue_internal.pgo_idle = FALSE;
#endif
vm_object_reaper_init();
-
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
+
+
+ bzero(&vm_config, sizeof(vm_config));
+
+ switch(vm_compressor_mode) {
+
+ case VM_PAGER_DEFAULT:
+ printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
+
+ case VM_PAGER_COMPRESSOR_WITH_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ vm_config.swap_is_active = TRUE;
+ break;
+
+ case VM_PAGER_COMPRESSOR_NO_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ break;
+
+ case VM_PAGER_FREEZER_DEFAULT:
+ printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
+
+ case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ break;
+
+ case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ vm_config.freezer_swap_is_active = TRUE;
+ break;
+
+ case VM_PAGER_NOT_CONFIGURED:
+ break;
+
+ default:
+ printf("unknown compressor mode - %x\n", vm_compressor_mode);
+ break;
+ }
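+	/*
+	 * Net effect of the switch above: every configured mode sets
+	 * compressor_is_present and swap_is_present; WITH_SWAP also
+	 * activates both compressor and swap, plain NO_SWAP activates
+	 * only the compressor, the freezer NO_SWAP mode activates
+	 * neither, and the NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP
+	 * mode activates the compressor plus freezer swap.
+	 */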
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT)
vm_compressor_pager_init();
+#if VM_PRESSURE_EVENTS
+ vm_pressure_events_enabled = TRUE;
+#endif /* VM_PRESSURE_EVENTS */
+
+#if CONFIG_PHANTOM_CACHE
+ vm_phantom_cache_init();
+#endif
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+ printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ pmap_protect(kernel_pmap,
+ vm_page_fake_buckets_start,
+ vm_page_fake_buckets_end,
+ VM_PROT_READ);
+// *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+#if VM_OBJECT_TRACKING
+ vm_object_tracking_init();
+#endif /* VM_OBJECT_TRACKING */
+
+
+#if FBDP_TEST_COLLAPSE_COMPRESSOR
+ vm_object_size_t backing_size, top_size;
+ vm_object_t backing_object, top_object;
+ vm_map_offset_t backing_offset, top_offset;
+ unsigned char *backing_address, *top_address;
+ kern_return_t kr;
+
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR:\n");
+
+ /* create backing object */
+ backing_size = 15 * PAGE_SIZE;
+ backing_object = vm_object_allocate(backing_size);
+ assert(backing_object != VM_OBJECT_NULL);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
+ backing_object);
+ /* map backing object */
+ backing_offset = 0;
+ kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
+ VM_FLAGS_ANYWHERE, backing_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ backing_address = (unsigned char *) backing_offset;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped backing object %p at 0x%llx\n",
+ backing_object, (uint64_t) backing_offset);
+ /* populate with pages to be compressed in backing object */
+ backing_address[0x1*PAGE_SIZE] = 0xB1;
+ backing_address[0x4*PAGE_SIZE] = 0xB4;
+ backing_address[0x7*PAGE_SIZE] = 0xB7;
+ backing_address[0xa*PAGE_SIZE] = 0xBA;
+ backing_address[0xd*PAGE_SIZE] = 0xBD;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "backing_object %p\n", backing_object);
+ /* compress backing object */
+ vm_object_pageout(backing_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
+ backing_object);
+ /* wait for all the pages to be gone */
+ while (*(volatile int *)&backing_object->resident_page_count != 0)
+ IODelay(10);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
+ backing_object);
+ /* populate with pages to be resident in backing object */
+ backing_address[0x0*PAGE_SIZE] = 0xB0;
+ backing_address[0x3*PAGE_SIZE] = 0xB3;
+ backing_address[0x6*PAGE_SIZE] = 0xB6;
+ backing_address[0x9*PAGE_SIZE] = 0xB9;
+ backing_address[0xc*PAGE_SIZE] = 0xBC;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "backing_object %p\n", backing_object);
+ /* leave the other pages absent */
+ /* mess with the paging_offset of the backing_object */
+ assert(backing_object->paging_offset == 0);
+ backing_object->paging_offset = 0x3000;
+
+ /* create top object */
+ top_size = 9 * PAGE_SIZE;
+ top_object = vm_object_allocate(top_size);
+ assert(top_object != VM_OBJECT_NULL);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
+ top_object);
+ /* map top object */
+ top_offset = 0;
+ kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
+ VM_FLAGS_ANYWHERE, top_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ top_address = (unsigned char *) top_offset;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped top object %p at 0x%llx\n",
+ top_object, (uint64_t) top_offset);
+ /* populate with pages to be compressed in top object */
+ top_address[0x3*PAGE_SIZE] = 0xA3;
+ top_address[0x4*PAGE_SIZE] = 0xA4;
+ top_address[0x5*PAGE_SIZE] = 0xA5;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "top_object %p\n", top_object);
+ /* compress top object */
+ vm_object_pageout(top_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
+ top_object);
+ /* wait for all the pages to be gone */
+ while (*(volatile int *)&top_object->resident_page_count != 0);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
+ top_object);
+ /* populate with pages to be resident in top object */
+ top_address[0x0*PAGE_SIZE] = 0xA0;
+ top_address[0x1*PAGE_SIZE] = 0xA1;
+ top_address[0x2*PAGE_SIZE] = 0xA2;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "top_object %p\n", top_object);
+ /* leave the other pages absent */
+
+ /* link the 2 objects */
+ vm_object_reference(backing_object);
+ top_object->shadow = backing_object;
+ top_object->vo_shadow_offset = 0x3000;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
+ top_object, backing_object);
+
+ /* unmap backing object */
+ vm_map_remove(kernel_map,
+ backing_offset,
+ backing_offset + backing_size,
+ 0);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "unmapped backing_object %p [0x%llx:0x%llx]\n",
+ backing_object,
+ (uint64_t) backing_offset,
+ (uint64_t) (backing_offset + backing_size));
+
+ /* collapse */
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
+ vm_object_lock(top_object);
+ vm_object_collapse(top_object, 0, FALSE);
+ vm_object_unlock(top_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
+
+ /* did it work? */
+ if (top_object->shadow != VM_OBJECT_NULL) {
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ if (vm_object_collapse_compressor_allowed) {
+ panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ }
+ } else {
+ /* check the contents of the mapping */
+ unsigned char expect[9] =
+ { 0xA0, 0xA1, 0xA2, /* resident in top */
+ 0xA3, 0xA4, 0xA5, /* compressed in top */
+ 0xB9, /* resident in backing + shadow_offset */
+ 0xBD, /* compressed in backing + shadow_offset + paging_offset */
+ 0x00 }; /* absent in both */
+ unsigned char actual[9];
+ unsigned int i, errors;
+
+ errors = 0;
+ for (i = 0; i < sizeof (actual); i++) {
+ actual[i] = (unsigned char) top_address[i*PAGE_SIZE];
+ if (actual[i] != expect[i]) {
+ errors++;
+ }
+ }
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "actual [%x %x %x %x %x %x %x %x %x] "
+ "expect [%x %x %x %x %x %x %x %x %x] "
+ "%d errors\n",
+ actual[0], actual[1], actual[2], actual[3],
+ actual[4], actual[5], actual[6], actual[7],
+ actual[8],
+ expect[0], expect[1], expect[2], expect[3],
+ expect[4], expect[5], expect[6], expect[7],
+ expect[8],
+ errors);
+ if (errors) {
+ panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ } else {
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: PASS\n");
+ }
+ }
+#endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
+
+#if FBDP_TEST_WIRE_AND_EXTRACT
+ ledger_t ledger;
+ vm_map_t user_map, wire_map;
+ mach_vm_address_t user_addr, wire_addr;
+ mach_vm_size_t user_size, wire_size;
+ mach_vm_offset_t cur_offset;
+ vm_prot_t cur_prot, max_prot;
+ ppnum_t user_ppnum, wire_ppnum;
+ kern_return_t kr;
+
+ ledger = ledger_instantiate(task_ledger_template,
+ LEDGER_CREATE_ACTIVE_ENTRIES);
+ user_map = vm_map_create(pmap_create(ledger, 0, PMAP_CREATE_64BIT),
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ wire_map = vm_map_create(NULL,
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ user_addr = 0;
+ user_size = 0x10000;
+ kr = mach_vm_allocate(user_map,
+ &user_addr,
+ user_size,
+ VM_FLAGS_ANYWHERE);
+ assert(kr == KERN_SUCCESS);
+ wire_addr = 0;
+ wire_size = user_size;
+ kr = mach_vm_remap(wire_map,
+ &wire_addr,
+ wire_size,
+ 0,
+ VM_FLAGS_ANYWHERE,
+ user_map,
+ user_addr,
+ FALSE,
+ &cur_prot,
+ &max_prot,
+ VM_INHERIT_NONE);
+ assert(kr == KERN_SUCCESS);
+ for (cur_offset = 0;
+ cur_offset < wire_size;
+ cur_offset += PAGE_SIZE) {
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ user_ppnum = vm_map_get_phys_page(user_map,
+ user_addr + cur_offset);
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+ }
+ cur_offset -= PAGE_SIZE;
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT,
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: PASS\n");
+#endif /* FBDP_TEST_WIRE_AND_EXTRACT */
+
+#if FBDP_TEST_PAGE_WIRE_OVERFLOW
+ vm_object_t fbdp_object;
+ vm_page_t fbdp_page;
+
+ printf("FBDP_TEST_PAGE_WIRE_OVERFLOW: starting...\n");
+
+ fbdp_object = vm_object_allocate(PAGE_SIZE);
+ vm_object_lock(fbdp_object);
+ fbdp_page = vm_page_alloc(fbdp_object, 0x0);
+ vm_page_lock_queues();
+ do {
+ vm_page_wire(fbdp_page, 1, FALSE);
+ } while (fbdp_page->wire_count != 0);
+ vm_page_unlock_queues();
+ vm_object_unlock(fbdp_object);
+ panic("FBDP(%p,%p): wire_count overflow not detected\n",
+ fbdp_object, fbdp_page);
+#endif /* FBDP_TEST_PAGE_WIRE_OVERFLOW */
+
vm_pageout_continue();
/*
-#define MAX_COMRPESSOR_THREAD_COUNT 8
-
-struct cq ciq[MAX_COMRPESSOR_THREAD_COUNT];
-
int vm_compressor_thread_count = 2;
kern_return_t
int i;
host_basic_info_data_t hinfo;
- if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
- mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+ assert (VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
#define BSD_HOST 1
- host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+ host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
- assert(hinfo.max_cpus > 0);
+ assert(hinfo.max_cpus > 0);
- if (vm_compressor_thread_count >= hinfo.max_cpus)
- vm_compressor_thread_count = hinfo.max_cpus - 1;
- if (vm_compressor_thread_count <= 0)
- vm_compressor_thread_count = 1;
- else if (vm_compressor_thread_count > MAX_COMRPESSOR_THREAD_COUNT)
- vm_compressor_thread_count = MAX_COMRPESSOR_THREAD_COUNT;
+ if (vm_compressor_thread_count >= hinfo.max_cpus)
+ vm_compressor_thread_count = hinfo.max_cpus - 1;
+ if (vm_compressor_thread_count <= 0)
+ vm_compressor_thread_count = 1;
+ else if (vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT)
+ vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
+
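+	/*
+	 * Worked example of the clamp above: on a 4-CPU machine the
+	 * default of 2 compressor threads stands; on a single-CPU machine
+	 * (max_cpus - 1) would yield 0, so the count is raised back to 1;
+	 * and any override larger than MAX_COMPRESSOR_THREAD_COUNT (8) is
+	 * capped there.
+	 */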
+ if (vm_compressor_immediate_preferred == TRUE) {
+ vm_pageout_immediate_chead = NULL;
+ vm_pageout_immediate_scratch_buf = kalloc(vm_compressor_get_encode_scratch_size());
- vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
- } else {
vm_compressor_thread_count = 1;
- vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
}
+ vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
+
for (i = 0; i < vm_compressor_thread_count; i++) {
+ ciq[i].id = i;
+ ciq[i].q = &vm_pageout_queue_internal;
+ ciq[i].current_chead = NULL;
+ ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
+
if (result == KERN_SUCCESS)
thread_deallocate(vm_pageout_internal_iothread);
else
return result;
}
+#if CONFIG_IOSCHED
+/*
+ * To support I/O Expedite for compressed files we mark the upls with special flags.
+ * The way decmpfs works is that we create a big upl which marks all the pages needed to
+ * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
+ * then issues smaller real I/Os for the compressed data, decompresses it and puts the data into the pages
+ * being held in the big original UPL. We mark each of these smaller UPLs with the flag
+ * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
+ * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
+ * by the req upl lock (the reverse link doesn't need synchronization
+ * since we never inspect it unless the real I/O upl is being destroyed).
+ */
+
+
+static void
+upl_set_decmp_info(upl_t upl, upl_t src_upl)
+{
+ assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+
+ upl_lock(src_upl);
+ if (src_upl->decmp_io_upl) {
+ /*
+ * If there is already an alive real I/O UPL, ignore this new UPL.
+ * This case should rarely happen and even if it does, it just means
+ * that we might issue a spurious expedite which the driver is expected
+ * to handle.
+ */
+ upl_unlock(src_upl);
+ return;
+ }
+ src_upl->decmp_io_upl = (void *)upl;
+ src_upl->ref_count++;
+
+ upl->flags |= UPL_DECMP_REAL_IO;
+ upl->decmp_io_upl = (void *)src_upl;
+ upl_unlock(src_upl);
+}
+#endif /* CONFIG_IOSCHED */
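+
+/*
+ * Illustrative (non-compiled) sketch of the pairing protocol described
+ * above. The decmpfs read-path name is hypothetical; the upl_* calls and
+ * flags are the real ones defined in this file:
+ */
+#if 0
+	upl_mark_decmp(req_upl);	/* sets UPL_DECMP_REQ and records req_upl on the creating thread */
+	/*
+	 * While the mark is in place, any UPL created on this thread with
+	 * UPL_CREATE_INTERNAL | UPL_CREATE_EXPEDITE_SUP is linked back to
+	 * req_upl by upl_create() via upl_set_decmp_info() and flagged
+	 * UPL_DECMP_REAL_IO.
+	 */
+	decmpfs_issue_compressed_io();	/* hypothetical: issues the smaller real I/Os */
+	upl_unmark_decmp(req_upl);	/* drops the per-thread mark */
+#endif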
+
+#if UPL_DEBUG
+int upl_debug_enabled = 1;
+#else
+int upl_debug_enabled = 0;
+#endif
static upl_t
upl_create(int type, int flags, upl_size_t size)
bzero((char *)upl + upl_size, page_field_size);
upl->flags = upl_flags | flags;
- upl->src_object = NULL;
upl->kaddr = (vm_offset_t)0;
upl->size = 0;
upl->map_object = NULL;
upl->highest_page = 0;
upl_lock_init(upl);
upl->vector_upl = NULL;
+ upl->associated_upl = NULL;
+#if CONFIG_IOSCHED
+ if (type & UPL_CREATE_IO_TRACKING) {
+ upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
+ }
+
+ upl->upl_reprio_info = 0;
+ upl->decmp_io_upl = 0;
+ if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
+ /* Only support expedite on internal UPLs */
+ thread_t curthread = current_thread();
+ upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
+ bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
+ upl->flags |= UPL_EXPEDITE_SUPPORTED;
+ if (curthread->decmp_upl != NULL)
+ upl_set_decmp_info(upl, curthread->decmp_upl);
+ }
+#endif
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
+ upl->upl_creator = current_thread();
+ upl->uplq.next = 0;
+ upl->uplq.prev = 0;
+ upl->flags |= UPL_TRACKED_BY_OBJECT;
+ }
+#endif
+
#if UPL_DEBUG
upl->ubc_alias1 = 0;
upl->ubc_alias2 = 0;
- upl->upl_creator = current_thread();
upl->upl_state = 0;
upl->upl_commit_index = 0;
bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
- upl->uplq.next = 0;
- upl->uplq.prev = 0;
-
(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */
panic("upl(%p) ext_ref_count", upl);
}
-#if UPL_DEBUG
- if ( !(upl->flags & UPL_VECTOR)) {
+#if CONFIG_IOSCHED
+ if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
+ upl_t src_upl;
+ src_upl = upl->decmp_io_upl;
+ assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+ upl_lock(src_upl);
+ src_upl->decmp_io_upl = NULL;
+ upl_unlock(src_upl);
+ upl_deallocate(src_upl);
+ }
+#endif /* CONFIG_IOSCHED */
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
vm_object_t object;
if (upl->flags & UPL_SHADOWED) {
} else {
object = upl->map_object;
}
+
vm_object_lock(object);
queue_remove(&object->uplq, upl, upl_t, uplq);
vm_object_activity_end(object);
vm_object_collapse(object, 0, TRUE);
vm_object_unlock(object);
}
-#endif /* UPL_DEBUG */
+#endif
/*
* drop a reference on the map_object whether or
* not a pageout object is inserted
upl_lock_destroy(upl);
upl->vector_upl = (vector_upl_t) 0xfeedbeef;
+#if CONFIG_IOSCHED
+ if (upl->flags & UPL_EXPEDITE_SUPPORTED)
+ kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size/PAGE_SIZE));
+#endif
+
if (upl->flags & UPL_INTERNAL) {
kfree(upl,
sizeof(struct upl) +
void
upl_deallocate(upl_t upl)
{
+ upl_lock(upl);
if (--upl->ref_count == 0) {
if(vector_upl_is_valid(upl))
vector_upl_deallocate(upl);
+ upl_unlock(upl);
upl_destroy(upl);
}
+ else
+ upl_unlock(upl);
+}
+
+#if CONFIG_IOSCHED
+void
+upl_mark_decmp(upl_t upl)
+{
+ if (upl->flags & UPL_TRACKED_BY_OBJECT) {
+ upl->flags |= UPL_DECMP_REQ;
+ upl->upl_creator->decmp_upl = (void *)upl;
+ }
+}
+
+void
+upl_unmark_decmp(upl_t upl)
+{
+ if(upl && (upl->flags & UPL_DECMP_REQ)) {
+ upl->upl_creator->decmp_upl = NULL;
+ }
+}
+
+#endif /* CONFIG_IOSCHED */
+
+#define VM_PAGE_Q_BACKING_UP(q) \
+ ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))
+
+boolean_t must_throttle_writes(void);
+
+boolean_t
+must_throttle_writes()
+{
+ if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
+ vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10)
+ return (TRUE);
+
+ return (FALSE);
}
+
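+/*
+ * Example of the thresholds above: the external pageout queue counts as
+ * "backing up" once its pgo_laundry reaches 80% of its pgo_maxlaundry;
+ * even then, writers are throttled only while pageable external pages
+ * also exceed 60% of AVAILABLE_NON_COMPRESSED_MEMORY.
+ */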
#if DEVELOPMENT || DEBUG
/*
* Statistics about UPL enforcement of copy-on-write obligations.
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
- int cntrl_flags)
+ upl_control_flags_t cntrl_flags)
{
vm_page_t dst_page = VM_PAGE_NULL;
vm_object_offset_t dst_offset;
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
+ int io_tracking_flag = 0;
+ int grab_options;
+ ppnum_t phys_page;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
/*
panic("vm_object_upl_request: contiguous object specified\n");
- if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
- size = MAX_UPL_SIZE * PAGE_SIZE;
+ if (size > MAX_UPL_SIZE_BYTES)
+ size = MAX_UPL_SIZE_BYTES;
if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
- *page_list_count = MAX_UPL_SIZE;
+ *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (object->io_tracking || upl_debug_enabled)
+ io_tracking_flag |= UPL_CREATE_IO_TRACKING;
+#endif
+#if CONFIG_IOSCHED
+ if (object->io_tracking)
+ io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
+#endif
if (cntrl_flags & UPL_SET_INTERNAL) {
if (cntrl_flags & UPL_SET_LITE) {
- upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
+ upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
lite_list = (wpl_array_t)
lite_list = NULL;
}
} else {
- upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
+ upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
} else {
if (cntrl_flags & UPL_SET_LITE) {
- upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
+ upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
lite_list = NULL;
}
} else {
- upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
+ upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
}
}
*upl_ptr = upl;
vm_object_lock(object);
vm_object_activity_begin(object);
+ grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+ if (object->can_grab_secluded) {
+ grab_options |= VM_PAGE_GRAB_SECLUDED;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
/*
* we can lock in the paging_offset once paging_in_progress is set
*/
upl->size = size;
upl->offset = offset + object->paging_offset;
-#if UPL_DEBUG
- vm_object_activity_begin(object);
- queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
-
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (object->io_tracking || upl_debug_enabled) {
+ vm_object_activity_begin(object);
+ queue_enter(&object->uplq, upl, upl_t, uplq);
+ }
+#endif
if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
/*
* Honor copy-on-write obligations
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
- object->resident_page_count < (MAX_UPL_SIZE * 2))
+ object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT))
object->scan_collisions = 0;
+ if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
+ boolean_t isSSD = FALSE;
+
+ vnode_pager_get_isSSD(object->pager, &isSSD);
+ vm_object_unlock(object);
+
+ OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
+
+ if (isSSD == TRUE)
+ delay(1000 * size_in_pages);
+ else
+ delay(5000 * size_in_pages);
+ OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
+
+ vm_object_lock(object);
+ }
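+	/*
+	 * The stall above is proportional to the request size: delay() takes
+	 * microseconds, so this is roughly 1ms per page for SSD-backed
+	 * objects and 5ms per page otherwise, giving the pageout queues time
+	 * to drain before this writer adds more dirty pages.
+	 * vm_upl_wait_for_pages tracks how many pages are stalled this way.
+	 */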
+
while (xfer_size) {
dwp->dw_mask = 0;
goto try_next_page;
}
+ phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
/*
* grab this up front...
* a high percentage of the time we're going to need it, so we avoid
* an extra trip into the pmap layer by grabbing it here and recording it
*/
if (dst_page->pmapped)
- refmod_state = pmap_get_refmod(dst_page->phys_page);
+ refmod_state = pmap_get_refmod(phys_page);
else
refmod_state = 0;
- if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
+ if ( (refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
/*
* page is on inactive list and referenced...
* reactivate it now... this gets it out of the
* can't have been referenced recently...
*/
if ( (hibernate_cleaning_in_progress == TRUE ||
- (!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) || dst_page->throttled)) &&
- ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
+ (!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) ||
+ (dst_page->vm_page_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
+ ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
goto check_busy;
}
dont_return:
check_busy:
if (dst_page->busy) {
if (cntrl_flags & UPL_NOBLOCK) {
- if (user_page_list)
+ if (user_page_list)
user_page_list[entry].phys_addr = 0;
+ dwp->dw_mask = 0;
goto try_next_page;
}
*/
dst_page->busy = was_busy;
}
- if (dst_page->pageout_queue == TRUE) {
+ if (dst_page->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
vm_page_lockspin_queues();
- if (dst_page->pageout_queue == TRUE) {
+ if (dst_page->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) {
/*
* we've buddied up a page for a clustered pageout
* that has already been moved to the pageout
*/
if (dst_page->pageout)
encountered_lrp = TRUE;
- if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious))) {
+ if ((dst_page->dirty || (object->internal && dst_page->precious))) {
if (encountered_lrp)
CLUSTER_STAT(pages_at_higher_offsets++;)
else
hw_dirty = refmod_state & VM_MEM_MODIFIED;
dirty = hw_dirty ? TRUE : dst_page->dirty;
- if (dst_page->phys_page > upl->highest_page)
- upl->highest_page = dst_page->phys_page;
+ if (phys_page > upl->highest_page)
+ upl->highest_page = phys_page;
+
+ assert (!pmap_is_noencrypt(phys_page));
if (cntrl_flags & UPL_SET_LITE) {
unsigned int pg_num;
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (hw_dirty)
- pmap_clear_modify(dst_page->phys_page);
+ pmap_clear_modify(phys_page);
/*
* Mark original page as cleaning
alias_page->absent = FALSE;
alias_page = NULL;
}
-#if MACH_PAGEMAP
- /*
- * Record that this page has been
- * written out
- */
- vm_external_state_set(object->existence_map, dst_page->offset);
-#endif /*MACH_PAGEMAP*/
if (dirty) {
SET_PAGE_DIRTY(dst_page, FALSE);
} else {
}
if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
if ( !VM_PAGE_WIRED(dst_page))
- dst_page->pageout = TRUE;
+ dst_page->free_when_done = TRUE;
}
} else {
if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
continue;
}
- if (dst_page->laundry) {
- dst_page->pageout = FALSE;
-
+ if (dst_page->laundry)
vm_pageout_steal_laundry(dst_page, FALSE);
- }
} else {
if (object->private) {
/*
dst_page = vm_object_page_grab(object);
if (dst_page != VM_PAGE_NULL)
- vm_page_release(dst_page);
+ vm_page_release(dst_page,
+ FALSE);
dst_page = vm_object_page_grab(object);
}
/*
* need to allocate a page
*/
- dst_page = vm_page_grab();
+ dst_page = vm_page_grab_options(grab_options);
}
if (dst_page == VM_PAGE_NULL) {
if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
* speculative list
*/
dst_page->clustered = TRUE;
+
+ if ( !(cntrl_flags & UPL_FILE_IO))
+ VM_STAT_INCR(pageins);
}
}
+ phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
/*
* ENCRYPTED SWAP:
*/
* eliminate all mappings from the
* original object and its progeny
*/
- refmod_state = pmap_disconnect(dst_page->phys_page);
+ refmod_state = pmap_disconnect(phys_page);
else
- refmod_state = pmap_get_refmod(dst_page->phys_page);
+ refmod_state = pmap_get_refmod(phys_page);
} else
refmod_state = 0;
lite_list[pg_num>>5] |= 1 << (pg_num & 31);
if (hw_dirty)
- pmap_clear_modify(dst_page->phys_page);
+ pmap_clear_modify(phys_page);
/*
* Mark original page as cleaning
dwp->dw_mask |= DW_set_reference;
}
if (cntrl_flags & UPL_PRECIOUS) {
- if (dst_page->object->internal) {
+ if (object->internal) {
SET_PAGE_DIRTY(dst_page, FALSE);
dst_page->precious = FALSE;
} else {
if (dst_page->busy)
upl->flags |= UPL_HAS_BUSY;
- if (dst_page->phys_page > upl->highest_page)
- upl->highest_page = dst_page->phys_page;
+ if (phys_page > upl->highest_page)
+ upl->highest_page = phys_page;
+ assert (!pmap_is_noencrypt(phys_page));
if (user_page_list) {
- user_page_list[entry].phys_addr = dst_page->phys_page;
- user_page_list[entry].pageout = dst_page->pageout;
+ user_page_list[entry].phys_addr = phys_page;
+ user_page_list[entry].free_when_done = dst_page->free_when_done;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
user_page_list[entry].needed = FALSE;
if (dst_page->clustered == TRUE)
- user_page_list[entry].speculative = dst_page->speculative;
+ user_page_list[entry].speculative = (dst_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
else
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = dst_page->cs_validated;
user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+ user_page_list[entry].cs_nx = dst_page->cs_nx;
+ user_page_list[entry].mark = FALSE;
}
/*
* if UPL_RET_ONLY_ABSENT is set, then
* update clustered and speculative state
*
*/
- VM_PAGE_CONSUME_CLUSTERED(dst_page);
+ if (dst_page->clustered)
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
}
try_next_page:
if (dwp->dw_mask) {
VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
if (dw_count >= dw_limit) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, UPL_MEMORY_TAG(cntrl_flags), &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
xfer_size -= PAGE_SIZE;
}
if (dw_count)
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, UPL_MEMORY_TAG(cntrl_flags), &dw_array[0], dw_count);
if (alias_page != NULL) {
VM_PAGE_FREE(alias_page);
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
- else if (*page_list_count > entry)
- *page_list_count = entry;
- }
-#if UPL_DEBUG
- upl->upl_state = 1;
-#endif
- vm_object_unlock(object);
-
- return KERN_SUCCESS;
-}
-
-/* JMM - Backward compatability for now */
-kern_return_t
-vm_fault_list_request( /* forward */
- memory_object_control_t control,
- vm_object_offset_t offset,
- upl_size_t size,
- upl_t *upl_ptr,
- upl_page_info_t **user_page_list_ptr,
- unsigned int page_list_count,
- int cntrl_flags);
-kern_return_t
-vm_fault_list_request(
- memory_object_control_t control,
- vm_object_offset_t offset,
- upl_size_t size,
- upl_t *upl_ptr,
- upl_page_info_t **user_page_list_ptr,
- unsigned int page_list_count,
- int cntrl_flags)
-{
- unsigned int local_list_count;
- upl_page_info_t *user_page_list;
- kern_return_t kr;
-
- if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
- return KERN_INVALID_ARGUMENT;
-
- if (user_page_list_ptr != NULL) {
- local_list_count = page_list_count;
- user_page_list = *user_page_list_ptr;
- } else {
- local_list_count = 0;
- user_page_list = NULL;
- }
- kr = memory_object_upl_request(control,
- offset,
- size,
- upl_ptr,
- user_page_list,
- &local_list_count,
- cntrl_flags);
-
- if(kr != KERN_SUCCESS)
- return kr;
-
- if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
- *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
+ else if (*page_list_count > entry)
+ *page_list_count = entry;
}
+#if UPL_DEBUG
+ upl->upl_state = 1;
+#endif
+ vm_object_unlock(object);
return KERN_SUCCESS;
}
-
-
/*
* Routine: vm_object_super_upl_request
* Purpose:
upl_t *upl,
upl_page_info_t *user_page_list,
unsigned int *page_list_count,
- int cntrl_flags)
+ upl_control_flags_t cntrl_flags)
{
if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
return KERN_FAILURE;
upl_t *upl,
upl_page_info_array_t page_list,
unsigned int *count,
- int *flags)
+ upl_control_flags_t *flags)
{
- vm_map_entry_t entry;
- int caller_flags;
- int force_data_sync;
- int sync_cow_data;
- vm_object_t local_object;
- vm_map_offset_t local_offset;
- vm_map_offset_t local_start;
- kern_return_t ret;
+ vm_map_entry_t entry;
+ upl_control_flags_t caller_flags;
+ int force_data_sync;
+ int sync_cow_data;
+ vm_object_t local_object;
+ vm_map_offset_t local_offset;
+ vm_map_offset_t local_start;
+ kern_return_t ret;
+
+ assert(page_aligned(offset));
caller_flags = *flags;
REDISCOVER_ENTRY:
vm_map_lock_read(map);
- if (vm_map_lookup_entry(map, offset, &entry)) {
+ if (!vm_map_lookup_entry(map, offset, &entry)) {
+ vm_map_unlock_read(map);
+ return KERN_FAILURE;
+ }
- if ((entry->vme_end - offset) < *upl_size) {
- *upl_size = (upl_size_t) (entry->vme_end - offset);
- assert(*upl_size == entry->vme_end - offset);
+ if ((entry->vme_end - offset) < *upl_size) {
+ *upl_size = (upl_size_t) (entry->vme_end - offset);
+ assert(*upl_size == entry->vme_end - offset);
+ }
+
+ if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
+ *flags = 0;
+
+ if (!entry->is_sub_map &&
+ VME_OBJECT(entry) != VM_OBJECT_NULL) {
+ if (VME_OBJECT(entry)->private)
+ *flags = UPL_DEV_MEMORY;
+
+ if (VME_OBJECT(entry)->phys_contiguous)
+ *flags |= UPL_PHYS_CONTIG;
}
+ vm_map_unlock_read(map);
+ return KERN_SUCCESS;
+ }
- if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
- *flags = 0;
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL ||
+ !VME_OBJECT(entry)->phys_contiguous) {
+ if (*upl_size > MAX_UPL_SIZE_BYTES)
+ *upl_size = MAX_UPL_SIZE_BYTES;
+ }
- if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
- if (entry->object.vm_object->private)
- *flags = UPL_DEV_MEMORY;
+ /*
+ * Create an object if necessary.
+ */
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
- if (entry->object.vm_object->phys_contiguous)
- *flags |= UPL_PHYS_CONTIG;
- }
- vm_map_unlock_read(map);
+ if (vm_map_lock_read_to_write(map))
+ goto REDISCOVER_ENTRY;
- return KERN_SUCCESS;
- }
+ VME_OBJECT_SET(entry,
+ vm_object_allocate((vm_size_t)
+ (entry->vme_end -
+ entry->vme_start)));
+ VME_OFFSET_SET(entry, 0);
- if (entry->is_sub_map) {
- vm_map_t submap;
+ vm_map_lock_write_to_read(map);
+ }
- submap = entry->object.sub_map;
- local_start = entry->vme_start;
- local_offset = entry->offset;
+ if (!(caller_flags & UPL_COPYOUT_FROM) &&
+ !(entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock_read(map);
+ return KERN_PROTECTION_FAILURE;
+ }
- vm_map_reference(submap);
- vm_map_unlock_read(map);
- ret = vm_map_create_upl(submap,
- local_offset + (offset - local_start),
- upl_size, upl, page_list, count, flags);
- vm_map_deallocate(submap);
+ local_object = VME_OBJECT(entry);
+ assert(local_object != VM_OBJECT_NULL);
- return ret;
- }
+ if (!entry->is_sub_map &&
+ !entry->needs_copy &&
+ *upl_size != 0 &&
+ local_object->vo_size > *upl_size && /* partial UPL */
+ entry->wired_count == 0 && /* No COW for entries that are wired */
+ (map->pmap != kernel_pmap) && /* alias checks */
+ (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */
+ ||
+ (/* case 2 */
+ local_object->internal &&
+ (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) &&
+ local_object->ref_count > 1))) {
+ vm_prot_t prot;
- if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
- if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
- *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
- }
/*
- * Create an object if necessary.
+ * Case 1:
+ * Set up the targeted range for copy-on-write to avoid
+ * applying true_share/copy_delay to the entire object.
+ *
+ * Case 2:
+ * This map entry covers only part of an internal
+ * object. There could be other map entries covering
+ * other areas of this object and some of these map
+ * entries could be marked as "needs_copy", which
+ * assumes that the object is COPY_SYMMETRIC.
+ * To avoid marking this object as COPY_DELAY and
+ * "true_share", let's shadow it and mark the new
+ * (smaller) object as "true_share" and COPY_DELAY.
*/
- if (entry->object.vm_object == VM_OBJECT_NULL) {
- if (vm_map_lock_read_to_write(map))
- goto REDISCOVER_ENTRY;
+ if (vm_map_lock_read_to_write(map)) {
+ goto REDISCOVER_ENTRY;
+ }
+ vm_map_lock_assert_exclusive(map);
+ assert(VME_OBJECT(entry) == local_object);
+
+ vm_map_clip_start(map,
+ entry,
+ vm_map_trunc_page(offset,
+ VM_MAP_PAGE_MASK(map)));
+ vm_map_clip_end(map,
+ entry,
+ vm_map_round_page(offset + *upl_size,
+ VM_MAP_PAGE_MASK(map)));
+ if ((entry->vme_end - offset) < *upl_size) {
+ *upl_size = (upl_size_t) (entry->vme_end - offset);
+ assert(*upl_size == entry->vme_end - offset);
+ }
+
+ prot = entry->protection & ~VM_PROT_WRITE;
+ if (override_nx(map, VME_ALIAS(entry)) && prot)
+ prot |= VM_PROT_EXECUTE;
+ vm_object_pmap_protect(local_object,
+ VME_OFFSET(entry),
+ entry->vme_end - entry->vme_start,
+ ((entry->is_shared ||
+ map->mapped_in_other_pmaps)
+ ? PMAP_NULL
+ : map->pmap),
+ entry->vme_start,
+ prot);
- entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
- entry->offset = 0;
+ assert(entry->wired_count == 0);
- vm_map_lock_write_to_read(map);
+ /*
+ * Lock the VM object and re-check its status: if it's mapped
+ * in another address space, we could still be racing with
+ * another thread holding that other VM map exclusively.
+ */
+ vm_object_lock(local_object);
+ if (local_object->true_share) {
+ /* object is already in proper state: no COW needed */
+ assert(local_object->copy_strategy !=
+ MEMORY_OBJECT_COPY_SYMMETRIC);
+ } else {
+ /* not true_share: ask for copy-on-write below */
+ assert(local_object->copy_strategy ==
+ MEMORY_OBJECT_COPY_SYMMETRIC);
+ entry->needs_copy = TRUE;
}
- if (!(caller_flags & UPL_COPYOUT_FROM)) {
- if (!(entry->protection & VM_PROT_WRITE)) {
- vm_map_unlock_read(map);
- return KERN_PROTECTION_FAILURE;
- }
+ vm_object_unlock(local_object);
- local_object = entry->object.vm_object;
- if (vm_map_entry_should_cow_for_true_share(entry) &&
- local_object->vo_size > *upl_size &&
- *upl_size != 0) {
- vm_prot_t prot;
+ vm_map_lock_write_to_read(map);
+ }
- /*
- * Set up the targeted range for copy-on-write to avoid
- * applying true_share/copy_delay to the entire object.
- */
+ if (entry->needs_copy) {
+ /*
+ * Honor copy-on-write for COPY_SYMMETRIC
+ * strategy.
+ */
+ vm_map_t local_map;
+ vm_object_t object;
+ vm_object_offset_t new_offset;
+ vm_prot_t prot;
+ boolean_t wired;
+ vm_map_version_t version;
+ vm_map_t real_map;
+ vm_prot_t fault_type;
+
+ local_map = map;
+
+ if (caller_flags & UPL_COPYOUT_FROM) {
+ fault_type = VM_PROT_READ | VM_PROT_COPY;
+ vm_counters.create_upl_extra_cow++;
+ vm_counters.create_upl_extra_cow_pages +=
+ (entry->vme_end - entry->vme_start) / PAGE_SIZE;
+ } else {
+ fault_type = VM_PROT_WRITE;
+ }
+ if (vm_map_lookup_locked(&local_map,
+ offset, fault_type,
+ OBJECT_LOCK_EXCLUSIVE,
+ &version, &object,
+ &new_offset, &prot, &wired,
+ NULL,
+ &real_map) != KERN_SUCCESS) {
+ if (fault_type == VM_PROT_WRITE) {
+ vm_counters.create_upl_lookup_failure_write++;
+ } else {
+ vm_counters.create_upl_lookup_failure_copy++;
+ }
+ vm_map_unlock_read(local_map);
+ return KERN_FAILURE;
+ }
+ if (real_map != map)
+ vm_map_unlock(real_map);
+ vm_map_unlock_read(local_map);
- if (vm_map_lock_read_to_write(map)) {
- goto REDISCOVER_ENTRY;
- }
+ vm_object_unlock(object);
- vm_map_clip_start(map,
- entry,
- vm_map_trunc_page(offset,
- VM_MAP_PAGE_MASK(map)));
- vm_map_clip_end(map,
- entry,
- vm_map_round_page(offset + *upl_size,
- VM_MAP_PAGE_MASK(map)));
- prot = entry->protection & ~VM_PROT_WRITE;
- if (override_nx(map, entry->alias) && prot)
- prot |= VM_PROT_EXECUTE;
- vm_object_pmap_protect(local_object,
- entry->offset,
- entry->vme_end - entry->vme_start,
- ((entry->is_shared || map->mapped_in_other_pmaps)
- ? PMAP_NULL
- : map->pmap),
- entry->vme_start,
- prot);
- entry->needs_copy = TRUE;
-
- vm_map_lock_write_to_read(map);
- }
-
- if (entry->needs_copy) {
- /*
- * Honor copy-on-write for COPY_SYMMETRIC
- * strategy.
- */
- vm_map_t local_map;
- vm_object_t object;
- vm_object_offset_t new_offset;
- vm_prot_t prot;
- boolean_t wired;
- vm_map_version_t version;
- vm_map_t real_map;
-
- local_map = map;
-
- if (vm_map_lookup_locked(&local_map,
- offset, VM_PROT_WRITE,
- OBJECT_LOCK_EXCLUSIVE,
- &version, &object,
- &new_offset, &prot, &wired,
- NULL,
- &real_map) != KERN_SUCCESS) {
- vm_map_unlock_read(local_map);
- return KERN_FAILURE;
- }
- if (real_map != map)
- vm_map_unlock(real_map);
- vm_map_unlock_read(local_map);
+ goto REDISCOVER_ENTRY;
+ }
- vm_object_unlock(object);
+ if (entry->is_sub_map) {
+ vm_map_t submap;
- goto REDISCOVER_ENTRY;
- }
- }
- if (sync_cow_data) {
- if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
- local_object = entry->object.vm_object;
- local_start = entry->vme_start;
- local_offset = entry->offset;
+ submap = VME_SUBMAP(entry);
+ local_start = entry->vme_start;
+ local_offset = VME_OFFSET(entry);
- vm_object_reference(local_object);
- vm_map_unlock_read(map);
+ vm_map_reference(submap);
+ vm_map_unlock_read(map);
- if (local_object->shadow && local_object->copy) {
- vm_object_lock_request(
- local_object->shadow,
- (vm_object_offset_t)
- ((offset - local_start) +
- local_offset) +
- local_object->vo_shadow_offset,
- *upl_size, FALSE,
- MEMORY_OBJECT_DATA_SYNC,
- VM_PROT_NO_CHANGE);
- }
- sync_cow_data = FALSE;
- vm_object_deallocate(local_object);
+ ret = vm_map_create_upl(submap,
+ local_offset + (offset - local_start),
+ upl_size, upl, page_list, count, flags);
+ vm_map_deallocate(submap);
- goto REDISCOVER_ENTRY;
- }
- }
- if (force_data_sync) {
- local_object = entry->object.vm_object;
- local_start = entry->vme_start;
- local_offset = entry->offset;
+ return ret;
+ }
+
+ if (sync_cow_data &&
+ (VME_OBJECT(entry)->shadow ||
+ VME_OBJECT(entry)->copy)) {
+ local_object = VME_OBJECT(entry);
+ local_start = entry->vme_start;
+ local_offset = VME_OFFSET(entry);
- vm_object_reference(local_object);
- vm_map_unlock_read(map);
+ vm_object_reference(local_object);
+ vm_map_unlock_read(map);
- vm_object_lock_request(
- local_object,
- (vm_object_offset_t)
- ((offset - local_start) + local_offset),
- (vm_object_size_t)*upl_size, FALSE,
+ if (local_object->shadow && local_object->copy) {
+ vm_object_lock_request(local_object->shadow,
+ ((vm_object_offset_t)
+ ((offset - local_start) +
+ local_offset) +
+ local_object->vo_shadow_offset),
+ *upl_size, FALSE,
MEMORY_OBJECT_DATA_SYNC,
VM_PROT_NO_CHANGE);
-
- force_data_sync = FALSE;
- vm_object_deallocate(local_object);
-
- goto REDISCOVER_ENTRY;
}
- if (entry->object.vm_object->private)
- *flags = UPL_DEV_MEMORY;
- else
- *flags = 0;
-
- if (entry->object.vm_object->phys_contiguous)
- *flags |= UPL_PHYS_CONTIG;
+ sync_cow_data = FALSE;
+ vm_object_deallocate(local_object);
- local_object = entry->object.vm_object;
- local_offset = entry->offset;
+ goto REDISCOVER_ENTRY;
+ }
+ if (force_data_sync) {
+ local_object = VME_OBJECT(entry);
local_start = entry->vme_start;
+ local_offset = VME_OFFSET(entry);
vm_object_reference(local_object);
vm_map_unlock_read(map);
- ret = vm_object_iopl_request(local_object,
- (vm_object_offset_t) ((offset - local_start) + local_offset),
- *upl_size,
- upl,
- page_list,
- count,
- caller_flags);
+ vm_object_lock_request(local_object,
+ ((vm_object_offset_t)
+ ((offset - local_start) +
+ local_offset)),
+ (vm_object_size_t)*upl_size,
+ FALSE,
+ MEMORY_OBJECT_DATA_SYNC,
+ VM_PROT_NO_CHANGE);
+
+ force_data_sync = FALSE;
vm_object_deallocate(local_object);
- return(ret);
- }
+ goto REDISCOVER_ENTRY;
+ }
+ if (VME_OBJECT(entry)->private)
+ *flags = UPL_DEV_MEMORY;
+ else
+ *flags = 0;
+
+ if (VME_OBJECT(entry)->phys_contiguous)
+ *flags |= UPL_PHYS_CONTIG;
+
+ local_object = VME_OBJECT(entry);
+ local_offset = VME_OFFSET(entry);
+ local_start = entry->vme_start;
+
+
+ vm_object_lock(local_object);
+
+ /*
+ * Ensure that this object is "true_share" and "copy_delay" now,
+ * while we're still holding the VM map lock. After we unlock the map,
+ * anything could happen to that mapping, including some copy-on-write
+ * activity. We need to make sure that the IOPL will point at the
+ * same memory as the mapping.
+ */
+ if (local_object->true_share) {
+ assert(local_object->copy_strategy !=
+ MEMORY_OBJECT_COPY_SYMMETRIC);
+ } else if (local_object != kernel_object &&
+ local_object != compressor_object &&
+ !local_object->phys_contiguous) {
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+ if (!local_object->true_share &&
+ vm_object_tracking_inited) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int num = 0;
+ num = OSBacktrace(bt,
+ VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ local_object,
+ VM_OBJECT_TRACKING_OP_TRUESHARE,
+ bt,
+ num);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+ local_object->true_share = TRUE;
+ if (local_object->copy_strategy ==
+ MEMORY_OBJECT_COPY_SYMMETRIC) {
+ local_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+ }
+
+ vm_object_reference_locked(local_object);
+ vm_object_unlock(local_object);
+
vm_map_unlock_read(map);
- return(KERN_FAILURE);
+ ret = vm_object_iopl_request(local_object,
+ ((vm_object_offset_t)
+ ((offset - local_start) + local_offset)),
+ *upl_size,
+ upl,
+ page_list,
+ count,
+ caller_flags);
+ vm_object_deallocate(local_object);
+
+ return ret;
}
/*
assert(alias_page->fictitious);
alias_page->fictitious = FALSE;
alias_page->private = TRUE;
- alias_page->pageout = TRUE;
+ alias_page->free_when_done = TRUE;
/*
* since m is a page in the upl it must
* already be wired or BUSY, so it's
* safe to assign the underlying physical
* page to the alias
*/
- alias_page->phys_page = m->phys_page;
+ VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m));
vm_object_unlock(object);
vm_page_lockspin_queues();
- vm_page_wire(alias_page);
+ vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE);
vm_page_unlock_queues();
/*
* ENCRYPTED SWAP:
* The virtual page ("m") has to be wired in some way
- * here or its physical page ("m->phys_page") could
+ * here or its backing physical page could
* be recycled at any time.
* Assuming this is enforced by the caller, we can't
* get an encrypted page here. Since the encryption
*/
ASSERT_PAGE_DECRYPTED(m);
- vm_page_insert(alias_page, upl->map_object, new_offset);
+ vm_page_insert_wired(alias_page, upl->map_object, new_offset, VM_KERN_MEMORY_NONE);
assert(!alias_page->wanted);
alias_page->busy = FALSE;
* NEED A UPL_MAP ALIAS
*/
kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
- VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
+ upl->map_object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
+ vm_object_deallocate(upl->map_object);
upl_unlock(upl);
return(kr);
}
}
else {
kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
- VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
+ VM_FLAGS_FIXED | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
+ upl->map_object, offset, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if(kr)
panic("vm_map_enter failed for a Vector UPL\n");
* but only in kernel space. If this was on a user map,
* we'd have to set the wpmapped bit. */
/* m->wpmapped = TRUE; */
- assert(map==kernel_map);
+ assert(map->pmap == kernel_pmap);
- PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, VM_PROT_NONE, 0, TRUE);
+ PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE);
}
offset += PAGE_SIZE_64;
}
upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
vm_object_t object;
+ vm_object_t m_object;
vm_object_offset_t target_offset;
upl_offset_t subupl_offset = offset;
int entry;
upl_t vector_upl = NULL;
boolean_t should_be_throttled = FALSE;
+ vm_page_t nxt_page = VM_PAGE_NULL;
+ int fast_path_possible = 0;
+ int fast_path_full_commit = 0;
+ int throttle_page = 0;
+ int unwired_count = 0;
+ int local_queue_count = 0;
+ vm_page_t first_local, last_local;
+
*empty = FALSE;
if (upl == UPL_NULL)
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
+ assert(!(target_offset & PAGE_MASK));
+ assert(!(xfer_size & PAGE_MASK));
+
if (upl->flags & UPL_KERNEL_OBJECT)
vm_object_lock_shared(shadow_object);
else
*/
flags &= ~UPL_COMMIT_CS_VALIDATED;
}
- if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal)
+ if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal)
should_be_throttled = TRUE;
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+ if ((upl->flags & UPL_IO_WIRE) &&
+ !(flags & UPL_COMMIT_FREE_ABSENT) &&
+ !isVectorUPL &&
+ shadow_object->purgable != VM_PURGABLE_VOLATILE &&
+ shadow_object->purgable != VM_PURGABLE_EMPTY) {
+
+ if (!vm_page_queue_empty(&shadow_object->memq)) {
+
+ if (size == shadow_object->vo_size) {
+ nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq);
+ fast_path_full_commit = 1;
+ }
+ fast_path_possible = 1;
+
+ if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal &&
+ (shadow_object->purgable == VM_PURGABLE_DENY ||
+ shadow_object->purgable == VM_PURGABLE_NONVOLATILE ||
+ shadow_object->purgable == VM_PURGABLE_VOLATILE)) {
+ throttle_page = 1;
+ }
+ }
+ }
+ first_local = VM_PAGE_NULL;
+ last_local = VM_PAGE_NULL;
+
while (xfer_size) {
vm_page_t t, m;
if (upl->flags & UPL_LITE) {
unsigned int pg_num;
+ if (nxt_page != VM_PAGE_NULL) {
+ m = nxt_page;
+ nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->listq);
+ target_offset = m->offset;
+ }
pg_num = (unsigned int) (target_offset/PAGE_SIZE);
assert(pg_num == target_offset/PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
- if (!(upl->flags & UPL_KERNEL_OBJECT))
+ if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
- }
+ } else
+ m = NULL;
}
if (upl->flags & UPL_SHADOWED) {
if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
- t->pageout = FALSE;
+ t->free_when_done = FALSE;
VM_PAGE_FREE(t);
- if (m == VM_PAGE_NULL)
+ if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL)
m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
}
}
- if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
+ if (m == VM_PAGE_NULL)
goto commit_next_page;
- if (m->compressor) {
+ m_object = VM_PAGE_OBJECT(m);
+
+ if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
assert(m->busy);
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
*/
m->cs_validated = page_list[entry].cs_validated;
m->cs_tainted = page_list[entry].cs_tainted;
+ m->cs_nx = page_list[entry].cs_nx;
}
+ if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL)
+ m->written_by_kernel = TRUE;
+
if (upl->flags & UPL_IO_WIRE) {
if (page_list)
* so it will need to be
* re-validated.
*/
+ if (m->slid) {
+ panic("upl_commit_range(%p): page %p was slid\n",
+ upl, m);
+ }
+ assert(!m->slid);
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
- pmap_disconnect(m->phys_page);
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
}
clear_refmod |= VM_MEM_MODIFIED;
}
- if (flags & UPL_COMMIT_INACTIVATE) {
- dwp->dw_mask |= DW_vm_page_deactivate_internal;
- clear_refmod |= VM_MEM_REFERENCED;
- }
if (upl->flags & UPL_ACCESS_BLOCKED) {
/*
* We blocked access to the pages in this UPL.
*/
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
- if (m->absent) {
- if (flags & UPL_COMMIT_FREE_ABSENT)
- dwp->dw_mask |= DW_vm_page_free;
- else {
+ if (fast_path_possible) {
+ assert(m_object->purgable != VM_PURGABLE_EMPTY);
+ assert(m_object->purgable != VM_PURGABLE_VOLATILE);
+ if (m->absent) {
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(m->wire_count == 0);
+ assert(m->busy);
+
m->absent = FALSE;
dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+ } else {
+ if (m->wire_count == 0)
+ panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object);
+ assert(m->vm_page_q_state == VM_PAGE_IS_WIRED);
+
+ /*
+ * XXX FBDP need to update some other
+ * counters here (purgeable_wired_count)
+ * (ledgers), ...
+ */
+ assert(m->wire_count > 0);
+ m->wire_count--;
- if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
- dwp->dw_mask |= DW_vm_page_activate;
+ if (m->wire_count == 0) {
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ unwired_count++;
+ }
}
- } else
- dwp->dw_mask |= DW_vm_page_unwire;
+ if (m->wire_count == 0) {
+ assert(m->pageq.next == 0 && m->pageq.prev == 0);
+
+ if (last_local == VM_PAGE_NULL) {
+ assert(first_local == VM_PAGE_NULL);
+ last_local = m;
+ first_local = m;
+ } else {
+ assert(first_local != VM_PAGE_NULL);
+
+ m->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
+ first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m);
+ first_local = m;
+ }
+ local_queue_count++;
+
+ if (throttle_page) {
+ m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
+ } else {
+ if (flags & UPL_COMMIT_INACTIVATE) {
+ if (shadow_object->internal)
+ m->vm_page_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
+ else
+ m->vm_page_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
+ } else
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
+ }
+ }
+ } else {
+ if (flags & UPL_COMMIT_INACTIVATE) {
+ dwp->dw_mask |= DW_vm_page_deactivate_internal;
+ clear_refmod |= VM_MEM_REFERENCED;
+ }
+ if (m->absent) {
+ if (flags & UPL_COMMIT_FREE_ABSENT)
+ dwp->dw_mask |= DW_vm_page_free;
+ else {
+ m->absent = FALSE;
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+
+ if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
+ dwp->dw_mask |= DW_vm_page_activate;
+ }
+ } else
+ dwp->dw_mask |= DW_vm_page_unwire;
+ }
goto commit_next_page;
}
- assert(!m->compressor);
+ assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
if (page_list)
page_list[entry].phys_addr = 0;
dwp->dw_mask |= DW_vm_pageout_throttle_up;
if (VM_PAGE_WIRED(m))
- m->pageout = FALSE;
+ m->free_when_done = FALSE;
if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
m->cs_validated && !m->cs_tainted) {
* so it will need to be
* re-validated.
*/
+ if (m->slid) {
+ panic("upl_commit_range(%p): page %p was slid\n",
+ upl, m);
+ }
+ assert(!m->slid);
m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
vm_cs_validated_resets++;
#endif
- pmap_disconnect(m->phys_page);
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
}
if (m->overwriting) {
/*
* the (COPY_OUT_FROM == FALSE) request_page_list case
*/
if (m->busy) {
+#if CONFIG_PHANTOM_CACHE
+ if (m->absent && !m_object->internal)
+ dwp->dw_mask |= DW_vm_phantom_cache_update;
+#endif
m->absent = FALSE;
dwp->dw_mask |= DW_clear_busy;
}
m->cleaning = FALSE;
- if (m->pageout) {
+ if (m->free_when_done) {
/*
* With the clean queue enabled, UPL_PAGEOUT should
* no longer set the pageout bit. Its pages now go
* to the clean queue.
*/
assert(!(flags & UPL_PAGEOUT));
+ assert(!m_object->internal);
- m->pageout = FALSE;
+ m->free_when_done = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
#endif
if ((flags & UPL_COMMIT_SET_DIRTY) ||
- (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED))) {
+ (m->pmapped && (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED))) {
/*
* page was re-dirtied after we started
* the pageout... reactivate it since
* page has been successfully cleaned
* go ahead and free it for other use
*/
- if (m->object->internal) {
+ if (m_object->internal) {
DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
} else {
DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
}
#if MACH_CLUSTER_STATS
if (m->wpmapped)
- m->dirty = pmap_is_modified(m->phys_page);
+ m->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m));
if (m->dirty) vm_pageout_cluster_dirtied++;
else vm_pageout_cluster_cleaned++;
if (hibernate_cleaning_in_progress == FALSE && !m->dirty && (upl->flags & UPL_PAGEOUT)) {
pgpgout_count++;
- /* this page used to be dirty; now it's on the clean queue. */
- m->was_dirty = TRUE;
+ VM_STAT_INCR(pageouts);
+ DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
dwp->dw_mask |= DW_enqueue_cleaned;
vm_pageout_enqueued_cleaned_from_inactive_dirty++;
- } else if (should_be_throttled == TRUE && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+ } else if (should_be_throttled == TRUE && (m->vm_page_q_state == VM_PAGE_NOT_ON_Q)) {
/*
* page coming back in from being 'frozen'...
* it was dirty before it was frozen, so keep it so
dwp->dw_mask |= DW_vm_page_activate;
} else {
- if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
+ if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && (m->vm_page_q_state != VM_PAGE_ON_SPECULATIVE_Q)) {
dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
- } else if (!m->active && !m->inactive && !m->speculative) {
+ } else if ( !VM_PAGE_PAGEABLE(m)) {
if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
dwp->dw_mask |= DW_vm_page_speculate;
commit_next_page:
if (clear_refmod)
- pmap_clear_refmod(m->phys_page, clear_refmod);
+ pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod);
target_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
- vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
if (dw_count)
- vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+
+ if (fast_path_possible) {
+
+ assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
+ assert(shadow_object->purgable != VM_PURGABLE_EMPTY);
+
+ if (local_queue_count || unwired_count) {
+
+ if (local_queue_count) {
+ vm_page_t first_target;
+ vm_page_queue_head_t *target_queue;
+
+ if (throttle_page)
+ target_queue = &vm_page_queue_throttled;
+ else {
+ if (flags & UPL_COMMIT_INACTIVATE) {
+ if (shadow_object->internal)
+ target_queue = &vm_page_queue_anonymous;
+ else
+ target_queue = &vm_page_queue_inactive;
+ } else
+ target_queue = &vm_page_queue_active;
+ }
+ /*
+			 * Transfer the entire local queue to the appropriate LRU page queue.
+ */
+ vm_page_lockspin_queues();
+
+ first_target = (vm_page_t) vm_page_queue_first(target_queue);
+
+ if (vm_page_queue_empty(target_queue))
+ target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
+ else
+ first_target->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
+
+ target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
+ first_local->pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue);
+ last_local->pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target);
+
+ /*
+ * Adjust the global page counts.
+ */
+ if (throttle_page) {
+ vm_page_throttled_count += local_queue_count;
+ } else {
+ if (flags & UPL_COMMIT_INACTIVATE) {
+ if (shadow_object->internal)
+ vm_page_anonymous_count += local_queue_count;
+ vm_page_inactive_count += local_queue_count;
+
+ token_new_pagecount += local_queue_count;
+ } else
+ vm_page_active_count += local_queue_count;
+
+ if (shadow_object->internal)
+ vm_page_pageable_internal_count += local_queue_count;
+ else
+ vm_page_pageable_external_count += local_queue_count;
+ }
+ } else {
+ vm_page_lockspin_queues();
+ }
+ if (unwired_count) {
+ vm_page_wire_count -= unwired_count;
+ VM_CHECK_MEMORYSTATUS;
+ }
+ vm_page_unlock_queues();
+
+ shadow_object->wired_page_count -= unwired_count;
+ if (!shadow_object->wired_page_count) {
+ VM_OBJECT_UNWIRED(shadow_object);
+ }
+ }
+ }
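+	/*
+	 * The splice above is the point of the fast path: pages were strung
+	 * onto a private local list with only the object lock held, and the
+	 * whole list is attached to the target LRU queue (with the global
+	 * counters adjusted in bulk) under a single vm_page_lockspin_queues(),
+	 * instead of taking the page-queues lock once per page.
+	 */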
occupied = 1;
if (upl->flags & UPL_DEVICE_MEMORY) {
int pg_num;
int i;
- pg_num = upl->size/PAGE_SIZE;
- pg_num = (pg_num + 31) >> 5;
occupied = 0;
- for (i = 0; i < pg_num; i++) {
- if (lite_list[i] != 0) {
- occupied = 1;
- break;
+ if (!fast_path_full_commit) {
+ pg_num = upl->size/PAGE_SIZE;
+ pg_num = (pg_num + 31) >> 5;
+
+ for (i = 0; i < pg_num; i++) {
+ if (lite_list[i] != 0) {
+ occupied = 1;
+ break;
+ }
}
}
} else {
- if (queue_empty(&upl->map_object->memq))
+ if (vm_page_queue_empty(&upl->map_object->memq))
occupied = 0;
}
if (occupied == 0) {
}
goto process_upl_to_commit;
}
-
if (pgpgout_count) {
DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
}
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
+ assert(!(target_offset & PAGE_MASK));
+ assert(!(xfer_size & PAGE_MASK));
+
if (upl->flags & UPL_KERNEL_OBJECT)
vm_object_lock_shared(shadow_object);
else
}
if (upl->flags & UPL_SHADOWED) {
if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
- t->pageout = FALSE;
+ t->free_when_done = FALSE;
VM_PAGE_FREE(t);
if (m != VM_PAGE_NULL) {
- assert(!m->compressor);
+ assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR);
if (m->absent) {
boolean_t must_free = TRUE;
dwp->dw_mask |= DW_clear_busy;
}
- m->pageout = FALSE;
+ m->free_when_done = FALSE;
m->cleaning = FALSE;
-#if MACH_PAGEMAP
- vm_external_state_clr(m->object->existence_map, m->offset);
-#endif /* MACH_PAGEMAP */
+
if (error & UPL_ABORT_DUMP_PAGES) {
- pmap_disconnect(m->phys_page);
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
dwp->dw_mask |= DW_vm_page_free;
} else {
*/
dwp->dw_mask |= DW_vm_page_lru;
- } else if (!m->active && !m->inactive && !m->speculative)
+ } else if ( !VM_PAGE_PAGEABLE(m))
dwp->dw_mask |= DW_vm_page_deactivate_internal;
}
dwp->dw_mask |= DW_PAGE_WAKEUP;
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
- vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
if (dw_count)
- vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
occupied = 1;
}
}
} else {
- if (queue_empty(&upl->map_object->memq))
+ if (vm_page_queue_empty(&upl->map_object->memq))
occupied = 0;
}
if (occupied == 0) {
upl_t upl,
int error)
{
- boolean_t empty;
+ boolean_t empty;
+
+ if (upl == UPL_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return upl_abort_range(upl, 0, upl->size, error, &empty);
+}
+
+
+/* an option on commit should be wire */
+kern_return_t
+upl_commit(
+ upl_t upl,
+ upl_page_info_t *page_list,
+ mach_msg_type_number_t count)
+{
+ boolean_t empty;
+
+ if (upl == UPL_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
+}
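+
+/*
+ * Both wrappers above operate on the UPL's full [0, upl->size) range;
+ * the "empty" out-parameter is deliberately ignored, since a caller
+ * committing or aborting the whole UPL has no partial-range bookkeeping
+ * left to do.
+ */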
+
+
+void
+iopl_valid_data(
+ upl_t upl)
+{
+ vm_object_t object;
+ vm_offset_t offset;
+ vm_page_t m, nxt_page = VM_PAGE_NULL;
+ upl_size_t size;
+ int wired_count = 0;
+
+ if (upl == NULL)
+ panic("iopl_valid_data: NULL upl");
+ if (vector_upl_is_valid(upl))
+ panic("iopl_valid_data: vector upl");
+ if ((upl->flags & (UPL_DEVICE_MEMORY|UPL_SHADOWED|UPL_ACCESS_BLOCKED|UPL_IO_WIRE|UPL_INTERNAL)) != UPL_IO_WIRE)
+ panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
+
+ object = upl->map_object;
+
+ if (object == kernel_object || object == compressor_object)
+ panic("iopl_valid_data: object == kernel or compressor");
+
+ if (object->purgable == VM_PURGABLE_VOLATILE ||
+ object->purgable == VM_PURGABLE_EMPTY)
+ panic("iopl_valid_data: object %p purgable %d",
+ object, object->purgable);
+
+ size = upl->size;
+
+ vm_object_lock(object);
+
+ if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE))
+ nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
+ else
+ offset = 0 + upl->offset - object->paging_offset;
+
+ while (size) {
+
+ if (nxt_page != VM_PAGE_NULL) {
+ m = nxt_page;
+ nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->listq);
+ } else {
+			m = vm_page_lookup(object, offset);
+
+			/* advance only after the lookup so a failure panics with the offset that was actually probed */
+			if (m == VM_PAGE_NULL)
+				panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
+			offset += PAGE_SIZE;
+		}
+ if (m->busy) {
+ if (!m->absent)
+ panic("iopl_valid_data: busy page w/o absent");
+
+ if (m->pageq.next || m->pageq.prev)
+ panic("iopl_valid_data: busy+absent page on page queue");
+ if (m->reusable) {
+ panic("iopl_valid_data: %p is reusable", m);
+ }
+
+ m->absent = FALSE;
+ m->dirty = TRUE;
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(m->wire_count == 0);
+ m->wire_count++;
+ assert(m->wire_count);
+ if (m->wire_count == 1) {
+ m->vm_page_q_state = VM_PAGE_IS_WIRED;
+ wired_count++;
+ } else {
+ panic("iopl_valid_data: %p already wired\n", m);
+ }
+
+ PAGE_WAKEUP_DONE(m);
+ }
+ size -= PAGE_SIZE;
+ }
+ if (wired_count) {
+
+ if (!object->wired_page_count) {
+ VM_OBJECT_WIRED(object);
+ }
+ object->wired_page_count += wired_count;
+ assert(object->resident_page_count >= object->wired_page_count);
+
+ /* no need to adjust purgeable accounting for this object: */
+ assert(object->purgable != VM_PURGABLE_VOLATILE);
+ assert(object->purgable != VM_PURGABLE_EMPTY);
+
+ vm_page_lockspin_queues();
+ vm_page_wire_count += wired_count;
+ vm_page_unlock_queues();
+ }
+ vm_object_unlock(object);
+}
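+
+/*
+ * Usage note: iopl_valid_data() is the second half of the UPL_NOZEROFILL
+ * fast path (see vm_object_iopl_wire_empty(), which inserts pages absent
+ * and unwired). Once the I/O has actually filled the pages, this routine
+ * flips them to valid and dirty, wires them, and settles the object and
+ * global wire counts in one pass.
+ */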
+
+vm_tag_t
+iopl_set_tag(
+ upl_t upl,
+ vm_tag_t tag)
+{
+ vm_object_t object;
+ vm_tag_t prior_tag;
- return upl_abort_range(upl, 0, upl->size, error, &empty);
-}
+ if (upl == NULL)
+ panic("%s: NULL upl", __FUNCTION__);
+ if (vector_upl_is_valid(upl))
+ panic("%s: vector upl", __FUNCTION__);
+ if (kernel_object == upl->map_object)
+ return (tag);
+ if ((upl->flags & (UPL_DEVICE_MEMORY|UPL_SHADOWED|UPL_ACCESS_BLOCKED|UPL_IO_WIRE|UPL_INTERNAL)) != UPL_IO_WIRE)
+ return (tag);
+ object = upl->map_object;
+ vm_object_lock(object);
-/* an option on commit should be wire */
-kern_return_t
-upl_commit(
- upl_t upl,
- upl_page_info_t *page_list,
- mach_msg_type_number_t count)
-{
- boolean_t empty;
+ prior_tag = object->wire_tag;
+ object->wire_tag = tag;
+ if (VM_KERN_MEMORY_NONE == prior_tag) prior_tag = tag;
+ vm_object_unlock(object);
- return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
+ return (prior_tag);
}
+
void
vm_object_set_pmap_cache_attr(
vm_object_t object,
}
}
+
+boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t);
+kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_object_offset_t *, int);
+
+
+
+boolean_t
+vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
+ wpl_array_t lite_list, upl_control_flags_t cntrl_flags)
+{
+ vm_page_t dst_page;
+ vm_tag_t tag;
+ unsigned int entry;
+ int page_count;
+ int delayed_unlock = 0;
+ boolean_t retval = TRUE;
+ ppnum_t phys_page;
+
+ vm_object_lock_assert_exclusive(object);
+ assert(object->purgable != VM_PURGABLE_VOLATILE);
+ assert(object->purgable != VM_PURGABLE_EMPTY);
+ assert(object->pager == NULL);
+ assert(object->copy == NULL);
+ assert(object->shadow == NULL);
+
+ tag = UPL_MEMORY_TAG(cntrl_flags);
+ page_count = object->resident_page_count;
+ dst_page = (vm_page_t)vm_page_queue_first(&object->memq);
+
+ vm_page_lock_queues();
+
+ while (page_count--) {
+
+ if (dst_page->busy ||
+ dst_page->fictitious ||
+ dst_page->absent ||
+ dst_page->error ||
+ dst_page->cleaning ||
+ dst_page->restart ||
+ dst_page->encrypted ||
+ dst_page->laundry) {
+ retval = FALSE;
+ goto done;
+ }
+ if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
+ retval = FALSE;
+ goto done;
+ }
+ dst_page->reference = TRUE;
+
+ vm_page_wire(dst_page, tag, FALSE);
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ }
+ entry = (unsigned int)(dst_page->offset / PAGE_SIZE);
+		assert(entry < object->resident_page_count);	/* entry is unsigned, so >= 0 is implied */
+ lite_list[entry>>5] |= 1 << (entry & 31);
+
+ phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
+ if (phys_page > upl->highest_page)
+ upl->highest_page = phys_page;
+
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = phys_page;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].free_when_done = dst_page->free_when_done;
+ user_page_list[entry].precious = dst_page->precious;
+ user_page_list[entry].device = FALSE;
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = FALSE;
+ user_page_list[entry].cs_tainted = FALSE;
+ user_page_list[entry].cs_nx = FALSE;
+ user_page_list[entry].needed = FALSE;
+ user_page_list[entry].mark = FALSE;
+ }
+ if (delayed_unlock++ > 256) {
+ delayed_unlock = 0;
+ lck_mtx_yield(&vm_page_queue_lock);
+
+ VM_CHECK_MEMORYSTATUS;
+ }
+ dst_page = (vm_page_t)vm_page_queue_next(&dst_page->listq);
+ }
+done:
+ vm_page_unlock_queues();
+
+ VM_CHECK_MEMORYSTATUS;
+
+ return (retval);
+}
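+
+/*
+ * Note on the full-object fast path above: it succeeds only when every
+ * resident page can be wired as-is. Any page that is busy, fictitious,
+ * absent, in error, being cleaned, restarted, encrypted or in the
+ * laundry makes it bail with FALSE, after which the caller is expected
+ * to fall back to the general page-by-page path.
+ */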
+
+
+kern_return_t
+vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
+ wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_object_offset_t *dst_offset, int page_count)
+{
+ vm_page_t dst_page;
+ vm_tag_t tag;
+ boolean_t no_zero_fill = FALSE;
+ int interruptible;
+ int pages_wired = 0;
+ int pages_inserted = 0;
+ int entry = 0;
+ uint64_t delayed_ledger_update = 0;
+ kern_return_t ret = KERN_SUCCESS;
+ int grab_options;
+ ppnum_t phys_page;
+
+ vm_object_lock_assert_exclusive(object);
+ assert(object->purgable != VM_PURGABLE_VOLATILE);
+ assert(object->purgable != VM_PURGABLE_EMPTY);
+ assert(object->pager == NULL);
+ assert(object->copy == NULL);
+ assert(object->shadow == NULL);
+
+ if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
+ interruptible = THREAD_ABORTSAFE;
+ else
+ interruptible = THREAD_UNINT;
+
+ if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
+ no_zero_fill = TRUE;
+
+ tag = UPL_MEMORY_TAG(cntrl_flags);
+
+ grab_options = 0;
+#if CONFIG_SECLUDED_MEMORY
+ if (object->can_grab_secluded) {
+ grab_options |= VM_PAGE_GRAB_SECLUDED;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ while (page_count--) {
+
+ while ((dst_page = vm_page_grab_options(grab_options))
+ == VM_PAGE_NULL) {
+
+ OSAddAtomic(page_count, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
+
+ if (vm_page_wait(interruptible) == FALSE) {
+ /*
+ * interrupted case
+ */
+ OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
+
+ ret = MACH_SEND_INTERRUPTED;
+ goto done;
+ }
+ OSAddAtomic(-page_count, &vm_upl_wait_for_pages);
+
+ VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
+ }
+ if (no_zero_fill == FALSE)
+ vm_page_zero_fill(dst_page);
+ else
+ dst_page->absent = TRUE;
+
+ dst_page->reference = TRUE;
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
+ SET_PAGE_DIRTY(dst_page, FALSE);
+ }
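+		/*
+		 * Only pages that were actually zero-filled are wired and
+		 * woken up here; pages created absent (no-zero-fill) stay
+		 * busy and unwired. The global wire count is updated once,
+		 * after the loop.
+		 */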
+ if (dst_page->absent == FALSE) {
+ assert(dst_page->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(dst_page->wire_count == 0);
+ dst_page->wire_count++;
+ dst_page->vm_page_q_state = VM_PAGE_IS_WIRED;
+ assert(dst_page->wire_count);
+ pages_wired++;
+ PAGE_WAKEUP_DONE(dst_page);
+ }
+ pages_inserted++;
+
+ vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update);
+
+ lite_list[entry>>5] |= 1 << (entry & 31);
+
+ phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
+ if (phys_page > upl->highest_page)
+ upl->highest_page = phys_page;
+
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = phys_page;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].free_when_done = FALSE;
+ user_page_list[entry].precious = FALSE;
+ user_page_list[entry].device = FALSE;
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = FALSE;
+ user_page_list[entry].cs_tainted = FALSE;
+ user_page_list[entry].cs_nx = FALSE;
+ user_page_list[entry].needed = FALSE;
+ user_page_list[entry].mark = FALSE;
+ }
+ entry++;
+ *dst_offset += PAGE_SIZE_64;
+ }
+done:
+ if (pages_wired) {
+ vm_page_lockspin_queues();
+ vm_page_wire_count += pages_wired;
+ vm_page_unlock_queues();
+ }
+ if (pages_inserted) {
+ if (object->internal) {
+ OSAddAtomic(pages_inserted, &vm_page_internal_count);
+ } else {
+ OSAddAtomic(pages_inserted, &vm_page_external_count);
+ }
+ }
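+	/*
+	 * Apply the ledger credits that were deferred during page
+	 * insertion: one batched update instead of one per page.
+	 */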
+ if (delayed_ledger_update) {
+ task_t owner;
+
+ owner = object->vo_purgeable_owner;
+ assert(owner);
+
+ /* more non-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ delayed_ledger_update);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ delayed_ledger_update);
+ }
+ return (ret);
+}
+
+
unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
+
kern_return_t
vm_object_iopl_request(
vm_object_t object,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
- int cntrl_flags)
+ upl_control_flags_t cntrl_flags)
{
vm_page_t dst_page;
vm_object_offset_t dst_offset;
int dw_limit;
int dw_index;
boolean_t caller_lookup;
+ int io_tracking_flag = 0;
+ int interruptible;
+ ppnum_t phys_page;
+
+ boolean_t set_cache_attr_needed = FALSE;
+ boolean_t free_wired_pages = FALSE;
+ boolean_t fast_path_empty_req = FALSE;
+ boolean_t fast_path_full_req = FALSE;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
/*
else
prot = VM_PROT_READ | VM_PROT_WRITE;
- if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
- size = MAX_UPL_SIZE * PAGE_SIZE;
-
- if (cntrl_flags & UPL_SET_INTERNAL) {
- if (page_list_count != NULL)
- *page_list_count = MAX_UPL_SIZE;
- }
- if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
- ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
- return KERN_INVALID_ARGUMENT;
-
if ((!object->internal) && (object->paging_offset != 0))
panic("vm_object_iopl_request: external object with non-zero paging offset\n");
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if ((object->io_tracking && object != kernel_object) || upl_debug_enabled)
+ io_tracking_flag |= UPL_CREATE_IO_TRACKING;
+#endif
+
+#if CONFIG_IOSCHED
+ if (object->io_tracking) {
+ /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
+ if (object != kernel_object)
+ io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
+ }
+#endif
if (object->phys_contiguous)
psize = PAGE_SIZE;
psize = size;
if (cntrl_flags & UPL_SET_INTERNAL) {
- upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+ upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
lite_list = NULL;
}
} else {
- upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+ upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize);
lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
if (size == 0) {
upl->flags |= UPL_ACCESS_BLOCKED;
}
- if (object->phys_contiguous) {
-#if UPL_DEBUG
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (upl->flags & UPL_TRACKED_BY_OBJECT) {
vm_object_activity_begin(object);
queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
+ }
+#endif
+
+ if (object->phys_contiguous) {
if (upl->flags & UPL_ACCESS_BLOCKED) {
assert(!object->blocked_access);
/*
* Protect user space from future COW operations
*/
+#if VM_OBJECT_TRACKING_OP_TRUESHARE
+ if (!object->true_share &&
+ vm_object_tracking_inited) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int num = 0;
+
+ num = OSBacktrace(bt,
+ VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ object,
+ VM_OBJECT_TRACKING_OP_TRUESHARE,
+ bt,
+ num);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
+
+ vm_object_lock_assert_exclusive(object);
object->true_share = TRUE;
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
}
-
-#if UPL_DEBUG
- vm_object_activity_begin(object);
- queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
object->copy != VM_OBJECT_NULL) {
iopl_cow_pages += size >> PAGE_SHIFT;
#endif
}
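+	/*
+	 * A request is eligible for one of the two fast paths only when
+	 * it covers the entire object from offset 0 and the object is
+	 * "simple": no copy or shadow chain, no pager, not volatile or
+	 * empty purgeable, and no 32-bit-address or block-access
+	 * constraints. A fully resident object takes the wire-full
+	 * path; a completely empty one takes the wire-empty path.
+	 */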
+ if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
+ object->purgable != VM_PURGABLE_VOLATILE &&
+ object->purgable != VM_PURGABLE_EMPTY &&
+ object->copy == NULL &&
+ size == object->vo_size &&
+ offset == 0 &&
+ object->shadow == NULL &&
+	    object->pager == NULL) {
+		if (object->resident_page_count == size_in_pages) {
+			assert(object != compressor_object);
+			assert(object != kernel_object);
+			fast_path_full_req = TRUE;
+		} else if (object->resident_page_count == 0) {
+			assert(object != compressor_object);
+			assert(object != kernel_object);
+			fast_path_empty_req = TRUE;
+			set_cache_attr_needed = TRUE;
+		}
+ }
+ if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
+ interruptible = THREAD_ABORTSAFE;
+ else
+ interruptible = THREAD_UNINT;
entry = 0;
xfer_size = size;
dst_offset = offset;
+ dw_count = 0;
+
+ if (fast_path_full_req) {
+
+ if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags) == TRUE)
+ goto finish;
+ /*
+ * we couldn't complete the processing of this request on the fast path
+ * so fall through to the slow path and finish up
+ */
+
+ } else if (fast_path_empty_req) {
+
+ if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
+ ret = KERN_MEMORY_ERROR;
+ goto return_err;
+ }
+ ret = vm_object_iopl_wire_empty(object, upl, user_page_list, lite_list, cntrl_flags, &dst_offset, size_in_pages);
+
+ if (ret) {
+ free_wired_pages = TRUE;
+ goto return_err;
+ }
+ goto finish;
+ }
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.user_tag = 0;
fault_info.stealth = FALSE;
fault_info.io_sync = FALSE;
fault_info.cs_bypass = FALSE;
- fault_info.mark_zf_absent = (0 == (cntrl_flags & UPL_NOZEROFILLIO));
+ fault_info.mark_zf_absent = TRUE;
+ fault_info.interruptible = interruptible;
+ fault_info.batch_pmap_op = TRUE;
dwp = &dw_array[0];
- dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
while (xfer_size) {
vm_fault_return_t result;
- unsigned int pg_num;
dwp->dw_mask = 0;
+ if (fast_path_full_req) {
+			/*
+			 * if we get here, it means that we ran into a page
+			 * state we couldn't handle in the fast path and
+			 * bailed out to the slow path... since the order
+			 * we look at pages differs between the two paths,
+			 * the following check determines whether this page
+			 * was already processed in the fast path
+			 */
+ if (lite_list[entry>>5] & (1 << (entry & 31)))
+ goto skip_page;
+ }
dst_page = vm_page_lookup(object, dst_offset);
/*
ret = KERN_MEMORY_ERROR;
goto return_err;
}
+ set_cache_attr_needed = TRUE;
/*
* We just looked up the page and the result remains valid
do {
vm_page_t top_page;
kern_return_t error_code;
- int interruptible;
- if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
- interruptible = THREAD_ABORTSAFE;
- else
- interruptible = THREAD_UNINT;
-
- fault_info.interruptible = interruptible;
fault_info.cluster_size = xfer_size;
- fault_info.batch_pmap_op = TRUE;
vm_object_paging_begin(object);
if (top_page != VM_PAGE_NULL) {
vm_object_t local_object;
- local_object = top_page->object;
-
- if (top_page->object != dst_page->object) {
+ local_object = VM_PAGE_OBJECT(top_page);
+
+ /*
+ * comparing 2 packed pointers
+ */
+ if (top_page->vm_page_object != dst_page->vm_page_object) {
vm_object_lock(local_object);
VM_PAGE_FREE(top_page);
vm_object_paging_end(local_object);
break;
case VM_FAULT_MEMORY_SHORTAGE:
- OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);
+ OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);
if (vm_page_wait(interruptible)) {
- OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
+ OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
vm_object_lock(object);
break;
}
- OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);
+ OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages);
VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);
} while (result != VM_FAULT_SUCCESS);
}
+ phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
+
if (upl->flags & UPL_KERNEL_OBJECT)
goto record_phys_addr;
- if (dst_page->compressor) {
+ if (dst_page->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
dst_page->busy = TRUE;
goto record_phys_addr;
}
PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
- if (dst_page->laundry) {
- dst_page->pageout = FALSE;
-
+ if (dst_page->laundry)
vm_pageout_steal_laundry(dst_page, FALSE);
- }
+
if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
- dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
+ phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
vm_page_t low_page;
int refmod;
* to find the new page being substituted.
*/
if (dst_page->pmapped)
- refmod = pmap_disconnect(dst_page->phys_page);
+ refmod = pmap_disconnect(phys_page);
else
refmod = 0;
*/
if ( !dst_page->absent)
dst_page->busy = FALSE;
+
+ phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);
}
if ( !dst_page->busy)
dwp->dw_mask |= DW_vm_page_wire;
if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
SET_PAGE_DIRTY(dst_page, TRUE);
}
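+		/*
+		 * If the kernel wrote to this page and the caller demands
+		 * coherency, flush the page's cache lines before handing
+		 * it over, then clear the flag.
+		 */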
+ if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
+ pmap_sync_page_attributes_phys(phys_page);
+ dst_page->written_by_kernel = FALSE;
+ }
+
record_phys_addr:
if (dst_page->busy)
upl->flags |= UPL_HAS_BUSY;
- pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
- assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
- lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+ lite_list[entry>>5] |= 1 << (entry & 31);
- if (dst_page->phys_page > upl->highest_page)
- upl->highest_page = dst_page->phys_page;
+ if (phys_page > upl->highest_page)
+ upl->highest_page = phys_page;
if (user_page_list) {
- user_page_list[entry].phys_addr = dst_page->phys_page;
- user_page_list[entry].pageout = dst_page->pageout;
+ user_page_list[entry].phys_addr = phys_page;
+ user_page_list[entry].free_when_done = dst_page->free_when_done;
user_page_list[entry].absent = dst_page->absent;
user_page_list[entry].dirty = dst_page->dirty;
user_page_list[entry].precious = dst_page->precious;
user_page_list[entry].device = FALSE;
user_page_list[entry].needed = FALSE;
if (dst_page->clustered == TRUE)
- user_page_list[entry].speculative = dst_page->speculative;
+ user_page_list[entry].speculative = (dst_page->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
else
user_page_list[entry].speculative = FALSE;
user_page_list[entry].cs_validated = dst_page->cs_validated;
user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+ user_page_list[entry].cs_nx = dst_page->cs_nx;
+ user_page_list[entry].mark = FALSE;
}
if (object != kernel_object && object != compressor_object) {
/*
* update clustered and speculative state
*
*/
- VM_PAGE_CONSUME_CLUSTERED(dst_page);
+ if (dst_page->clustered)
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
}
+skip_page:
entry++;
dst_offset += PAGE_SIZE_64;
xfer_size -= PAGE_SIZE;
VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);
if (dw_count >= dw_limit) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, UPL_MEMORY_TAG(cntrl_flags), &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
}
}
- if (dw_count)
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ assert(entry == size_in_pages);
- vm_object_set_pmap_cache_attr(object, user_page_list, entry, TRUE);
+ if (dw_count)
+ vm_page_do_delayed_work(object, UPL_MEMORY_TAG(cntrl_flags), &dw_array[0], dw_count);
+finish:
+ if (user_page_list && set_cache_attr_needed == TRUE)
+ vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE);
if (page_list_count != NULL) {
if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
- else if (*page_list_count > entry)
- *page_list_count = entry;
+ else if (*page_list_count > size_in_pages)
+ *page_list_count = size_in_pages;
}
vm_object_unlock(object);
assert(!object->blocked_access);
object->blocked_access = TRUE;
}
+
return KERN_SUCCESS;
return_err:
}
vm_page_lock_queues();
- if (dst_page->absent) {
+ if (dst_page->absent || free_wired_pages == TRUE) {
vm_page_free(dst_page);
need_unwire = FALSE;
* Make each UPL point to the correct VM object, i.e. the
* object holding the pages that the UPL refers to...
*/
-#if UPL_DEBUG
- queue_remove(&object1->uplq, upl1, upl_t, uplq);
- queue_remove(&object2->uplq, upl2, upl_t, uplq);
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
+ vm_object_lock(object1);
+ vm_object_lock(object2);
+ }
+ if (upl1->flags & UPL_TRACKED_BY_OBJECT)
+ queue_remove(&object1->uplq, upl1, upl_t, uplq);
+ if (upl2->flags & UPL_TRACKED_BY_OBJECT)
+ queue_remove(&object2->uplq, upl2, upl_t, uplq);
#endif
upl1->map_object = object2;
upl2->map_object = object1;
-#if UPL_DEBUG
- queue_enter(&object1->uplq, upl2, upl_t, uplq);
- queue_enter(&object2->uplq, upl1, upl_t, uplq);
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (upl1->flags & UPL_TRACKED_BY_OBJECT)
+ queue_enter(&object2->uplq, upl1, upl_t, uplq);
+ if (upl2->flags & UPL_TRACKED_BY_OBJECT)
+ queue_enter(&object1->uplq, upl2, upl_t, uplq);
+ if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) {
+ vm_object_unlock(object2);
+ vm_object_unlock(object1);
+ }
#endif
}
if (kr != KERN_SUCCESS) {
panic("vm_paging_map_init: kernel_map full\n");
}
- map_entry->object.vm_object = kernel_object;
- map_entry->offset = page_map_offset;
+ VME_OBJECT_SET(map_entry, kernel_object);
+ VME_OFFSET_SET(map_entry, page_map_offset);
map_entry->protection = VM_PROT_NONE;
map_entry->max_protection = VM_PROT_NONE;
map_entry->permanent = TRUE;
/* use permanent 1-to-1 kernel mapping of physical memory ? */
#if __x86_64__
*address = (vm_map_offset_t)
- PHYSMAP_PTOV((pmap_paddr_t)page->phys_page <<
+ PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) <<
PAGE_SHIFT);
*need_unmap = FALSE;
return KERN_SUCCESS;
*/
vm_paging_page_waiter_total++;
vm_paging_page_waiter++;
- thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
- &vm_paging_lock,
- THREAD_UNINT);
+ kr = assert_wait((event_t)&vm_paging_page_waiter, THREAD_UNINT);
+ if (kr == THREAD_WAITING) {
+ simple_unlock(&vm_paging_lock);
+ kr = thread_block(THREAD_CONTINUE_NULL);
+ simple_lock(&vm_paging_lock);
+ }
vm_paging_page_waiter--;
/* ... and try again */
}
}
page->pmapped = TRUE;
- //assert(pmap_verify_free(page->phys_page));
+ //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
PMAP_ENTER(kernel_pmap,
*address + page_map_offset,
page,
}
}
-#if CRYPTO
+#if ENCRYPTED_SWAP
/*
* Encryption data.
* "iv" is the "initial vector". Ideally, we want to
vm_map_size_t kernel_mapping_size;
boolean_t kernel_mapping_needs_unmap;
vm_offset_t kernel_vaddr;
+ vm_object_t page_object;
union {
unsigned char aes_iv[AES_BLOCK_SIZE];
struct {
ASSERT_PAGE_DECRYPTED(page);
+ page_object = VM_PAGE_OBJECT(page);
+
/*
* Take a paging-in-progress reference to keep the object
* alive even if we have to unlock it (in vm_paging_map_object()
* for example)...
*/
- vm_object_paging_begin(page->object);
+ vm_object_paging_begin(page_object);
if (kernel_mapping_offset == 0) {
/*
kernel_mapping_size = PAGE_SIZE;
kernel_mapping_needs_unmap = FALSE;
kr = vm_paging_map_object(page,
- page->object,
+ page_object,
page->offset,
VM_PROT_READ | VM_PROT_WRITE,
FALSE,
* use to break the key.
*/
bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
- encrypt_iv.vm.pager_object = page->object->pager;
+ encrypt_iv.vm.pager_object = page_object->pager;
encrypt_iv.vm.paging_offset =
- page->object->paging_offset + page->offset;
+ page_object->paging_offset + page->offset;
/* encrypt the "initial vector" */
aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
* the caller undo the mapping if needed.
*/
if (kernel_mapping_needs_unmap) {
- vm_paging_unmap_object(page->object,
+ vm_paging_unmap_object(page_object,
kernel_mapping_offset,
kernel_mapping_offset + kernel_mapping_size);
}
* The software bits will be reset later after the I/O
* has completed (in upl_commit_range()).
*/
- pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
+ pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(page), VM_MEM_REFERENCED | VM_MEM_MODIFIED);
page->encrypted = TRUE;
- vm_object_paging_end(page->object);
+ vm_object_paging_end(page_object);
}
/*
vm_map_size_t kernel_mapping_size;
vm_offset_t kernel_vaddr;
boolean_t kernel_mapping_needs_unmap;
+ vm_object_t page_object;
union {
unsigned char aes_iv[AES_BLOCK_SIZE];
struct {
assert(page->busy);
assert(page->encrypted);
+ page_object = VM_PAGE_OBJECT(page);
was_dirty = page->dirty;
/*
* alive even if we have to unlock it (in vm_paging_map_object()
* for example)...
*/
- vm_object_paging_begin(page->object);
+ vm_object_paging_begin(page_object);
if (kernel_mapping_offset == 0) {
/*
kernel_mapping_size = PAGE_SIZE;
kernel_mapping_needs_unmap = FALSE;
kr = vm_paging_map_object(page,
- page->object,
+ page_object,
page->offset,
VM_PROT_READ | VM_PROT_WRITE,
FALSE,
* used to encrypt that page.
*/
bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
- decrypt_iv.vm.pager_object = page->object->pager;
+ decrypt_iv.vm.pager_object = page_object->pager;
decrypt_iv.vm.paging_offset =
- page->object->paging_offset + page->offset;
+ page_object->paging_offset + page->offset;
/* encrypt the "initial vector" */
aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
* the caller undo the mapping if needed.
*/
if (kernel_mapping_needs_unmap) {
- vm_paging_unmap_object(page->object,
+ vm_paging_unmap_object(page_object,
kernel_vaddr,
kernel_vaddr + PAGE_SIZE);
}
*/
page->dirty = FALSE;
assert (page->cs_validated == FALSE);
- pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(page), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
}
page->encrypted = FALSE;
* be part of a DMA transfer from a driver that expects the memory to
* be coherent at this point, we have to flush the data cache.
*/
- pmap_sync_page_attributes_phys(page->phys_page);
+ pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(page));
/*
* Since the page is not mapped yet, some code might assume that it
* doesn't need to invalidate the instruction cache when writing to
* that page. That code relies on "pmapped" being FALSE, so that the
* caches get synchronized when the page is first mapped.
*/
- assert(pmap_verify_free(page->phys_page));
+ assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
page->pmapped = FALSE;
page->wpmapped = FALSE;
- vm_object_paging_end(page->object);
+ vm_object_paging_end(page_object);
}
#if DEVELOPMENT || DEBUG
* encryption completes, any access will cause a
* page fault and the page gets decrypted at that time.
*/
- pmap_disconnect(page->phys_page);
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
vm_page_encrypt(page, 0);
if (vm_object_lock_avoid(shadow_object)) {
goto process_upl_to_encrypt;
}
-#else /* CRYPTO */
+#else /* ENCRYPTED_SWAP */
void
upl_encrypt(
__unused upl_t upl,
{
}
-#endif /* CRYPTO */
+#endif /* ENCRYPTED_SWAP */
/*
* page->object must be locked
vm_page_lockspin_queues();
}
+ page->free_when_done = FALSE;
/*
* need to drop the laundry count...
* we may also need to remove it
upl_unlock(upl);
}
+#if CONFIG_IOSCHED
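+/*
+ * Record the disk block number and I/O size for each page in the
+ * given UPL range so the I/O scheduler can reprioritize an
+ * in-flight request. This is a no-op unless the UPL was created
+ * with expedite support.
+ */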
+void
+upl_set_blkno(
+ upl_t upl,
+ vm_offset_t upl_offset,
+ int io_size,
+ int64_t blkno)
+{
+	int i, j;
+
+ if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
+ return;
+
+ assert(upl->upl_reprio_info != 0);
+	for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
+ UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
+ }
+}
+#endif
+
boolean_t
vm_page_is_slideable(vm_page_t m)
{
boolean_t result = FALSE;
vm_shared_region_slide_info_t si;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(m);
- vm_object_lock_assert_held(m->object);
+ vm_object_lock_assert_held(m_object);
/* make sure our page belongs to the one object allowed to do this */
- if (!m->object->object_slid) {
+ if (!m_object->object_slid) {
goto done;
}
- si = m->object->vo_slide_info;
+ si = m_object->vo_slide_info;
if (si == NULL) {
goto done;
}
vm_map_size_t kernel_mapping_size;
boolean_t kernel_mapping_needs_unmap;
vm_offset_t kernel_vaddr;
- uint32_t pageIndex = 0;
+ uint32_t pageIndex;
+ uint32_t slide_chunk;
+ vm_object_t page_object;
+
+ page_object = VM_PAGE_OBJECT(page);
assert(!page->slid);
- assert(page->object->object_slid);
- vm_object_lock_assert_exclusive(page->object);
+ assert(page_object->object_slid);
+ vm_object_lock_assert_exclusive(page_object);
if (page->error)
return KERN_FAILURE;
* alive even if we have to unlock it (in vm_paging_map_object()
* for example)...
*/
- vm_object_paging_begin(page->object);
+ vm_object_paging_begin(page_object);
if (kernel_mapping_offset == 0) {
/*
kernel_mapping_size = PAGE_SIZE;
kernel_mapping_needs_unmap = FALSE;
kr = vm_paging_map_object(page,
- page->object,
+ page_object,
page->offset,
VM_PROT_READ | VM_PROT_WRITE,
FALSE,
/*assert that slide_file_info.start/end are page-aligned?*/
assert(!page->slid);
- assert(page->object->object_slid);
+ assert(page_object->object_slid);
+
+ pageIndex = (uint32_t)((page->offset -
+ page_object->vo_slide_info->start) /
+ PAGE_SIZE_FOR_SR_SLIDE);
+ for (slide_chunk = 0;
+ slide_chunk < PAGE_SIZE / PAGE_SIZE_FOR_SR_SLIDE;
+ slide_chunk++) {
+ kr = vm_shared_region_slide_page(page_object->vo_slide_info,
+ (kernel_vaddr +
+ (slide_chunk *
+ PAGE_SIZE_FOR_SR_SLIDE)),
+ (pageIndex + slide_chunk));
+ if (kr != KERN_SUCCESS) {
+ break;
+ }
+ }
- pageIndex = (uint32_t)((page->offset - page->object->vo_slide_info->start)/PAGE_SIZE);
- kr = vm_shared_region_slide_page(page->object->vo_slide_info, kernel_vaddr, pageIndex);
vm_page_slide_counter++;
/*
* Unmap the page from the kernel's address space,
*/
if (kernel_mapping_needs_unmap) {
- vm_paging_unmap_object(page->object,
+ vm_paging_unmap_object(page_object,
kernel_vaddr,
kernel_vaddr + PAGE_SIZE);
}
page->dirty = FALSE;
- pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
+ pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(page), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
if (kr != KERN_SUCCESS || cs_debug > 1) {
printf("vm_page_slide(%p): "
"obj %p off 0x%llx mobj %p moff 0x%llx\n",
page,
- page->object, page->offset,
- page->object->pager,
- page->offset + page->object->paging_offset);
+ page_object, page->offset,
+ page_object->pager,
+ page->offset + page_object->paging_offset);
}
if (kr == KERN_SUCCESS) {
vm_page_slide_errors++;
}
- vm_object_paging_end(page->object);
+ vm_object_paging_end(page_object);
return kr;
}
return(UPL_PHYS_PAGE(upl, index));
}
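+/*
+ * Get/set the caller-owned "mark" bit on one entry of a UPL's
+ * page-info array.
+ */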
+void
+upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
+{
+ upl[index].mark = v;
+}
+
+boolean_t
+upl_page_get_mark(upl_page_info_t *upl, int index)
+{
+ return upl[index].mark;
+}
void
vm_countdirtypages(void)
precpages=0;
vm_page_lock_queues();
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
do {
if (m ==(vm_page_t )0) break;
if(m->dirty) dpages++;
- if(m->pageout) pgopages++;
+ if(m->free_when_done) pgopages++;
if(m->precious) precpages++;
- assert(m->object != kernel_object);
- m = (vm_page_t) queue_next(&m->pageq);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m ==(vm_page_t )0) break;
- } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
+ } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
vm_page_lock_queues();
- m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
do {
if (m ==(vm_page_t )0) break;
dpages++;
assert(m->dirty);
- assert(!m->pageout);
- assert(m->object != kernel_object);
- m = (vm_page_t) queue_next(&m->pageq);
+ assert(!m->free_when_done);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m ==(vm_page_t )0) break;
- } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
+ } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
vm_page_lock_queues();
- m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
do {
if (m ==(vm_page_t )0) break;
if(m->dirty) dpages++;
- if(m->pageout) pgopages++;
+ if(m->free_when_done) pgopages++;
if(m->precious) precpages++;
- assert(m->object != kernel_object);
- m = (vm_page_t) queue_next(&m->pageq);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ m = (vm_page_t) vm_page_queue_next(&m->pageq);
if (m ==(vm_page_t )0) break;
- } while (!queue_end(&vm_page_queue_anonymous,(queue_entry_t) m));
+ } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
precpages=0;
vm_page_lock_queues();
- m = (vm_page_t) queue_first(&vm_page_queue_active);
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
do {
if(m == (vm_page_t )0) break;
if(m->dirty) dpages++;
- if(m->pageout) pgopages++;
+ if(m->free_when_done) pgopages++;
if(m->precious) precpages++;
- assert(m->object != kernel_object);
- m = (vm_page_t) queue_next(&m->pageq);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ m = (vm_page_t) vm_page_queue_next(&m->pageq);
if(m == (vm_page_t )0) break;
- } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
+ } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
vm_page_unlock_queues();
printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
return upl->size;
}
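+/*
+ * Trivial accessors for a UPL's associated_upl link.
+ */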
+upl_t
+upl_associated_upl(upl_t upl)
+{
+ return upl->associated_upl;
+}
+
+void
+upl_set_associated_upl(upl_t upl, upl_t associated_upl)
+{
+ upl->associated_upl = associated_upl;
+}
+
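+/*
+ * Return the vnode backing an external (file-backed) UPL,
+ * or NULL for internal (anonymous) memory.
+ */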
+struct vnode *
+upl_lookup_vnode(upl_t upl)
+{
+ if (!upl->map_object->internal)
+ return vnode_pager_lookup_vnode(upl->map_object->pager);
+ else
+ return NULL;
+}
+
#if UPL_DEBUG
kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
+
+#if VM_PRESSURE_EVENTS
+/*
+ * Upward trajectory: decide whether the system should escalate to
+ * the next memory-pressure level. Each predicate has two regimes:
+ * when the compressor is active, thresholds are expressed in terms
+ * of available non-compressed memory; otherwise they are based on
+ * the memorystatus page counts.
+ */
+extern boolean_t vm_compressor_low_on_space(void);
+
+boolean_t
+VM_PRESSURE_NORMAL_TO_WARNING(void)
+{
+
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+
+ /* Available pages below our threshold */
+ if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
+ /* No frozen processes to kill */
+ if (memorystatus_frozen_count == 0) {
+ /* Not enough suspended processes available. */
+ if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+
+ } else {
+ return ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0);
+ }
+}
+
+boolean_t
+VM_PRESSURE_WARNING_TO_CRITICAL(void)
+{
+
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+
+ /* Available pages below our threshold */
+ if (memorystatus_available_pages < memorystatus_available_pages_critical) {
+ return TRUE;
+ }
+ return FALSE;
+ } else {
+ return (vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0);
+ }
+}
+
+/*
+ * Downward trajectory: decide whether pressure has eased enough to
+ * step back down a level. The 15% margin above the corresponding
+ * upward threshold provides hysteresis against oscillation.
+ */
+boolean_t
+VM_PRESSURE_WARNING_TO_NORMAL(void)
+{
+
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+
+ /* Available pages above our threshold */
+ unsigned int target_threshold = memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100);
+ if (memorystatus_available_pages > target_threshold) {
+ return TRUE;
+ }
+ return FALSE;
+ } else {
+ return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0);
+ }
+}
+
+boolean_t
+VM_PRESSURE_CRITICAL_TO_WARNING(void)
+{
+
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+
+ /* Available pages above our threshold */
+ unsigned int target_threshold = memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100);
+ if (memorystatus_available_pages > target_threshold) {
+ return TRUE;
+ }
+ return FALSE;
+ } else {
+ return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0);
+ }
+}
+#endif /* VM_PRESSURE_EVENTS */
+