#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/xpr.h>
+#include <kern/ledger.h>
#include <vm/pmap.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor.h>
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
+
#include <IOKit/IOHibernatePrivate.h>
#include <sys/kdebug.h>
* or VP, table.]
*/
typedef struct {
- vm_page_t pages;
+ vm_page_packed_t page_list;
#if MACH_PAGE_HASH_STATS
int cur_count; /* current count */
int hi_count; /* high water mark */
unsigned int vm_colors;
unsigned int vm_color_mask; /* mask is == (vm_colors-1) */
unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
+unsigned int vm_free_magazine_refill_limit = 0;
queue_head_t vm_page_queue_free[MAX_COLORS];
unsigned int vm_page_free_wanted;
unsigned int vm_page_free_wanted_privileged;
unsigned int vm_page_free_count;
unsigned int vm_page_fictitious_count;
-unsigned int vm_page_free_count_minimum; /* debugging */
-
/*
* Occasionally, the virtual memory system uses
* resident page structures that do not refer to
unsigned int vm_page_wire_count;
unsigned int vm_page_wire_count_initial;
unsigned int vm_page_gobble_count = 0;
-unsigned int vm_page_wire_count_warning = 0;
-unsigned int vm_page_gobble_count_warning = 0;
+
+#define VM_PAGE_WIRE_COUNT_WARNING 0
+#define VM_PAGE_GOBBLE_COUNT_WARNING 0
unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
uint64_t vm_page_purged_count = 0; /* total count of purged pages */
+unsigned int vm_page_xpmapped_external_count = 0;
unsigned int vm_page_external_count = 0;
unsigned int vm_page_internal_count = 0;
unsigned int vm_page_pageable_external_count = 0;
unsigned int vm_page_free_target = 0;
unsigned int vm_page_free_min = 0;
unsigned int vm_page_throttle_limit = 0;
-uint32_t vm_page_creation_throttle = 0;
unsigned int vm_page_inactive_target = 0;
unsigned int vm_page_anonymous_min = 0;
unsigned int vm_page_inactive_min = 0;
void
vm_set_page_size(void)
{
- page_mask = page_size - 1;
+ page_size = PAGE_SIZE;
+ page_mask = PAGE_MASK;
+ page_shift = PAGE_SHIFT;
if ((page_mask & page_size) != 0)
panic("vm_set_page_size: page size not a power of two");
break;
}
+#define COLOR_GROUPS_TO_STEAL 4
+
/* Called once during startup, once the cache geometry is known.
*/
vm_colors = n;
vm_color_mask = n - 1;
+
+ vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
}
m->pageq.prev = NULL;
m->listq.next = NULL;
m->listq.prev = NULL;
- m->next = VM_PAGE_NULL;
+ m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
m->object = VM_OBJECT_NULL; /* reset later */
m->offset = (vm_object_offset_t) -1; /* reset later */
m->no_cache = FALSE;
m->reusable = FALSE;
m->slid = FALSE;
- m->was_dirty = FALSE;
m->xpmapped = FALSE;
m->compressor = FALSE;
m->written_by_kernel = FALSE;
purgeable_queues[i].debug_count_objects = 0;
#endif
};
+ purgeable_nonvolatile_count = 0;
+ queue_init(&purgeable_nonvolatile_queue);
for (i = 0; i < MAX_COLORS; i++ )
queue_init(&vm_page_queue_free[i]);
/*
* Steal memory for the map and zone subsystems.
*/
+ kernel_debug_string("zone_steal_memory");
zone_steal_memory();
+ kernel_debug_string("vm_map_steal_memory");
vm_map_steal_memory();
/*
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
+ kernel_debug_string("vm_page_buckets");
vm_page_buckets = (vm_page_bucket_t *)
pmap_steal_memory(vm_page_bucket_count *
sizeof(vm_page_bucket_t));
+ kernel_debug_string("vm_page_bucket_locks");
vm_page_bucket_locks = (lck_spin_t *)
pmap_steal_memory(vm_page_bucket_lock_count *
sizeof(lck_spin_t));
for (i = 0; i < vm_page_bucket_count; i++) {
register vm_page_bucket_t *bucket = &vm_page_buckets[i];
- bucket->pages = VM_PAGE_NULL;
+ bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if MACH_PAGE_HASH_STATS
bucket->cur_count = 0;
bucket->hi_count = 0;
* to get the alignment right.
*/
+ kernel_debug_string("pmap_startup");
pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count; /* initial value */
vm_page_wire_count_initial = vm_page_wire_count;
- vm_page_free_count_minimum = vm_page_free_count;
printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
vm_page_free_count, vm_page_wire_count);
+ kernel_debug_string("vm_page_bootstrap complete");
simple_lock_init(&vm_paging_lock, 0);
}
return (void *) addr;
}
+void vm_page_release_startup(vm_page_t mem);
void
pmap_startup(
vm_offset_t *startp,
ppnum_t phys_page;
addr64_t tmpaddr;
+
+#if defined(__LP64__)
+ /*
+ * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use
+ */
+ assert(sizeof(struct vm_page) == 64);
+
+ /*
+ * make sure we are aligned on a 64 byte boundary
+ * for VM_PAGE_PACK_PTR (it clips off the low-order
+ * 6 bits of the pointer)
+ */
+ if (virtual_space_start != virtual_space_end)
+ virtual_space_start = round_page(virtual_space_start);
+#endif
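+
+ /*
+ * Illustrative sketch of the packing scheme (hypothetical names; the
+ * real VM_PAGE_PACK_PTR / VM_PAGE_UNPACK_PTR macros are defined
+ * elsewhere in the VM headers): because every struct vm_page is 64
+ * bytes in size and 64-byte aligned, a full pointer can be stored as
+ * a 32-bit value by subtracting a fixed base address and shifting off
+ * the always-zero low-order 6 bits, roughly:
+ *
+ *	packed   = (vm_page_packed_t)
+ *	    (((uintptr_t) m - (uintptr_t) packed_base) >> 6);
+ *	unpacked = (vm_page_t)
+ *	    (((uintptr_t) packed << 6) + (uintptr_t) packed_base);
+ *
+ * "packed_base" stands in for whatever base address the real macros
+ * use; the round-trip panics further down verify that unpacking
+ * recovers the exact original vm_page pointers.
+ */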
+
/*
* We calculate how many page frames we will have
* and then allocate the page structures in one chunk.
/*
* Initialize the page frames.
*/
+ kernel_debug_string("Initialize the page frames");
for (i = 0, pages_initialized = 0; i < npages; i++) {
if (!pmap_next_page(&phys_page))
break;
}
vm_pages_count = pages_initialized;
+#if defined(__LP64__)
+
+ if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0])
+ panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+ if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1])
+ panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+ kernel_debug_string("page fill/release");
/*
* Check if we want to initialize pages to a known value
*/
// free low -> high so high is preferred
for (i = 1; i <= pages_initialized; i++) {
		if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
- vm_page_release(&vm_pages[i - 1]);
+ vm_page_release_startup(&vm_pages[i - 1]);
}
}
else
*/
for (i = pages_initialized; i > 0; i--) {
		if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
- vm_page_release(&vm_pages[i - 1]);
+ vm_page_release_startup(&vm_pages[i - 1]);
}
+ VM_CHECK_MEMORYSTATUS;
+
#if 0
{
vm_page_t xx, xxo, xxl;
boolean_t insert_in_hash,
boolean_t batch_pmap_op)
{
- vm_page_bucket_t *bucket;
- lck_spin_t *bucket_lock;
- int hash_id;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+ int hash_id;
+ task_t owner;
XPR(XPR_VM_PAGE,
"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
assert(page_aligned(offset));
- if (object == vm_submap_object) {
- /* the vm_submap_object is only a placeholder for submaps */
- panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
- }
+ /* the vm_submap_object is only a placeholder for submaps */
+ assert(object != vm_submap_object);
vm_object_lock_assert_exclusive(object);
#if DEBUG
lck_spin_lock(bucket_lock);
- mem->next = bucket->pages;
- bucket->pages = mem;
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+ assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list));
+
#if MACH_PAGE_HASH_STATS
if (++bucket->cur_count > bucket->hi_count)
bucket->hi_count = bucket->cur_count;
OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
}
+ if (object->purgable == VM_PURGABLE_DENY) {
+ owner = TASK_NULL;
+ } else {
+ owner = object->vo_purgeable_owner;
+ }
+ if (owner &&
+ (object->purgable == VM_PURGABLE_NONVOLATILE ||
+ VM_PAGE_WIRED(mem))) {
+ /* more non-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+
+ } else if (owner &&
+ (object->purgable == VM_PURGABLE_VOLATILE ||
+ object->purgable == VM_PURGABLE_EMPTY)) {
+ assert(! VM_PAGE_WIRED(mem));
+ /* more volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
+
if (object->purgable == VM_PURGABLE_VOLATILE) {
if (VM_PAGE_WIRED(mem)) {
- OSAddAtomic(1, &vm_page_purgeable_wired_count);
+ OSAddAtomic(+1, &vm_page_purgeable_wired_count);
} else {
- OSAddAtomic(1, &vm_page_purgeable_count);
+ OSAddAtomic(+1, &vm_page_purgeable_count);
}
} else if (object->purgable == VM_PURGABLE_EMPTY &&
mem->throttled) {
if (queues_lock_held == FALSE)
vm_page_unlock_queues();
}
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+ if (vm_object_tracking_inited &&
+ object->internal &&
+ object->resident_page_count == 0 &&
+ object->pager == NULL &&
+ object->shadow != NULL &&
+ object->shadow->copy == object) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int numsaved = 0;
+
+ numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ object,
+ VM_OBJECT_TRACKING_OP_MODIFIED,
+ bt,
+ numsaved);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
}
/*
lck_spin_lock(bucket_lock);
- if (bucket->pages) {
- vm_page_t *mp = &bucket->pages;
- vm_page_t m = *mp;
+ if (bucket->page_list) {
+ vm_page_packed_t *mp = &bucket->page_list;
+ vm_page_t m = VM_PAGE_UNPACK_PTR(*mp);
do {
if (m->object == object && m->offset == offset) {
/*
* Remove old page from hash list
*/
- *mp = m->next;
+ *mp = m->next_m;
m->hashed = FALSE;
found_m = m;
break;
}
- mp = &m->next;
- } while ((m = *mp));
+ mp = &m->next_m;
+ } while ((m = VM_PAGE_UNPACK_PTR(*mp)));
- mem->next = bucket->pages;
+ mem->next_m = bucket->page_list;
} else {
- mem->next = VM_PAGE_NULL;
+ mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
}
/*
* insert new page at head of hash list
*/
- bucket->pages = mem;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
mem->hashed = TRUE;
lck_spin_unlock(bucket_lock);
vm_page_t this;
lck_spin_t *bucket_lock;
int hash_id;
+ task_t owner;
XPR(XPR_VM_PAGE,
"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
lck_spin_lock(bucket_lock);
- if ((this = bucket->pages) == mem) {
+ if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) {
/* optimize for common case */
- bucket->pages = mem->next;
+ bucket->page_list = mem->next_m;
} else {
- vm_page_t *prev;
+ vm_page_packed_t *prev;
- for (prev = &this->next;
- (this = *prev) != mem;
- prev = &this->next)
+ for (prev = &this->next_m;
+ (this = VM_PAGE_UNPACK_PTR(*prev)) != mem;
+ prev = &this->next_m)
continue;
- *prev = this->next;
+ *prev = this->next_m;
}
#if MACH_PAGE_HASH_STATS
bucket->cur_count--;
mem->object->resident_page_count--;
if (mem->object->internal) {
+#if DEBUG
assert(vm_page_internal_count);
+#endif /* DEBUG */
+
OSAddAtomic(-1, &vm_page_internal_count);
} else {
assert(vm_page_external_count);
OSAddAtomic(-1, &vm_page_external_count);
+
+ if (mem->xpmapped) {
+ assert(vm_page_xpmapped_external_count);
+ OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+ }
}
if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
if (mem->object->resident_page_count == 0)
vm_page_stats_reusable.reused_remove++;
}
+ if (mem->object->purgable == VM_PURGABLE_DENY) {
+ owner = TASK_NULL;
+ } else {
+ owner = mem->object->vo_purgeable_owner;
+ }
+ if (owner &&
+ (mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ VM_PAGE_WIRED(mem))) {
+ /* less non-volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* less footprint */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ } else if (owner &&
+ (mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY)) {
+ assert(! VM_PAGE_WIRED(mem));
+ /* less volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
if (VM_PAGE_WIRED(mem)) {
assert(vm_page_purgeable_wired_count > 0);
* at outside the scope of the hash bucket lock... this is a
 * really cheap optimization to avoid taking the lock
*/
- if (bucket->pages == VM_PAGE_NULL) {
+ if (!bucket->page_list) {
vm_page_lookup_bucket_NULL++;
return (VM_PAGE_NULL);
lck_spin_lock(bucket_lock);
- for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
+ for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
#if 0
/*
* we don't hold the page queue lock
* this is an interface to support bring-up of drivers
* on platforms with physical memory > 4G...
*/
-int vm_himemory_mode = 0;
+int vm_himemory_mode = 2;
/*
* request from the per-cpu queue.
*/
-#define COLOR_GROUPS_TO_STEAL 4
-
vm_page_t
vm_page_grab( void )
return_page_from_cpu_list:
PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
- mem->pageq.next = NULL;
enable_preemption();
+ mem->pageq.next = NULL;
assert(mem->listq.next == NULL && mem->listq.prev == NULL);
assert(mem->tabled == FALSE);
* Optionally produce warnings if the wire or gobble
* counts exceed some threshold.
*/
- if (vm_page_wire_count_warning > 0
- && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+ if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
printf("mk: vm_page_grab(): high wired page count of %d\n",
vm_page_wire_count);
- assert(vm_page_wire_count < vm_page_wire_count_warning);
}
- if (vm_page_gobble_count_warning > 0
- && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+ if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
printf("mk: vm_page_grab(): high gobbled page count of %d\n",
vm_page_gobble_count);
- assert(vm_page_gobble_count < vm_page_gobble_count_warning);
}
-
+#endif
lck_mtx_lock_spin(&vm_page_queue_free_lock);
/*
if (vm_page_free_count <= vm_page_free_reserved)
pages_to_steal = 1;
else {
- pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-
- if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+ if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+ pages_to_steal = vm_free_magazine_refill_limit;
+ else
pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
}
color = PROCESSOR_DATA(current_processor(), start_color);
head = tail = NULL;
+ vm_page_free_count -= pages_to_steal;
+
while (pages_to_steal--) {
- if (--vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
while (queue_empty(&vm_page_queue_free[color]))
color = (color + 1) & vm_color_mask;
tail->pageq.next = (queue_t)mem;
tail = mem;
- mem->pageq.prev = NULL;
assert(mem->listq.next == NULL && mem->listq.prev == NULL);
assert(mem->tabled == FALSE);
assert(mem->object == VM_OBJECT_NULL);
assert(!mem->wpmapped);
assert(!pmap_is_noencrypt(mem->phys_page));
}
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
PROCESSOR_DATA(current_processor(), start_color) = color;
mem = head;
mem->pageq.next = NULL;
- lck_mtx_unlock(&vm_page_queue_free_lock);
-
enable_preemption();
}
/*
VM_CHECK_MEMORYSTATUS;
}
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released
+ * for the first time. Hence, no locking is needed and no unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
+ */
+void
+vm_page_release_startup(
+ register vm_page_t mem)
+{
+ queue_t queue_free;
+
+ if (vm_lopage_free_count < vm_lopage_free_limit &&
+ mem->phys_page < max_valid_low_ppnum) {
+ mem->lopage = TRUE;
+ vm_lopage_free_count++;
+ queue_free = &vm_lopage_queue_free;
+ } else {
+ mem->lopage = FALSE;
+ mem->free = TRUE;
+ vm_page_free_count++;
+ queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask];
+ }
+ queue_enter_first(queue_free, mem, vm_page_t, pageq);
+}
+
/*
* vm_page_wait:
*
VM_PAGE_CHECK(mem);
assert(!mem->free);
assert(!mem->cleaning);
-#if DEBUG
+
+#if MACH_ASSERT || DEBUG
lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
if (mem->free)
panic("vm_page_free: freeing page on free list\n");
-#endif
+#endif /* MACH_ASSERT || DEBUG */
if (mem->object) {
vm_object_lock_assert_exclusive(mem->object);
}
assert(vm_page_purgeable_wired_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_wired_count);
}
+ if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY) &&
+ mem->object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = mem->object->vo_purgeable_owner;
+ /*
+ * While wired, this page was accounted
+ * as "non-volatile" but it should now
+ * be accounted as "volatile".
+ */
+ /* one less "non-volatile"... */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* ... and "phys_footprint" */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ /* one more "volatile" */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
}
if (!mem->private && !mem->fictitious)
vm_page_wire_count--;
OSAddAtomic(-1, &vm_page_purgeable_count);
OSAddAtomic(1, &vm_page_purgeable_wired_count);
}
+ if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY) &&
+ mem->object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = mem->object->vo_purgeable_owner;
+ /* less volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ /* more not-quite-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
if (mem->object->all_reusable) {
/*
* Wired pages are not counted as "re-usable"
assert(vm_page_purgeable_wired_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_wired_count);
}
- assert(!mem->laundry);
+ if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY) &&
+ mem->object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = mem->object->vo_purgeable_owner;
+ /* more volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ /* less not-quite-volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* less footprint */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
assert(mem->object != kernel_object);
assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
+ if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
return;
if (!m->absent && clear_hw_reference == TRUE)
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->clean_queue || m->pageout_queue || m->private || m->fictitious)
+ if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious)
return;
VM_PAGE_QUEUES_REMOVE(m);
* The page queues must be locked.
*/
-#if CONFIG_JETSAM
-#if LATENCY_JETSAM
-extern struct vm_page jetsam_latency_page[NUM_OF_JETSAM_LATENCY_TOKENS];
-#endif /* LATENCY_JETSAM */
-#endif /* CONFIG_JETSAM */
-
void
vm_page_activate(
register vm_page_t m)
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->fictitious || m->compressor)
+ if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
return;
#if DEBUG
} else {
vm_page_pageable_external_count++;
}
-#if LATENCY_JETSAM
- if (jlp_init) {
- uint64_t now = mach_absolute_time();
- uint64_t delta = now - jlp_time;
- clock_sec_t jl_secs = 0;
- clock_usec_t jl_usecs = 0;
- vm_page_t jlp;
-
- absolutetime_to_microtime(delta, &jl_secs, &jl_usecs);
-
- jl_usecs += jl_secs * USEC_PER_SEC;
- if (jl_usecs >= JETSAM_LATENCY_TOKEN_AGE) {
-
- jlp = &jetsam_latency_page[jlp_current];
- if (jlp->active) {
- queue_remove(&vm_page_queue_active, jlp, vm_page_t, pageq);
- }
- queue_enter(&vm_page_queue_active, jlp, vm_page_t, pageq);
-
- jlp->active = TRUE;
-
- jlp->offset = now;
- jlp_time = jlp->offset;
-
- if(++jlp_current == NUM_OF_JETSAM_LATENCY_TOKENS) {
- jlp_current = 0;
- }
-
- }
- }
-#endif /* LATENCY_JETSAM */
}
m->reference = TRUE;
m->no_cache = FALSE;
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->fictitious || m->compressor)
+ if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
return;
VM_PAGE_QUEUES_REMOVE(m);
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
+ if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
return;
m->no_cache = FALSE;
printf("vm_page %p: \n", p);
printf(" pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
printf(" listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
- printf(" next=%p\n", p->next);
+ printf(" next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m));
printf(" object=%p offset=0x%llx\n", p->object, p->offset);
printf(" wire_count=%u\n", p->wire_count);
/*
* Check the free lists for proper length etc.
*/
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
static unsigned int
vm_page_verify_free_list(
queue_head_t *vm_page_queue,
vm_page_t prev_m;
boolean_t found_page;
+ if (! vm_page_verify_this_free_list_enabled)
+ return 0;
+
found_page = FALSE;
npages = 0;
prev_m = (vm_page_t) vm_page_queue;
return npages;
}
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
static void
vm_page_verify_free_lists( void )
{
unsigned int color, npages, nlopages;
+ boolean_t toggle = TRUE;
- if (! vm_page_verify_free_lists_enabled)
+ if (! vm_page_verify_all_free_lists_enabled)
return;
npages = 0;
lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_verify_this_free_list_enabled == TRUE) {
+ /*
+ * This variable has been set globally for extra checking of
+	 * each free list queue. Since we didn't set it, we don't own it
+ * and we shouldn't toggle it.
+ */
+ toggle = FALSE;
+ }
+
+ if (toggle == TRUE) {
+ vm_page_verify_this_free_list_enabled = TRUE;
+ }
for( color = 0; color < vm_colors; color++ ) {
npages += vm_page_verify_free_list(&vm_page_queue_free[color],
"npages %u free_count %d nlopages %u lo_free_count %u",
npages, vm_page_free_count, nlopages, vm_lopage_free_count);
+ if (toggle == TRUE) {
+ vm_page_verify_this_free_list_enabled = FALSE;
+ }
+
lck_mtx_unlock(&vm_page_queue_free_lock);
}
vm_page_free_count--;
}
}
- /*
- * adjust global freelist counts
- */
- if (vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
-
if( flags & KMA_LOMEM)
vm_page_lomem_find_contiguous_last_idx = page_idx;
else
assert(!m1->gobbled);
assert(!m1->private);
m2->no_cache = m1->no_cache;
- m2->xpmapped = m1->xpmapped;
+ m2->xpmapped = 0;
assert(!m1->busy);
assert(!m1->wanted);
assert(!m1->fictitious);
assert(!m1->lopage);
m2->slid = m1->slid;
- m2->was_dirty = m1->was_dirty;
m2->compressor = m1->compressor;
/*
if (dwp->dw_mask & DW_vm_pageout_throttle_up)
vm_pageout_throttle_up(m);
-
+#if CONFIG_PHANTOM_CACHE
+ if (dwp->dw_mask & DW_vm_phantom_cache_update)
+ vm_phantom_cache_update(m);
+#endif
if (dwp->dw_mask & DW_vm_page_wire)
vm_page_wire(m);
else if (dwp->dw_mask & DW_vm_page_unwire) {
boolean_t queueit;
- queueit = (dwp->dw_mask & DW_vm_page_free) ? FALSE : TRUE;
+ queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
vm_page_unwire(m, queueit);
}
int cd_found_laundry;
int cd_found_dirty;
int cd_found_xpmapped;
+ int cd_skipped_xpmapped;
int cd_local_free;
int cd_total_free;
int cd_vm_page_wire_count;
} hibernate_stats;
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define HIBERNATE_XPMAPPED_LIMIT 40000
+
static int
hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
goto reenter_pg_on_q;
}
- vm_pageout_scan_wants_object = m_object;
vm_page_unlock_queues();
mutex_pause(try_failed_count++);
continue;
} else {
l_object = m_object;
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
}
if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
vm_object_unlock(l_object);
l_object = NULL;
}
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
while (retval == 0) {
VM_PAGE_QUEUES_REMOVE(m);
- if (COMPRESSED_PAGER_IS_ACTIVE)
- pmap_disconnect(m->phys_page);
+ if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE)
+ pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
vm_pageout_cluster(m, FALSE);
vm_object_unlock(l_object);
l_object = NULL;
}
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_page_unlock_queues();
struct vm_speculative_age_q *aq;
uint32_t i;
- bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
-
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++)
vm_page_reactivate_local(i, TRUE, FALSE);
}
+void
+hibernate_reset_stats()
+{
+ bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
+
int
hibernate_flush_memory()
{
if (COMPRESSED_PAGER_IS_ACTIVE) {
- if ((retval = hibernate_flush_dirty_pages(2)) == 0) {
-
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
vm_compressor_flush();
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
- }
}
- if (retval == 0 && consider_buffer_cache_collect != NULL) {
+ if (consider_buffer_cache_collect != NULL) {
unsigned int orig_wire_count;
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
if (discard == FALSE) {
if (!preflight)
hibernate_stats.cd_found_dirty++;
- } else if (m->xpmapped && m->reference) {
- if (!preflight)
- hibernate_stats.cd_found_xpmapped++;
- discard = FALSE;
+ } else if (m->xpmapped && m->reference && !object->internal) {
+ if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+ if (!preflight)
+ hibernate_stats.cd_found_xpmapped++;
+ discard = FALSE;
+ } else {
+ if (!preflight)
+ hibernate_stats.cd_skipped_xpmapped++;
+ }
}
}
while (FALSE);
*/
return;
-#if DEBUG
+#if MACH_ASSERT || DEBUG
vm_object_t object = m->object;
if (!vm_object_lock_try(m->object))
panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
#else
/* No need to lock page queue for token delete, hibernate_vm_unlock()
makes sure these locks are uncontended before sleep */
-#endif /* !DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
if (m->pmapped == TRUE)
{
vm_purgeable_token_delete_first(old_queue);
}
m->object->purgable = VM_PURGABLE_EMPTY;
+
+ /*
+ * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
+ * accounted in the "volatile" ledger, so no change here.
+ * We have to update vm_page_purgeable_count, though, since we're
+ * effectively purging this object.
+ */
+ unsigned int delta;
+ assert(m->object->resident_page_count >= m->object->wired_page_count);
+ delta = (m->object->resident_page_count - m->object->wired_page_count);
+ assert(vm_page_purgeable_count >= delta);
+ assert(delta > 0);
+ OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
}
vm_page_free(m);
-#if DEBUG
+#if MACH_ASSERT || DEBUG
vm_object_unlock(object);
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
}
/*
discard_all = will_discard;
}
-#if DEBUG
+#if MACH_ASSERT || DEBUG
if (!preflight)
{
vm_page_lock_queues();
}
}
}
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
m = next;
}
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
- while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+ m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+ while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
{
next = (vm_page_t) m->pageq.next;
discard = FALSE;
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_inactive++;
+ count_discard_cleaned++;
discard = discard_all;
}
else
- count_inactive++;
+ count_cleaned++;
count_wire--;
if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
if (discard) hibernate_discard_page(m);
m = next;
}
- m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
- while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ discard = discard_all;
+ }
+ else
+ count_active++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
{
next = (vm_page_t) m->pageq.next;
discard = FALSE;
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_cleaned++;
+ count_discard_inactive++;
discard = discard_all;
}
else
- count_cleaned++;
+ count_inactive++;
count_wire--;
if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
if (discard) hibernate_discard_page(m);
}
}
- m = (vm_page_t) queue_first(&vm_page_queue_active);
- while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
- {
- next = (vm_page_t) m->pageq.next;
- discard = FALSE;
- if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
- && hibernate_consider_discard(m, preflight))
- {
- if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
- if (m->dirty)
- count_discard_purgeable++;
- else
- count_discard_active++;
- discard = discard_all;
- }
- else
- count_active++;
- count_wire--;
- if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- if (discard) hibernate_discard_page(m);
- m = next;
- }
-
queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
{
count_compressor++;
discard_all ? "did" : "could",
count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+ if (hibernate_stats.cd_skipped_xpmapped)
+ HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
-#if DEBUG
+#if MACH_ASSERT || DEBUG
if (!preflight)
{
if (vm_page_local_q) {
}
vm_page_unlock_queues();
}
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
if (preflight) {
lck_mtx_unlock(&vm_page_queue_free_lock);
uint32_t count_discard_speculative = 0;
-#if DEBUG
+#if MACH_ASSERT || DEBUG
vm_page_lock_queues();
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++) {
VPL_LOCK(&lq->vpl_lock);
}
}
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
clock_get_uptime(&start);
m = next;
}
-#if DEBUG
+#if MACH_ASSERT || DEBUG
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++) {
struct vpl *lq;
}
}
vm_page_unlock_queues();
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
clock_get_uptime(&end);
absolutetime_to_nanoseconds(end - start, &nsec);
hash_id = vm_page_hash(mem->object, mem->offset);
bucket = &vm_page_buckets[hash_id];
- mem->next = bucket->pages;
- bucket->pages = mem;
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
}
 * hibernate_teardown_vm_structs records, in "next", the location
 * where this vm_page_t must be placed.
*/
- tmem = mem->next;
- mem->next = NULL;
+ tmem = VM_PAGE_UNPACK_PTR(mem->next_m);
+ mem->next_m = VM_PAGE_PACK_PTR(NULL);
sindx = (int)(tmem - &vm_pages[0]);
* vm_page_t's that were created on the fly (i.e. fictitious)
*/
for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
- mem_next = mem->next;
+ mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
- mem->next = NULL;
+ mem->next_m = VM_PAGE_PACK_PTR(NULL);
hibernate_hash_insert_page(mem);
}
hibernate_rebuild_hash_list = NULL;
bucket = &vm_page_buckets[i];
- for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) {
+ for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) {
assert(mem->hashed);
- mem_next = mem->next;
+ mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
- mem->next = hibernate_rebuild_hash_list;
+ mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
hibernate_rebuild_hash_list = mem;
}
}
* as an indicator to the rebuild function that
* we don't have to move it
*/
- mem->next = mem;
+ mem->next_m = VM_PAGE_PACK_PTR(mem);
if (vm_pages[compact_target_indx].free) {
/*
bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
lck_spin_lock(bucket_lock);
- for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+ for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m))
bucket_count++;
lck_spin_unlock(bucket_lock);
panic("BUCKET_CHECK: corruption at %p in fake buckets "
"[0x%llx:0x%llx]\n",
cp,
- vm_page_fake_buckets_start,
- vm_page_fake_buckets_end);
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
}
}
#endif /* VM_PAGE_FAKE_BUCKETS */
for (i = 0; i < vm_page_bucket_count; i++) {
bucket = &vm_page_buckets[i];
- if (bucket->pages == VM_PAGE_NULL) {
+ if (!bucket->page_list) {
continue;
}
bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
lck_spin_lock(bucket_lock);
- p = bucket->pages;
+ p = VM_PAGE_UNPACK_PTR(bucket->page_list);
while (p != VM_PAGE_NULL) {
if (!p->hashed) {
panic("BUCKET_CHECK: page %p (%p,0x%llx) "
i, bucket, p, p->object, p->offset,
p_hash);
}
- p = p->next;
+ p = VM_PAGE_UNPACK_PTR(p->next_m);
}
lck_spin_unlock(bucket_lock);
}