#include <debug.h>
#include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/xpr.h>
+#include <kern/ledger.h>
#include <vm/pmap.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h> /* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
+#include <mach_debug/zone_info.h>
#include <vm/cpm.h>
#include <pexpert/pexpert.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor.h>
+#if CONFIG_PHANTOM_CACHE
+#include <vm/vm_phantom_cache.h>
+#endif
+
#include <IOKit/IOHibernatePrivate.h>
#include <sys/kdebug.h>
static void vm_page_free_prepare(vm_page_t page);
static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr);
+static void vm_tag_init(void);
-
+uint64_t vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
/*
 * Associated with page of user-allocatable memory is a
 * page structure.
 */

/*
 * The vm_page_lookup() routine, which provides for fast
 * (virtual memory object, offset) to page lookup, employs
 * the following hash table.  [This table is often called
 * the virtual-to-physical, or VP, table.]
 */
typedef struct {
- vm_page_t pages;
+ vm_page_packed_t page_list;
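+ /*
+ * page_list is a 32-bit packed pointer (vm_page_packed_t) to the head of
+ * this hash chain.  Sketch of the encoding (see VM_PAGE_PACK_PTR /
+ * VM_PAGE_UNPACK_PTR in vm_page.h):
+ *    pack(m)   == ((uintptr_t)m - VM_MIN_KERNEL_AND_KEXT_ADDRESS) >> 6
+ *    unpack(p) == ((uintptr_t)p << 6) + VM_MIN_KERNEL_AND_KEXT_ADDRESS
+ * which is why struct vm_page must be 64 bytes in size and 64-byte aligned.
+ */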
#if MACH_PAGE_HASH_STATS
int cur_count; /* current count */
int hi_count; /* high water mark */
unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? */
lck_spin_t *vm_page_bucket_locks;
+lck_spin_t vm_objects_wired_lock;
+lck_spin_t vm_allocation_sites_lock;
+
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+extern int not_in_kdp;
#if MACH_PAGE_HASH_STATS
unsigned int vm_colors;
unsigned int vm_color_mask; /* mask is == (vm_colors-1) */
unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
+unsigned int vm_free_magazine_refill_limit = 0;
queue_head_t vm_page_queue_free[MAX_COLORS];
unsigned int vm_page_free_wanted;
unsigned int vm_page_free_wanted_privileged;
unsigned int vm_page_free_count;
unsigned int vm_page_fictitious_count;
-unsigned int vm_page_free_count_minimum; /* debugging */
-
/*
* Occasionally, the virtual memory system uses
* resident page structures that do not refer to
queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */
queue_head_t vm_page_queue_throttled;
+queue_head_t vm_objects_wired;
+
unsigned int vm_page_active_count;
unsigned int vm_page_inactive_count;
unsigned int vm_page_anonymous_count;
unsigned int vm_page_throttled_count;
unsigned int vm_page_speculative_count;
+
unsigned int vm_page_wire_count;
+unsigned int vm_page_stolen_count;
unsigned int vm_page_wire_count_initial;
+unsigned int vm_page_pages_initial;
unsigned int vm_page_gobble_count = 0;
-unsigned int vm_page_wire_count_warning = 0;
-unsigned int vm_page_gobble_count_warning = 0;
+
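+/*
+ * Compile-time warning thresholds for the wired / gobbled page counts; a
+ * value of 0 compiles the corresponding check out entirely (these replace
+ * the old run-time *_warning globals removed above).
+ */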
+#define VM_PAGE_WIRE_COUNT_WARNING 0
+#define VM_PAGE_GOBBLE_COUNT_WARNING 0
unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
uint64_t vm_page_purged_count = 0; /* total count of purged pages */
+unsigned int vm_page_xpmapped_external_count = 0;
unsigned int vm_page_external_count = 0;
unsigned int vm_page_internal_count = 0;
unsigned int vm_page_pageable_external_count = 0;
unsigned int vm_page_free_target = 0;
unsigned int vm_page_free_min = 0;
unsigned int vm_page_throttle_limit = 0;
-uint32_t vm_page_creation_throttle = 0;
unsigned int vm_page_inactive_target = 0;
unsigned int vm_page_anonymous_min = 0;
unsigned int vm_page_inactive_min = 0;
void
vm_set_page_size(void)
{
- page_mask = page_size - 1;
+ page_size = PAGE_SIZE;
+ page_mask = PAGE_MASK;
+ page_shift = PAGE_SHIFT;
if ((page_mask & page_size) != 0)
panic("vm_set_page_size: page size not a power of two");
break;
}
+#define COLOR_GROUPS_TO_STEAL 4
+
/* Called once during startup, once the cache geometry is known.
*/
vm_colors = n;
vm_color_mask = n - 1;
+
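+ /*
+ * vm_free_magazine_refill_limit caps how many pages vm_page_grab() pulls
+ * from the global free queues in one refill of the per-processor free list.
+ */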
+ vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
}
m->pageq.prev = NULL;
m->listq.next = NULL;
m->listq.prev = NULL;
- m->next = VM_PAGE_NULL;
+ m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
m->object = VM_OBJECT_NULL; /* reset later */
m->offset = (vm_object_offset_t) -1; /* reset later */
m->busy = TRUE;
m->wanted = FALSE;
m->tabled = FALSE;
+ m->hashed = FALSE;
m->fictitious = FALSE;
m->pmapped = FALSE;
m->wpmapped = FALSE;
m->encrypted_cleaning = FALSE;
m->cs_validated = FALSE;
m->cs_tainted = FALSE;
+ m->cs_nx = FALSE;
m->no_cache = FALSE;
m->reusable = FALSE;
m->slid = FALSE;
- m->was_dirty = FALSE;
m->xpmapped = FALSE;
m->compressor = FALSE;
+ m->written_by_kernel = FALSE;
m->__unused_object_bits = 0;
/*
purgeable_queues[i].debug_count_objects = 0;
#endif
};
+ purgeable_nonvolatile_count = 0;
+ queue_init(&purgeable_nonvolatile_queue);
for (i = 0; i < MAX_COLORS; i++ )
queue_init(&vm_page_queue_free[i]);
queue_init(&vm_page_queue_cleaned);
queue_init(&vm_page_queue_throttled);
queue_init(&vm_page_queue_anonymous);
+ queue_init(&vm_objects_wired);
for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
queue_init(&vm_page_queue_speculative[i].age_q);
/*
* Steal memory for the map and zone subsystems.
*/
+ kernel_debug_string_simple("zone_steal_memory");
zone_steal_memory();
+ kernel_debug_string_simple("vm_map_steal_memory");
vm_map_steal_memory();
/*
if (vm_page_hash_mask & vm_page_bucket_count)
printf("vm_page_bootstrap: WARNING -- strange page hash\n");
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+ /*
+ * Allocate a decoy set of page buckets, to detect
+ * any stomping there.
+ */
+ vm_page_fake_buckets = (vm_page_bucket_t *)
+ pmap_steal_memory(vm_page_bucket_count *
+ sizeof(vm_page_bucket_t));
+ vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+ vm_page_fake_buckets_end =
+ vm_map_round_page((vm_page_fake_buckets_start +
+ (vm_page_bucket_count *
+ sizeof (vm_page_bucket_t))),
+ PAGE_MASK);
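+ /*
+ * Fill the decoy buckets with a recognizable 0x5a pattern so that the
+ * bucket consistency checks (enabled by VM_PAGE_BUCKETS_CHECK) can later
+ * detect any stray writes into this range.
+ */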
+ char *cp;
+ for (cp = (char *)vm_page_fake_buckets_start;
+ cp < (char *)vm_page_fake_buckets_end;
+ cp++) {
+ *cp = 0x5a;
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+ kernel_debug_string_simple("vm_page_buckets");
vm_page_buckets = (vm_page_bucket_t *)
pmap_steal_memory(vm_page_bucket_count *
sizeof(vm_page_bucket_t));
+ kernel_debug_string_simple("vm_page_bucket_locks");
vm_page_bucket_locks = (lck_spin_t *)
pmap_steal_memory(vm_page_bucket_lock_count *
sizeof(lck_spin_t));
for (i = 0; i < vm_page_bucket_count; i++) {
register vm_page_bucket_t *bucket = &vm_page_buckets[i];
- bucket->pages = VM_PAGE_NULL;
+ bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if MACH_PAGE_HASH_STATS
bucket->cur_count = 0;
bucket->hi_count = 0;
for (i = 0; i < vm_page_bucket_lock_count; i++)
lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+ vm_tag_init();
+
+#if VM_PAGE_BUCKETS_CHECK
+ vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
/*
* Machine-dependent code allocates the resident page table.
* It uses vm_page_init to initialize the page frames.
* to get the alignment right.
*/
+ kernel_debug_string_simple("pmap_startup");
pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count; /* initial value */
vm_page_wire_count_initial = vm_page_wire_count;
- vm_page_free_count_minimum = vm_page_free_count;
+ vm_page_pages_initial = vm_page_pages;
printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
vm_page_free_count, vm_page_wire_count);
+ kernel_debug_string_simple("vm_page_bootstrap complete");
simple_lock_init(&vm_paging_lock, 0);
}
* Account for newly stolen memory
*/
vm_page_wire_count++;
-
+ vm_page_stolen_count++;
}
return (void *) addr;
}
+void vm_page_release_startup(vm_page_t mem);
void
pmap_startup(
vm_offset_t *startp,
ppnum_t phys_page;
addr64_t tmpaddr;
+
+#if defined(__LP64__)
+ /*
+ * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use
+ */
+ assert(sizeof(struct vm_page) == 64);
+
+ /*
+ * make sure we are aligned on a 64 byte boundary
+ * for VM_PAGE_PACK_PTR (it clips off the low-order
+ * 6 bits of the pointer)
+ */
+ if (virtual_space_start != virtual_space_end)
+ virtual_space_start = round_page(virtual_space_start);
+#endif
+
/*
* We calculate how many page frames we will have
* and then allocate the page structures in one chunk.
/*
* Initialize the page frames.
*/
+ kernel_debug_string_simple("Initialize the page frames");
for (i = 0, pages_initialized = 0; i < npages; i++) {
if (!pmap_next_page(&phys_page))
break;
}
vm_pages_count = pages_initialized;
+#if defined(__LP64__)
+
+ if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0])
+ panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+ if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1])
+ panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+ kernel_debug_string_simple("page fill/release");
/*
* Check if we want to initialize pages to a known value
*/
// free low -> high so high is preferred
for (i = 1; i <= pages_initialized; i++) {
if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
- vm_page_release(&vm_pages[i - 1]);
+ vm_page_release_startup(&vm_pages[i - 1]);
}
}
else
*/
for (i = pages_initialized; i > 0; i--) {
if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
- vm_page_release(&vm_pages[i - 1]);
+ vm_page_release_startup(&vm_pages[i - 1]);
}
+ VM_CHECK_MEMORYSTATUS;
+
#if 0
{
vm_page_t xx, xxo, xxl;
void
vm_page_module_init(void)
{
+ uint64_t vm_page_zone_pages, vm_page_zone_data_size;
vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
0, PAGE_SIZE, "vm pages");
zone_change(vm_page_zone, Z_EXHAUST, TRUE);
zone_change(vm_page_zone, Z_FOREIGN, TRUE);
zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
- /*
- * Adjust zone statistics to account for the real pages allocated
- * in vm_page_create(). [Q: is this really what we want?]
- */
- vm_page_zone->count += vm_page_pages;
- vm_page_zone->sum_count += vm_page_pages;
- vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
+ /*
+ * Adjust zone statistics to account for the real pages allocated
+ * in vm_page_create(). [Q: is this really what we want?]
+ */
+ vm_page_zone->count += vm_page_pages;
+ vm_page_zone->sum_count += vm_page_pages;
+ vm_page_zone_data_size = vm_page_pages * vm_page_zone->elem_size;
+ vm_page_zone->cur_size += vm_page_zone_data_size;
+ vm_page_zone_pages = ((round_page(vm_page_zone_data_size)) / PAGE_SIZE);
+ OSAddAtomic64(vm_page_zone_pages, &(vm_page_zone->page_count));
+ /* since zone accounts for these, take them out of stolen */
+ VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
}
/*
vm_object_t object,
vm_object_offset_t offset)
{
- vm_page_insert_internal(mem, object, offset, FALSE, TRUE, FALSE);
+ vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_wired(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_tag_t tag)
+{
+ vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
}
void
vm_page_t mem,
vm_object_t object,
vm_object_offset_t offset,
+ vm_tag_t tag,
boolean_t queues_lock_held,
boolean_t insert_in_hash,
- boolean_t batch_pmap_op)
+ boolean_t batch_pmap_op,
+ boolean_t batch_accounting,
+ uint64_t *delayed_ledger_update)
{
- vm_page_bucket_t *bucket;
- lck_spin_t *bucket_lock;
- int hash_id;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+ int hash_id;
+ task_t owner;
XPR(XPR_VM_PAGE,
"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
assert(page_aligned(offset));
- if (object == vm_submap_object) {
- /* the vm_submap_object is only a placeholder for submaps */
- panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
- }
+ assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));
+
+ /* the vm_submap_object is only a placeholder for submaps */
+ assert(object != vm_submap_object);
vm_object_lock_assert_exclusive(object);
#if DEBUG
queues_lock_held ? LCK_MTX_ASSERT_OWNED
: LCK_MTX_ASSERT_NOTOWNED);
#endif /* DEBUG */
-
+
if (insert_in_hash == TRUE) {
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
if (mem->tabled || mem->object != VM_OBJECT_NULL)
panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
"already in (obj=%p,off=0x%llx)",
/* only insert "pageout" pages into "pageout" objects,
* and normal pages into normal objects */
+#if 00
+ /*
+ * For some reason, this assertion gets tripped
+ * but it's mostly harmless, so let's disable it
+ * for now.
+ */
assert(object->pageout == mem->pageout);
+#endif /* 00 */
assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
lck_spin_lock(bucket_lock);
- mem->next = bucket->pages;
- bucket->pages = mem;
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+ assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list));
+
#if MACH_PAGE_HASH_STATS
if (++bucket->cur_count > bucket->hi_count)
bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
-
+ mem->hashed = TRUE;
lck_spin_unlock(bucket_lock);
}
/*
* Now link into the object's list of backed pages.
*/
- VM_PAGE_INSERT(mem, object);
+ queue_enter(&object->memq, mem, vm_page_t, listq);
+ object->memq_hint = mem;
mem->tabled = TRUE;
/*
object->resident_page_count++;
if (VM_PAGE_WIRED(mem)) {
- object->wired_page_count++;
+ if (!mem->private && !mem->fictitious)
+ {
+ if (!object->wired_page_count)
+ {
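+ /*
+ * first wired page for this object: remember which kernel
+ * subsystem (tag) wired it and mark the object wired via
+ * VM_OBJECT_WIRED() for per-tag wired-memory accounting
+ */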
+ assert(VM_KERN_MEMORY_NONE != tag);
+ object->wire_tag = tag;
+ VM_OBJECT_WIRED(object);
+ }
+ }
+ object->wired_page_count++;
}
assert(object->resident_page_count >= object->wired_page_count);
- if (object->internal) {
- OSAddAtomic(1, &vm_page_internal_count);
- } else {
- OSAddAtomic(1, &vm_page_external_count);
+ if (batch_accounting == FALSE) {
+ if (object->internal) {
+ OSAddAtomic(1, &vm_page_internal_count);
+ } else {
+ OSAddAtomic(1, &vm_page_external_count);
+ }
}
/*
OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
}
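+ /*
+ * Purgeable ledger accounting: a page of an owned NONVOLATILE object (or
+ * any wired page of an owned purgeable object) is charged to the owning
+ * task's "purgeable_nonvolatile" and "phys_footprint" ledgers (possibly
+ * deferred via *delayed_ledger_update); a page of an owned VOLATILE or
+ * EMPTY object is charged to "purgeable_volatile" only.
+ */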
+ if (object->purgable == VM_PURGABLE_DENY) {
+ owner = TASK_NULL;
+ } else {
+ owner = object->vo_purgeable_owner;
+ }
+ if (owner &&
+ (object->purgable == VM_PURGABLE_NONVOLATILE ||
+ VM_PAGE_WIRED(mem))) {
+
+ if (delayed_ledger_update)
+ *delayed_ledger_update += PAGE_SIZE;
+ else {
+ /* more non-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
+
+ } else if (owner &&
+ (object->purgable == VM_PURGABLE_VOLATILE ||
+ object->purgable == VM_PURGABLE_EMPTY)) {
+ assert(! VM_PAGE_WIRED(mem));
+ /* more volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
+
if (object->purgable == VM_PURGABLE_VOLATILE) {
if (VM_PAGE_WIRED(mem)) {
- OSAddAtomic(1, &vm_page_purgeable_wired_count);
+ OSAddAtomic(+1, &vm_page_purgeable_wired_count);
} else {
- OSAddAtomic(1, &vm_page_purgeable_count);
+ OSAddAtomic(+1, &vm_page_purgeable_count);
}
} else if (object->purgable == VM_PURGABLE_EMPTY &&
mem->throttled) {
if (queues_lock_held == FALSE)
vm_page_unlock_queues();
}
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+ if (vm_object_tracking_inited &&
+ object->internal &&
+ object->resident_page_count == 0 &&
+ object->pager == NULL &&
+ object->shadow != NULL &&
+ object->shadow->copy == object) {
+ void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+ int numsaved = 0;
+
+ numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+ btlog_add_entry(vm_object_tracking_btlog,
+ object,
+ VM_OBJECT_TRACKING_OP_MODIFIED,
+ bt,
+ numsaved);
+ }
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
}
/*
VM_PAGE_CHECK(mem);
#endif
vm_object_lock_assert_exclusive(object);
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
if (mem->tabled || mem->object != VM_OBJECT_NULL)
panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
"already in (obj=%p,off=0x%llx)",
lck_spin_lock(bucket_lock);
- if (bucket->pages) {
- vm_page_t *mp = &bucket->pages;
- vm_page_t m = *mp;
+ if (bucket->page_list) {
+ vm_page_packed_t *mp = &bucket->page_list;
+ vm_page_t m = VM_PAGE_UNPACK_PTR(*mp);
do {
if (m->object == object && m->offset == offset) {
/*
* Remove old page from hash list
*/
- *mp = m->next;
+ *mp = m->next_m;
+ m->hashed = FALSE;
found_m = m;
break;
}
- mp = &m->next;
- } while ((m = *mp));
+ mp = &m->next_m;
+ } while ((m = VM_PAGE_UNPACK_PTR(*mp)));
- mem->next = bucket->pages;
+ mem->next_m = bucket->page_list;
} else {
- mem->next = VM_PAGE_NULL;
+ mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
}
/*
* insert new page at head of hash list
*/
- bucket->pages = mem;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+ mem->hashed = TRUE;
lck_spin_unlock(bucket_lock);
*/
vm_page_free_unlocked(found_m, FALSE);
}
- vm_page_insert_internal(mem, object, offset, FALSE, FALSE, FALSE);
+ vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
}
/*
vm_page_t this;
lck_spin_t *bucket_lock;
int hash_id;
+ task_t owner;
XPR(XPR_VM_PAGE,
"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
lck_spin_lock(bucket_lock);
- if ((this = bucket->pages) == mem) {
+ if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) {
/* optimize for common case */
- bucket->pages = mem->next;
+ bucket->page_list = mem->next_m;
} else {
- vm_page_t *prev;
+ vm_page_packed_t *prev;
- for (prev = &this->next;
- (this = *prev) != mem;
- prev = &this->next)
+ for (prev = &this->next_m;
+ (this = VM_PAGE_UNPACK_PTR(*prev)) != mem;
+ prev = &this->next_m)
continue;
- *prev = this->next;
+ *prev = this->next_m;
}
#if MACH_PAGE_HASH_STATS
bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
-
+ mem->hashed = FALSE;
lck_spin_unlock(bucket_lock);
}
/*
* Now remove from the object's list of backed pages.
*/
- VM_PAGE_REMOVE(mem);
+ vm_page_remove_internal(mem);
/*
* And show that the object has one fewer resident
mem->object->resident_page_count--;
if (mem->object->internal) {
+#if DEBUG
assert(vm_page_internal_count);
+#endif /* DEBUG */
+
OSAddAtomic(-1, &vm_page_internal_count);
} else {
assert(vm_page_external_count);
OSAddAtomic(-1, &vm_page_external_count);
+
+ if (mem->xpmapped) {
+ assert(vm_page_xpmapped_external_count);
+ OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+ }
}
if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
if (mem->object->resident_page_count == 0)
if (VM_PAGE_WIRED(mem)) {
assert(mem->object->wired_page_count > 0);
mem->object->wired_page_count--;
+ if (!mem->object->wired_page_count) {
+ VM_OBJECT_UNWIRED(mem->object);
+ }
}
assert(mem->object->resident_page_count >=
mem->object->wired_page_count);
vm_page_stats_reusable.reused_remove++;
}
+ if (mem->object->purgable == VM_PURGABLE_DENY) {
+ owner = TASK_NULL;
+ } else {
+ owner = mem->object->vo_purgeable_owner;
+ }
+ if (owner &&
+ (mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ VM_PAGE_WIRED(mem))) {
+ /* less non-volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* less footprint */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ } else if (owner &&
+ (mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY)) {
+ assert(! VM_PAGE_WIRED(mem));
+ /* less volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
if (VM_PAGE_WIRED(mem)) {
assert(vm_page_purgeable_wired_count > 0);
* The object must be locked. No side effects.
*/
-unsigned long vm_page_lookup_hint = 0;
-unsigned long vm_page_lookup_hint_next = 0;
-unsigned long vm_page_lookup_hint_prev = 0;
-unsigned long vm_page_lookup_hint_miss = 0;
-unsigned long vm_page_lookup_bucket_NULL = 0;
-unsigned long vm_page_lookup_miss = 0;
+#define VM_PAGE_HASH_LOOKUP_THRESHOLD 10
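+/*
+ * VM_PAGE_HASH_LOOKUP_THRESHOLD: objects with at most this many resident
+ * pages are looked up by walking the object's memq directly instead of
+ * taking a bucket lock and probing the hash (see vm_page_lookup below)
+ */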
+
+#if DEBUG_VM_PAGE_LOOKUP
+
+struct {
+ uint64_t vpl_total;
+ uint64_t vpl_empty_obj;
+ uint64_t vpl_bucket_NULL;
+ uint64_t vpl_hit_hint;
+ uint64_t vpl_hit_hint_next;
+ uint64_t vpl_hit_hint_prev;
+ uint64_t vpl_fast;
+ uint64_t vpl_slow;
+ uint64_t vpl_hit;
+ uint64_t vpl_miss;
+
+ uint64_t vpl_fast_elapsed;
+ uint64_t vpl_slow_elapsed;
+} vm_page_lookup_stats __attribute__((aligned(8)));
+
+#endif
+
+#define KDP_VM_PAGE_WALK_MAX 1000
+
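+/*
+ * kdp_vm_page_lookup() is a lockless variant of vm_page_lookup() intended
+ * only for use from the kernel debugger: it walks the object's memq
+ * linearly, bounded by KDP_VM_PAGE_WALK_MAX, since locks cannot be taken
+ * in that context.
+ */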
+vm_page_t
+kdp_vm_page_lookup(
+ vm_object_t object,
+ vm_object_offset_t offset)
+{
+ vm_page_t cur_page;
+ int num_traversed = 0;
+
+ if (not_in_kdp) {
+ panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
+ }
+
+ queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
+ if (cur_page->offset == offset) {
+ return cur_page;
+ }
+ num_traversed++;
+
+ if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
+ return VM_PAGE_NULL;
+ }
+ }
+ return VM_PAGE_NULL;
+}
vm_page_t
vm_page_lookup(
vm_page_t mem;
vm_page_bucket_t *bucket;
queue_entry_t qe;
- lck_spin_t *bucket_lock;
+ lck_spin_t *bucket_lock = NULL;
int hash_id;
+#if DEBUG_VM_PAGE_LOOKUP
+ uint64_t start, elapsed;
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
+#endif
vm_object_lock_assert_held(object);
+
+ if (object->resident_page_count == 0) {
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
+#endif
+ return (VM_PAGE_NULL);
+ }
+
mem = object->memq_hint;
if (mem != VM_PAGE_NULL) {
assert(mem->object == object);
if (mem->offset == offset) {
- vm_page_lookup_hint++;
- return mem;
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
+#endif
+ return (mem);
}
qe = queue_next(&mem->listq);
assert(next_page->object == object);
if (next_page->offset == offset) {
- vm_page_lookup_hint_next++;
object->memq_hint = next_page; /* new hint */
- return next_page;
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
+#endif
+ return (next_page);
}
}
qe = queue_prev(&mem->listq);
assert(prev_page->object == object);
if (prev_page->offset == offset) {
- vm_page_lookup_hint_prev++;
object->memq_hint = prev_page; /* new hint */
- return prev_page;
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
+#endif
+ return (prev_page);
}
}
}
* at outside the scope of the hash bucket lock... this is a
 * really cheap optimization to avoid taking the lock
*/
- if (bucket->pages == VM_PAGE_NULL) {
- vm_page_lookup_bucket_NULL++;
-
+ if (!bucket->page_list) {
+#if DEBUG_VM_PAGE_LOOKUP
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
+#endif
return (VM_PAGE_NULL);
}
- bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
- lck_spin_lock(bucket_lock);
-
- for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
-#if 0
+#if DEBUG_VM_PAGE_LOOKUP
+ start = mach_absolute_time();
+#endif
+ if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
/*
- * we don't hold the page queue lock
- * so this check isn't safe to make
+ * on average, it's roughly 3 times faster to run a short memq list
+ * than to take the spin lock and go through the hash list
*/
- VM_PAGE_CHECK(mem);
+ mem = (vm_page_t)queue_first(&object->memq);
+
+ while (!queue_end(&object->memq, (queue_entry_t)mem)) {
+
+ if (mem->offset == offset)
+ break;
+
+ mem = (vm_page_t)queue_next(&mem->listq);
+ }
+ if (queue_end(&object->memq, (queue_entry_t)mem))
+ mem = NULL;
+ } else {
+
+ bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
+
+ lck_spin_lock(bucket_lock);
+
+ for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
+ VM_PAGE_CHECK(mem);
#endif
- if ((mem->object == object) && (mem->offset == offset))
- break;
+ if ((mem->object == object) && (mem->offset == offset))
+ break;
+ }
+ lck_spin_unlock(bucket_lock);
}
- lck_spin_unlock(bucket_lock);
+#if DEBUG_VM_PAGE_LOOKUP
+ elapsed = mach_absolute_time() - start;
+
+ if (bucket_lock) {
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
+ OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
+ } else {
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
+ OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
+ }
+ if (mem != VM_PAGE_NULL)
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
+ else
+ OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
+#endif
if (mem != VM_PAGE_NULL) {
- if (object->memq_hint != VM_PAGE_NULL) {
- vm_page_lookup_hint_miss++;
- }
assert(mem->object == object);
- object->memq_hint = mem;
- } else
- vm_page_lookup_miss++;
- return(mem);
+ object->memq_hint = mem;
+ }
+ return (mem);
}
vm_object_offset_t new_offset,
boolean_t encrypted_ok)
{
- boolean_t internal_to_external, external_to_internal;
+ boolean_t internal_to_external, external_to_internal;
+ vm_tag_t tag;
assert(mem->object != new_object);
+ assert(mem->object);
+
/*
* ENCRYPTED SWAP:
* The encryption key is based on the page's memory object
* up there anyway, and we don't do vm_page_rename's frequently enough
* for this to matter.
*/
- VM_PAGE_QUEUES_REMOVE(mem);
+ vm_page_queues_remove(mem);
vm_page_activate(mem);
}
if (mem->active || mem->inactive || mem->speculative) {
}
}
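+ /*
+ * carry the original object's wire tag over to the new object so that
+ * wired-memory accounting stays attributed to the same kernel subsystem
+ */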
+ tag = mem->object->wire_tag;
vm_page_remove(mem, TRUE);
- vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE, FALSE);
+ vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
if (internal_to_external) {
vm_page_pageable_internal_count--;
retval = kernel_memory_allocate(zone_map,
&addr, PAGE_SIZE, VM_PROT_ALL,
- KMA_KOBJECT|KMA_NOPAGEWAIT);
+ KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
if (retval != KERN_SUCCESS) {
/*
* No page was available. Drop the
return;
}
- /* Increment zone page count. We account for all memory managed by the zone in z->page_count */
- OSAddAtomic64(1, &(vm_page_zone->page_count));
-
zcram(vm_page_zone, addr, PAGE_SIZE);
lck_mtx_unlock(&vm_page_alloc_lock);
* this is an interface to support bring-up of drivers
* on platforms with physical memory > 4G...
*/
-int vm_himemory_mode = 0;
+int vm_himemory_mode = 2;
/*
* request from the per-cpu queue.
*/
-#define COLOR_GROUPS_TO_STEAL 4
-
vm_page_t
vm_page_grab( void )
return_page_from_cpu_list:
PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
- mem->pageq.next = NULL;
enable_preemption();
+ mem->pageq.next = NULL;
assert(mem->listq.next == NULL && mem->listq.prev == NULL);
assert(mem->tabled == FALSE);
* Optionally produce warnings if the wire or gobble
* counts exceed some threshold.
*/
- if (vm_page_wire_count_warning > 0
- && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+ if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
printf("mk: vm_page_grab(): high wired page count of %d\n",
vm_page_wire_count);
- assert(vm_page_wire_count < vm_page_wire_count_warning);
}
- if (vm_page_gobble_count_warning > 0
- && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+ if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
printf("mk: vm_page_grab(): high gobbled page count of %d\n",
vm_page_gobble_count);
- assert(vm_page_gobble_count < vm_page_gobble_count_warning);
}
-
+#endif
lck_mtx_lock_spin(&vm_page_queue_free_lock);
/*
if (vm_page_free_count <= vm_page_free_reserved)
pages_to_steal = 1;
else {
- pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-
- if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+ if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+ pages_to_steal = vm_free_magazine_refill_limit;
+ else
pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
}
color = PROCESSOR_DATA(current_processor(), start_color);
head = tail = NULL;
+ vm_page_free_count -= pages_to_steal;
+
while (pages_to_steal--) {
- if (--vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
while (queue_empty(&vm_page_queue_free[color]))
color = (color + 1) & vm_color_mask;
tail->pageq.next = (queue_t)mem;
tail = mem;
- mem->pageq.prev = NULL;
assert(mem->listq.next == NULL && mem->listq.prev == NULL);
assert(mem->tabled == FALSE);
assert(mem->object == VM_OBJECT_NULL);
assert(!mem->wpmapped);
assert(!pmap_is_noencrypt(mem->phys_page));
}
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
PROCESSOR_DATA(current_processor(), start_color) = color;
mem = head;
mem->pageq.next = NULL;
- lck_mtx_unlock(&vm_page_queue_free_lock);
-
enable_preemption();
}
/*
VM_CHECK_MEMORYSTATUS;
}
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released
+ * for the first time. Hence, no locking or unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
+ */
+void
+vm_page_release_startup(
+ register vm_page_t mem)
+{
+ queue_t queue_free;
+
+ if (vm_lopage_free_count < vm_lopage_free_limit &&
+ mem->phys_page < max_valid_low_ppnum) {
+ mem->lopage = TRUE;
+ vm_lopage_free_count++;
+ queue_free = &vm_lopage_queue_free;
+ } else {
+ mem->lopage = FALSE;
+ mem->free = TRUE;
+ vm_page_free_count++;
+ queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask];
+ }
+ queue_enter_first(queue_free, mem, vm_page_t, pageq);
+}
+
/*
* vm_page_wait:
*
return(mem);
}
-vm_page_t
-vm_page_alloclo(
- vm_object_t object,
- vm_object_offset_t offset)
-{
- register vm_page_t mem;
-
- vm_object_lock_assert_exclusive(object);
- mem = vm_page_grablo();
- if (mem == VM_PAGE_NULL)
- return VM_PAGE_NULL;
-
- vm_page_insert(mem, object, offset);
-
- return(mem);
-}
-
-
/*
* vm_page_alloc_guard:
*
VM_PAGE_CHECK(mem);
assert(!mem->free);
assert(!mem->cleaning);
-#if DEBUG
+
+#if MACH_ASSERT || DEBUG
lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
if (mem->free)
panic("vm_page_free: freeing page on free list\n");
-#endif
+#endif /* MACH_ASSERT || DEBUG */
if (mem->object) {
vm_object_lock_assert_exclusive(mem->object);
}
counter(++c_laundry_pages_freed);
}
- VM_PAGE_QUEUES_REMOVE(mem); /* clears local/active/inactive/throttled/speculative */
+ vm_page_queues_remove(mem); /* clears local/active/inactive/throttled/speculative */
if (VM_PAGE_WIRED(mem)) {
if (mem->object) {
assert(mem->object->wired_page_count > 0);
mem->object->wired_page_count--;
+ if (!mem->object->wired_page_count) {
+ VM_OBJECT_UNWIRED(mem->object);
+ }
+
assert(mem->object->resident_page_count >=
mem->object->wired_page_count);
assert(vm_page_purgeable_wired_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_wired_count);
}
+ if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY) &&
+ mem->object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = mem->object->vo_purgeable_owner;
+ /*
+ * While wired, this page was accounted
+ * as "non-volatile" but it should now
+ * be accounted as "volatile".
+ */
+ /* one less "non-volatile"... */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* ... and "phys_footprint" */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ /* one more "volatile" */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ }
}
if (!mem->private && !mem->fictitious)
vm_page_wire_count--;
*
* The page's object and the page queues must be locked.
*/
+
+
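+/*
+ * "tag" identifies the kernel subsystem responsible for the wiring (used
+ * for per-tag wired-memory accounting); "check_memorystatus" lets callers
+ * that wire many pages in a batch defer the VM_CHECK_MEMORYSTATUS
+ * evaluation until the batch is complete.
+ */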
void
vm_page_wire(
- register vm_page_t mem)
+ register vm_page_t mem,
+ vm_tag_t tag,
+ boolean_t check_memorystatus)
{
// dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
mem->pageout = FALSE;
vm_pageout_throttle_up(mem);
}
- VM_PAGE_QUEUES_REMOVE(mem);
+ vm_page_queues_remove(mem);
if (mem->object) {
+
+ if (!mem->private && !mem->fictitious)
+ {
+ if (!mem->object->wired_page_count)
+ {
+ assert(VM_KERN_MEMORY_NONE != tag);
+ mem->object->wire_tag = tag;
+ VM_OBJECT_WIRED(mem->object);
+ }
+ }
mem->object->wired_page_count++;
+
assert(mem->object->resident_page_count >=
mem->object->wired_page_count);
if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
OSAddAtomic(-1, &vm_page_purgeable_count);
OSAddAtomic(1, &vm_page_purgeable_wired_count);
}
+ if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY) &&
+ mem->object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = mem->object->vo_purgeable_owner;
+ /* less volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ /* more not-quite-volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* more footprint */
+ ledger_credit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
if (mem->object->all_reusable) {
/*
* Wired pages are not counted as "re-usable"
vm_page_gobble_count--;
mem->gobbled = FALSE;
- VM_CHECK_MEMORYSTATUS;
-
+ if (check_memorystatus == TRUE) {
+ VM_CHECK_MEMORYSTATUS;
+ }
/*
* ENCRYPTED SWAP:
* The page could be encrypted, but
VM_PAGE_CHECK(mem);
}
-/*
- * vm_page_gobble:
- *
- * Mark this page as consumed by the vm/ipc/xmm subsystems.
- *
- * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
- */
-void
-vm_page_gobble(
- register vm_page_t mem)
-{
- vm_page_lockspin_queues();
- VM_PAGE_CHECK(mem);
-
- assert(!mem->gobbled);
- assert( !VM_PAGE_WIRED(mem));
-
- if (!mem->gobbled && !VM_PAGE_WIRED(mem)) {
- if (!mem->private && !mem->fictitious)
- vm_page_wire_count++;
- }
- vm_page_gobble_count++;
- mem->gobbled = TRUE;
- vm_page_unlock_queues();
-}
-
/*
* vm_page_unwire:
*
VM_PAGE_CHECK(mem);
assert(VM_PAGE_WIRED(mem));
+ assert(!mem->gobbled);
assert(mem->object != VM_OBJECT_NULL);
#if DEBUG
vm_object_lock_assert_exclusive(mem->object);
lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
if (--mem->wire_count == 0) {
- assert(!mem->private && !mem->fictitious);
- vm_page_wire_count--;
+ if (!mem->private && !mem->fictitious) {
+ vm_page_wire_count--;
+ }
assert(mem->object->wired_page_count > 0);
mem->object->wired_page_count--;
+ if (!mem->object->wired_page_count) {
+ VM_OBJECT_UNWIRED(mem->object);
+ }
assert(mem->object->resident_page_count >=
mem->object->wired_page_count);
if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
assert(vm_page_purgeable_wired_count > 0);
OSAddAtomic(-1, &vm_page_purgeable_wired_count);
}
- assert(!mem->laundry);
+ if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+ mem->object->purgable == VM_PURGABLE_EMPTY) &&
+ mem->object->vo_purgeable_owner != TASK_NULL) {
+ task_t owner;
+
+ owner = mem->object->vo_purgeable_owner;
+ /* more volatile bytes */
+ ledger_credit(owner->ledger,
+ task_ledgers.purgeable_volatile,
+ PAGE_SIZE);
+ /* less not-quite-volatile bytes */
+ ledger_debit(owner->ledger,
+ task_ledgers.purgeable_nonvolatile,
+ PAGE_SIZE);
+ /* less footprint */
+ ledger_debit(owner->ledger,
+ task_ledgers.phys_footprint,
+ PAGE_SIZE);
+ }
assert(mem->object != kernel_object);
assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
}
/*
* if this page is currently on the pageout queue, we can't do the
- * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
* and we can't remove it manually since we would need the object lock
* (which is not required here) to decrement the activity_in_progress
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
+ if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
return;
if (!m->absent && clear_hw_reference == TRUE)
m->no_cache = FALSE;
if (!m->inactive) {
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
m->dirty && m->object->internal &&
(m->object->purgable == VM_PURGABLE_DENY ||
m->object->purgable == VM_PURGABLE_NONVOLATILE ||
m->object->purgable == VM_PURGABLE_VOLATILE)) {
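+ /*
+ * sanity check that this page does not belong to a kernel-internal
+ * object before placing it on a pageable queue
+ */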
+ vm_page_check_pageable_safe(m);
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
vm_page_speculative_recreated++;
#endif
} else {
- VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+ vm_page_enqueue_inactive(m, FALSE);
}
}
}
}
/*
* if this page is currently on the pageout queue, we can't do the
- * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
* and we can't remove it manually since we would need the object lock
* (which is not required here) to decrement the activity_in_progress
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->clean_queue || m->pageout_queue || m->private || m->fictitious)
+ if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious)
return;
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
+ vm_page_check_pageable_safe(m);
queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
m->clean_queue = TRUE;
vm_page_cleaned_count++;
* The page queues must be locked.
*/
-#if CONFIG_JETSAM
-#if LATENCY_JETSAM
-extern struct vm_page jetsam_latency_page[NUM_OF_JETSAM_LATENCY_TOKENS];
-#endif /* LATENCY_JETSAM */
-#endif /* CONFIG_JETSAM */
-
void
vm_page_activate(
register vm_page_t m)
}
/*
* if this page is currently on the pageout queue, we can't do the
- * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
* and we can't remove it manually since we would need the object lock
* (which is not required here) to decrement the activity_in_progress
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->fictitious || m->compressor)
+ if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
return;
#if DEBUG
DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
}
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
if ( !VM_PAGE_WIRED(m)) {
-
+ vm_page_check_pageable_safe(m);
if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
m->dirty && m->object->internal &&
(m->object->purgable == VM_PURGABLE_DENY ||
} else {
vm_page_pageable_external_count++;
}
-#if LATENCY_JETSAM
- if (jlp_init) {
- uint64_t now = mach_absolute_time();
- uint64_t delta = now - jlp_time;
- clock_sec_t jl_secs = 0;
- clock_usec_t jl_usecs = 0;
- vm_page_t jlp;
-
- absolutetime_to_microtime(delta, &jl_secs, &jl_usecs);
-
- jl_usecs += jl_secs * USEC_PER_SEC;
- if (jl_usecs >= JETSAM_LATENCY_TOKEN_AGE) {
-
- jlp = &jetsam_latency_page[jlp_current];
- if (jlp->active) {
- queue_remove(&vm_page_queue_active, jlp, vm_page_t, pageq);
- }
- queue_enter(&vm_page_queue_active, jlp, vm_page_t, pageq);
-
- jlp->active = TRUE;
-
- jlp->offset = now;
- jlp_time = jlp->offset;
-
- if(++jlp_current == NUM_OF_JETSAM_LATENCY_TOKENS) {
- jlp_current = 0;
- }
-
- }
- }
-#endif /* LATENCY_JETSAM */
}
m->reference = TRUE;
m->no_cache = FALSE;
struct vm_speculative_age_q *aq;
VM_PAGE_CHECK(m);
- assert(m->object != kernel_object);
+ vm_page_check_pageable_safe(m);
+
assert(m->phys_page != vm_page_guard_addr);
#if DEBUG
lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
/*
* if this page is currently on the pageout queue, we can't do the
- * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
* and we can't remove it manually since we would need the object lock
* (which is not required here) to decrement the activity_in_progress
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->fictitious || m->compressor)
+ if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
return;
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
if ( !VM_PAGE_WIRED(m)) {
mach_timespec_t ts;
#endif
/*
* if this page is currently on the pageout queue, we can't do the
- * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+ * vm_page_queues_remove (which doesn't handle the pageout queue case)
* and we can't remove it manually since we would need the object lock
* (which is not required here) to decrement the activity_in_progress
* reference which is held on the object while the page is in the pageout queue...
* just let the normal laundry processing proceed
*/
- if (m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
+ if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
return;
m->no_cache = FALSE;
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
- VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+ vm_page_enqueue_inactive(m, FALSE);
}
queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
VM_PAGE_CHECK(m);
+ vm_page_check_pageable_safe(m);
assert(m->local);
assert(!m->active);
assert(!m->inactive);
printf("vm_page %p: \n", p);
printf(" pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
printf(" listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
- printf(" next=%p\n", p->next);
+ printf(" next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m));
printf(" object=%p offset=0x%llx\n", p->object, p->offset);
printf(" wire_count=%u\n", p->wire_count);
(p->unusual ? "" : "!"),
(p->encrypted ? "" : "!"),
(p->encrypted_cleaning ? "" : "!"));
- printf(" %scs_validated, %scs_tainted, %sno_cache\n",
+ printf(" %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
(p->cs_validated ? "" : "!"),
(p->cs_tainted ? "" : "!"),
+ (p->cs_nx ? "" : "!"),
(p->no_cache ? "" : "!"));
printf("phys_page=0x%x\n", p->phys_page);
/*
* Check the free lists for proper length etc.
*/
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
static unsigned int
vm_page_verify_free_list(
queue_head_t *vm_page_queue,
vm_page_t prev_m;
boolean_t found_page;
+ if (! vm_page_verify_this_free_list_enabled)
+ return 0;
+
found_page = FALSE;
npages = 0;
prev_m = (vm_page_t) vm_page_queue;
return npages;
}
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
static void
vm_page_verify_free_lists( void )
{
unsigned int color, npages, nlopages;
+ boolean_t toggle = TRUE;
- if (! vm_page_verify_free_lists_enabled)
+ if (! vm_page_verify_all_free_lists_enabled)
return;
npages = 0;
lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_verify_this_free_list_enabled == TRUE) {
+ /*
+ * This variable has been set globally for extra checking of
+ * each free list Q. Since we didn't set it, we don't own it
+ * and we shouldn't toggle it.
+ */
+ toggle = FALSE;
+ }
+
+ if (toggle == TRUE) {
+ vm_page_verify_this_free_list_enabled = TRUE;
+ }
for( color = 0; color < vm_colors; color++ ) {
npages += vm_page_verify_free_list(&vm_page_queue_free[color],
"npages %u free_count %d nlopages %u lo_free_count %u",
npages, vm_page_free_count, nlopages, vm_lopage_free_count);
+ if (toggle == TRUE) {
+ vm_page_verify_this_free_list_enabled = FALSE;
+ }
+
lck_mtx_unlock(&vm_page_queue_free_lock);
}
#endif /* MACH_ASSERT */
+
+
+
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
/*
* CONTIGUOUS PAGE ALLOCATION
*
unsigned int idx_last_contig_page_found = 0;
int free_considered, free_available;
int substitute_needed;
- boolean_t wrapped;
+ boolean_t wrapped, zone_gc_called = FALSE;
#if DEBUG
clock_sec_t tv_start_sec, tv_end_sec;
clock_usec_t tv_start_usec, tv_end_usec;
#endif
-#if MACH_ASSERT
+
int yielded = 0;
int dumped_run = 0;
int stolen_pages = 0;
int compressed_pages = 0;
-#endif
+
if (contig_pages == 0)
return VM_PAGE_NULL;
+full_scan_again:
+
#if MACH_ASSERT
vm_page_verify_free_lists();
#endif
PAGE_REPLACEMENT_ALLOWED(TRUE);
vm_page_lock_queues();
+
+
lck_mtx_lock(&vm_page_queue_free_lock);
RESET_STATE_OF_RUN();
*/
free_available = vm_page_free_count - vm_page_free_reserved;
considered = 0;
-#if MACH_ASSERT
+
yielded++;
-#endif
+
goto retry;
}
considered++;
vm_page_free_count--;
}
}
- /*
- * adjust global freelist counts
- */
- if (vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
-
if( flags & KMA_LOMEM)
vm_page_lomem_find_contiguous_last_idx = page_idx;
else
assert(!m1->gobbled);
assert(!m1->private);
m2->no_cache = m1->no_cache;
- m2->xpmapped = m1->xpmapped;
+ m2->xpmapped = 0;
assert(!m1->busy);
assert(!m1->wanted);
assert(!m1->fictitious);
assert(!m1->encrypted_cleaning);
m2->cs_validated = m1->cs_validated;
m2->cs_tainted = m1->cs_tainted;
+ m2->cs_nx = m1->cs_nx;
/*
* If m1 had really been reusable,
assert(!m1->lopage);
m2->slid = m1->slid;
- m2->was_dirty = m1->was_dirty;
m2->compressor = m1->compressor;
+ /*
+ * page may need to be flushed if
+ * it is marshalled into a UPL
+ * that is going to be used by a device
+ * that doesn't support coherency
+ */
+ m2->written_by_kernel = TRUE;
+
/*
* make sure we clear the ref/mod state
* from the pmap layer... else we risk
* now put the substitute page
* on the object
*/
- vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE, FALSE);
+ vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
if (m2->compressor) {
m2->pmapped = TRUE;
PMAP_ENTER(kernel_pmap, m2->offset, m2,
VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
-#if MACH_ASSERT
+
compressed_pages++;
-#endif
+
} else {
if (m2->reference)
vm_page_activate(m2);
*/
vm_page_free_prepare(m1);
}
-#if MACH_ASSERT
+
stolen_pages++;
-#endif
+
}
m1->pageq.next = (queue_entry_t) m;
m1->pageq.prev = NULL;
if (m != VM_PAGE_NULL) {
vm_page_free_list(m, FALSE);
}
-#if MACH_ASSERT
+
dumped_run++;
-#endif
+
/*
* want the index of the last
* page in this run that was
#if MACH_ASSERT
vm_page_verify_free_lists();
#endif
+ if (m == NULL && zone_gc_called == FALSE) {
+ printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
+ __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+ scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
+
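+ /*
+ * one-shot fallback: ask the buffer cache and the zone allocator to
+ * release memory, then retry the full contiguous scan once
+ */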
+ if (consider_buffer_cache_collect != NULL) {
+ (void)(*consider_buffer_cache_collect)(1);
+ }
+
+ consider_zone_gc(TRUE);
+
+ zone_gc_called = TRUE;
+
+ printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
+ goto full_scan_again;
+ }
+
return m;
}
void
vm_page_do_delayed_work(
vm_object_t object,
+ vm_tag_t tag,
struct vm_page_delayed_work *dwp,
int dw_count)
{
if (dwp->dw_mask & DW_vm_pageout_throttle_up)
vm_pageout_throttle_up(m);
-
+#if CONFIG_PHANTOM_CACHE
+ if (dwp->dw_mask & DW_vm_phantom_cache_update)
+ vm_phantom_cache_update(m);
+#endif
if (dwp->dw_mask & DW_vm_page_wire)
- vm_page_wire(m);
+ vm_page_wire(m, tag, FALSE);
else if (dwp->dw_mask & DW_vm_page_unwire) {
boolean_t queueit;
- queueit = (dwp->dw_mask & DW_vm_page_free) ? FALSE : TRUE;
+ queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
vm_page_unwire(m, queueit);
}
vm_page_lru(m);
else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
if ( !m->pageout_queue)
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
}
if (dwp->dw_mask & DW_set_reference)
m->reference = TRUE;
if (dwp->dw_mask & DW_move_page) {
if ( !m->pageout_queue) {
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
assert(m->object != kernel_object);
- VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+ vm_page_enqueue_inactive(m, FALSE);
}
}
if (dwp->dw_mask & DW_clear_busy)
static vm_page_t hibernate_gobble_queue;
-extern boolean_t (* volatile consider_buffer_cache_collect)(int);
-
static int hibernate_drain_pageout_queue(struct vm_pageout_queue *);
static int hibernate_flush_dirty_pages(int);
static int hibernate_flush_queue(queue_head_t *, int);
int cd_found_laundry;
int cd_found_dirty;
int cd_found_xpmapped;
+ int cd_skipped_xpmapped;
int cd_local_free;
int cd_total_free;
int cd_vm_page_wire_count;
} hibernate_stats;
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define HIBERNATE_XPMAPPED_LIMIT 40000
+
static int
hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
goto reenter_pg_on_q;
}
- vm_pageout_scan_wants_object = m_object;
vm_page_unlock_queues();
mutex_pause(try_failed_count++);
continue;
} else {
l_object = m_object;
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
}
if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
vm_object_unlock(l_object);
l_object = NULL;
}
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
while (retval == 0) {
/*
* we've already factored out pages in the laundry which
* means this page can't be on the pageout queue so it's
- * safe to do the VM_PAGE_QUEUES_REMOVE
+ * safe to do the vm_page_queues_remove
*/
assert(!m->pageout_queue);
- VM_PAGE_QUEUES_REMOVE(m);
+ vm_page_queues_remove(m);
- if (COMPRESSED_PAGER_IS_ACTIVE)
- pmap_disconnect(m->phys_page);
+ if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE)
+ pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
- vm_pageout_cluster(m, FALSE);
+ (void)vm_pageout_cluster(m, FALSE, FALSE, FALSE);
hibernate_stats.hibernate_found_dirty++;
vm_object_unlock(l_object);
l_object = NULL;
}
- vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_page_unlock_queues();
struct vm_speculative_age_q *aq;
uint32_t i;
- bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
-
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++)
vm_page_reactivate_local(i, TRUE, FALSE);
}
+void
+hibernate_reset_stats()
+{
+ bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
+
int
hibernate_flush_memory()
{
if (COMPRESSED_PAGER_IS_ACTIVE) {
- if ((retval = hibernate_flush_dirty_pages(2)) == 0) {
-
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
vm_compressor_flush();
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
- }
}
- if (retval == 0 && consider_buffer_cache_collect != NULL) {
+ if (consider_buffer_cache_collect != NULL) {
unsigned int orig_wire_count;
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
}
}
-void
-hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
-{
- uint32_t i;
- vm_page_t m;
- uint64_t start, end, timeout, nsec;
- clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
- clock_get_uptime(&start);
-
- for (i = 0; i < gobble_count; i++)
- {
- while (VM_PAGE_NULL == (m = vm_page_grab()))
- {
- clock_get_uptime(&end);
- if (end >= timeout)
- break;
- VM_PAGE_WAIT();
- }
- if (!m)
- break;
- m->busy = FALSE;
- vm_page_gobble(m);
-
- m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
- hibernate_gobble_queue = m;
- }
-
- clock_get_uptime(&end);
- absolutetime_to_nanoseconds(end - start, &nsec);
- HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
-}
-
void
hibernate_free_gobble_pages(void)
{
if (discard == FALSE) {
if (!preflight)
hibernate_stats.cd_found_dirty++;
- } else if (m->xpmapped && m->reference) {
- if (!preflight)
- hibernate_stats.cd_found_xpmapped++;
- discard = FALSE;
+ } else if (m->xpmapped && m->reference && !object->internal) {
+ if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+ if (!preflight)
+ hibernate_stats.cd_found_xpmapped++;
+ discard = FALSE;
+ } else {
+ if (!preflight)
+ hibernate_stats.cd_skipped_xpmapped++;
+ }
}
}
while (FALSE);
*/
return;
-#if DEBUG
+#if MACH_ASSERT || DEBUG
vm_object_t object = m->object;
if (!vm_object_lock_try(m->object))
panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
#else
/* No need to lock page queue for token delete, hibernate_vm_unlock()
makes sure these locks are uncontended before sleep */
-#endif /* !DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
if (m->pmapped == TRUE)
{
vm_purgeable_token_delete_first(old_queue);
}
m->object->purgable = VM_PURGABLE_EMPTY;
+
+ /*
+ * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
+ * accounted in the "volatile" ledger, so no change here.
+ * We have to update vm_page_purgeable_count, though, since we're
+ * effectively purging this object.
+ */
+ unsigned int delta;
+ assert(m->object->resident_page_count >= m->object->wired_page_count);
+ delta = (m->object->resident_page_count - m->object->wired_page_count);
+ assert(vm_page_purgeable_count >= delta);
+ assert(delta > 0);
+ OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
}
vm_page_free(m);
-#if DEBUG
+#if MACH_ASSERT || DEBUG
vm_object_unlock(object);
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
}
/*
boolean_t discard_all;
boolean_t discard;
- HIBLOG("hibernate_page_list_setall(preflight %d) start %p, %p\n", preflight, page_list, page_list_wired);
+ HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
if (preflight) {
page_list = NULL;
discard_all = will_discard;
}
-#if DEBUG
+#if MACH_ASSERT || DEBUG
if (!preflight)
{
vm_page_lock_queues();
}
}
}
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
m = next;
}
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
- while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+ m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+ while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
{
next = (vm_page_t) m->pageq.next;
discard = FALSE;
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_inactive++;
+ count_discard_cleaned++;
discard = discard_all;
}
else
- count_inactive++;
+ count_cleaned++;
count_wire--;
if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
if (discard) hibernate_discard_page(m);
m = next;
}
- m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
- while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ discard = discard_all;
+ }
+ else
+ count_active++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
{
next = (vm_page_t) m->pageq.next;
discard = FALSE;
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_cleaned++;
+ count_discard_inactive++;
discard = discard_all;
}
else
- count_cleaned++;
+ count_inactive++;
count_wire--;
if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
if (discard) hibernate_discard_page(m);
}
}
- m = (vm_page_t) queue_first(&vm_page_queue_active);
- while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
- {
- next = (vm_page_t) m->pageq.next;
- discard = FALSE;
- if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
- && hibernate_consider_discard(m, preflight))
- {
- if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
- if (m->dirty)
- count_discard_purgeable++;
- else
- count_discard_active++;
- discard = discard_all;
- }
- else
- count_active++;
- count_wire--;
- if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- if (discard) hibernate_discard_page(m);
- m = next;
- }
-
queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
{
count_compressor++;
discard_all ? "did" : "could",
count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+ if (hibernate_stats.cd_skipped_xpmapped)
+ HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
-#if DEBUG
+#if MACH_ASSERT || DEBUG
if (!preflight)
{
if (vm_page_local_q) {
}
vm_page_unlock_queues();
}
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
if (preflight) {
lck_mtx_unlock(&vm_page_queue_free_lock);
uint32_t count_discard_speculative = 0;
-#if DEBUG
+#if MACH_ASSERT || DEBUG
vm_page_lock_queues();
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++) {
VPL_LOCK(&lq->vpl_lock);
}
}
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
clock_get_uptime(&start);
m = next;
}
-#if DEBUG
+#if MACH_ASSERT || DEBUG
if (vm_page_local_q) {
for (i = 0; i < vm_page_local_q_count; i++) {
struct vpl *lq;
}
}
vm_page_unlock_queues();
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
clock_get_uptime(&end);
absolutetime_to_nanoseconds(end - start, &nsec);
vm_page_bucket_t *bucket;
int hash_id;
- assert(mem->tabled);
+ assert(mem->hashed);
assert(mem->object);
assert(mem->offset != (vm_object_offset_t) -1);
hash_id = vm_page_hash(mem->object, mem->offset);
bucket = &vm_page_buckets[hash_id];
- mem->next = bucket->pages;
- bucket->pages = mem;
+ mem->next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
}
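+
+/*
+ * Editorial sketch (not part of this change): the hash links above now hold
+ * vm_page_packed_t values instead of raw vm_page_t pointers.  VM_PAGE_PACK_PTR()
+ * and VM_PAGE_UNPACK_PTR() convert between the two forms, which lets
+ * bucket->page_list and next_m fit in 32 bits on a 64-bit kernel.  The exact
+ * encoding (assumed here to be a scaled offset from a fixed kernel base) is
+ * defined elsewhere; a bucket walk always takes the shape:
+ *
+ *	vm_page_t p;
+ *	for (p = VM_PAGE_UNPACK_PTR(bucket->page_list);
+ *	     p != VM_PAGE_NULL;
+ *	     p = VM_PAGE_UNPACK_PTR(p->next_m))
+ *		... examine p ...
+ */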
* hibernate_teardown_vm_structs leaves the location where
* this vm_page_t must be located in "next".
*/
- tmem = mem->next;
- mem->next = NULL;
+ tmem = VM_PAGE_UNPACK_PTR(mem->next_m);
+ mem->next_m = VM_PAGE_PACK_PTR(NULL);
sindx = (int)(tmem - &vm_pages[0]);
*tmem = *mem;
mem = tmem;
}
- if (mem->tabled)
+ if (mem->hashed)
hibernate_hash_insert_page(mem);
/*
* the 'hole' between this vm_page_t and the previous
assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
/*
- * process the list of vm_page_t's that were tabled in the hash,
+ * process the list of vm_page_t's that were entered in the hash,
* but were not located in the vm_pages array... these are
* vm_page_t's that were created on the fly (i.e. fictitious)
*/
for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
- mem_next = mem->next;
- mem->next = NULL;
+ mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
+ mem->next_m = VM_PAGE_PACK_PTR(NULL);
hibernate_hash_insert_page(mem);
}
hibernate_rebuild_hash_list = NULL;
bucket = &vm_page_buckets[i];
- for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) {
-
- assert(mem->tabled);
- mem_next = mem->next;
+ for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) {
+ assert(mem->hashed);
+ mem_next = VM_PAGE_UNPACK_PTR(mem->next_m);
if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
- mem->next = hibernate_rebuild_hash_list;
+ mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
hibernate_rebuild_hash_list = mem;
}
}
* as an indicator to the rebuild function that
* we don't have to move it
*/
- mem->next = mem;
+ mem->next_m = VM_PAGE_PACK_PTR(mem);
if (vm_pages[compact_target_indx].free) {
/*
bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
lck_spin_lock(bucket_lock);
- for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+ for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m))
bucket_count++;
lck_spin_unlock(bucket_lock);
return vm_page_bucket_count;
}
#endif /* MACH_VM_DEBUG */
+
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+ unsigned int i;
+ vm_page_t p;
+ unsigned int p_hash;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+
+ if (!vm_page_buckets_check_ready) {
+ return;
+ }
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed ||
+ hibernate_rebuild_hash_list) {
+ panic("BUCKET_CHECK: hibernation in progress: "
+ "rebuild_needed=%d rebuild_hash_list=%p\n",
+ hibernate_rebuild_needed,
+ hibernate_rebuild_hash_list);
+ }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+ char *cp;
+ for (cp = (char *) vm_page_fake_buckets_start;
+ cp < (char *) vm_page_fake_buckets_end;
+ cp++) {
+ if (*cp != 0x5a) {
+ panic("BUCKET_CHECK: corruption at %p in fake buckets "
+ "[0x%llx:0x%llx]\n",
+ cp,
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ }
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ bucket = &vm_page_buckets[i];
+ if (!bucket->page_list) {
+ continue;
+ }
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+ p = VM_PAGE_UNPACK_PTR(bucket->page_list);
+ while (p != VM_PAGE_NULL) {
+ p_hash = vm_page_hash(p->object, p->offset);
+ if (!p->hashed) {
+ panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+ "hash %d in bucket %d at %p "
+ "is not hashed\n",
+ p, p->object, p->offset,
+ p_hash, i, bucket);
+ }
+ if (p_hash != i) {
+ panic("BUCKET_CHECK: corruption in bucket %d "
+ "at %p: page %p object %p offset 0x%llx "
+ "hash %d\n",
+ i, bucket, p, p->object, p->offset,
+ p_hash);
+ }
+ p = VM_PAGE_UNPACK_PTR(p->next_m);
+ }
+ lck_spin_unlock(bucket_lock);
+ }
+
+// printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+/*
+ * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
+ * local queues if they exist... it's the only spot in the system where we add pages
+ * to those queues... once on those queues, those pages can only move to one of the
+ * global page queues or the free queues... they NEVER move from local q to local q.
+ * the 'local' state is stable when vm_page_queues_remove is called, since we're behind
+ * the global vm_page_queue_lock at this point... we still need to take the local lock
+ * in case this operation is being run on a different CPU than the one the local queue
+ * belongs to, but we don't have to worry about the page moving to a global queue or
+ * becoming wired while we're grabbing the local lock, since those operations would
+ * require the global vm_page_queue_lock to be held, and we already own it.
+ *
+ * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
+ * 'wired' and 'local' are ALWAYS mutually exclusive conditions.
+ */
+void
+vm_page_queues_remove(vm_page_t mem)
+{
+ boolean_t was_pageable;
+
+ VM_PAGE_QUEUES_ASSERT(mem, 1);
+ assert(!mem->pageout_queue);
+ /*
+ * if (mem->pageout_queue)
+ * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
+ * the caller is responsible for determining if the page is on that queue, and if so, must
+ * either first remove it (it needs both the page queues lock and the object lock to do
+ * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
+ */
+ if (mem->local) {
+ struct vpl *lq;
+ assert(mem->object != kernel_object);
+ assert(mem->object != compressor_object);
+ assert(!mem->inactive && !mem->speculative);
+ assert(!mem->active && !mem->throttled);
+ assert(!mem->clean_queue);
+ assert(!mem->fictitious);
+ lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ queue_remove(&lq->vpl_queue,
+ mem, vm_page_t, pageq);
+ mem->local = FALSE;
+ mem->local_id = 0;
+ lq->vpl_count--;
+ if (mem->object->internal) {
+ lq->vpl_internal_count--;
+ } else {
+ lq->vpl_external_count--;
+ }
+ VPL_UNLOCK(&lq->vpl_lock);
+ was_pageable = FALSE;
+ }
+
+ else if (mem->active) {
+ assert(mem->object != kernel_object);
+ assert(mem->object != compressor_object);
+ assert(!mem->inactive && !mem->speculative);
+ assert(!mem->clean_queue);
+ assert(!mem->throttled);
+ assert(!mem->fictitious);
+ queue_remove(&vm_page_queue_active,
+ mem, vm_page_t, pageq);
+ mem->active = FALSE;
+ vm_page_active_count--;
+ was_pageable = TRUE;
+ }
+
+ else if (mem->inactive) {
+ assert(mem->object != kernel_object);
+ assert(mem->object != compressor_object);
+ assert(!mem->active && !mem->speculative);
+ assert(!mem->throttled);
+ assert(!mem->fictitious);
+ vm_page_inactive_count--;
+ if (mem->clean_queue) {
+ queue_remove(&vm_page_queue_cleaned,
+ mem, vm_page_t, pageq);
+ mem->clean_queue = FALSE;
+ vm_page_cleaned_count--;
+ } else {
+ if (mem->object->internal) {
+ queue_remove(&vm_page_queue_anonymous,
+ mem, vm_page_t, pageq);
+ vm_page_anonymous_count--;
+ } else {
+ queue_remove(&vm_page_queue_inactive,
+ mem, vm_page_t, pageq);
+ }
+ vm_purgeable_q_advance_all();
+ }
+ mem->inactive = FALSE;
+ was_pageable = TRUE;
+ }
+
+ else if (mem->throttled) {
+ assert(mem->object != compressor_object);
+ assert(!mem->active && !mem->inactive);
+ assert(!mem->speculative);
+ assert(!mem->fictitious);
+ queue_remove(&vm_page_queue_throttled,
+ mem, vm_page_t, pageq);
+ mem->throttled = FALSE;
+ vm_page_throttled_count--;
+ was_pageable = FALSE;
+ }
+
+ else if (mem->speculative) {
+ assert(mem->object != compressor_object);
+ assert(!mem->active && !mem->inactive);
+ assert(!mem->throttled);
+ assert(!mem->fictitious);
+ remque(&mem->pageq);
+ mem->speculative = FALSE;
+ vm_page_speculative_count--;
+ was_pageable = TRUE;
+ }
+
+ else if (mem->pageq.next || mem->pageq.prev) {
+ was_pageable = FALSE;
+ panic("vm_page_queues_remove: unmarked page on Q");
+ } else {
+ was_pageable = FALSE;
+ }
+
+ mem->pageq.next = NULL;
+ mem->pageq.prev = NULL;
+ VM_PAGE_QUEUES_ASSERT(mem, 0);
+ if (was_pageable) {
+ if (mem->object->internal) {
+ vm_page_pageable_internal_count--;
+ } else {
+ vm_page_pageable_external_count--;
+ }
+ }
+}
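+
+/*
+ * Example caller pattern (editorial sketch, using only primitives already
+ * present in this file): vm_page_queues_remove() assumes the caller holds
+ * the global page-queue lock, so a typical use looks like:
+ *
+ *	vm_page_lock_queues();
+ *	if (!mem->pageout_queue)
+ *		vm_page_queues_remove(mem);
+ *	vm_page_unlock_queues();
+ *
+ * The per-CPU local queue lock, when needed, is taken inside
+ * vm_page_queues_remove() itself.
+ */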
+
+void
+vm_page_remove_internal(vm_page_t page)
+{
+ vm_object_t __object = page->object;
+ if (page == __object->memq_hint) {
+ vm_page_t __new_hint;
+ queue_entry_t __qe;
+ __qe = queue_next(&page->listq);
+ if (queue_end(&__object->memq, __qe)) {
+ __qe = queue_prev(&page->listq);
+ if (queue_end(&__object->memq, __qe)) {
+ __qe = NULL;
+ }
+ }
+ __new_hint = (vm_page_t) __qe;
+ __object->memq_hint = __new_hint;
+ }
+ queue_remove(&__object->memq, page, vm_page_t, listq);
+}
+
+void
+vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
+{
+ VM_PAGE_QUEUES_ASSERT(mem, 0);
+ assert(!mem->fictitious);
+ assert(!mem->laundry);
+ assert(!mem->pageout_queue);
+ vm_page_check_pageable_safe(mem);
+ if (mem->object->internal) {
+ if (first == TRUE)
+ queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+ else
+ queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);
+ vm_page_anonymous_count++;
+ vm_page_pageable_internal_count++;
+ } else {
+ if (first == TRUE)
+ queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+ else
+ queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);
+ vm_page_pageable_external_count++;
+ }
+ mem->inactive = TRUE;
+ vm_page_inactive_count++;
+ token_new_pagecount++;
+}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+ if (page->object == kernel_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from kernel object (%p) to pageable queue", kernel_object);
+ }
+
+ if (page->object == compressor_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from compressor object (%p) to pageable queue", compressor_object);
+ }
+
+ if (page->object == vm_submap_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from submap object (%p) to pageable queue", vm_submap_object);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
+vm_allocation_site_t *
+vm_allocation_sites[VM_KERN_MEMORY_COUNT];
+
+vm_tag_t
+vm_tag_bt(void)
+{
+ uintptr_t* frameptr;
+ uintptr_t* frameptr_next;
+ uintptr_t retaddr;
+ uintptr_t kstackb, kstackt;
+ const vm_allocation_site_t * site;
+ thread_t cthread;
+
+ cthread = current_thread();
+ if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;
+
+ kstackb = cthread->kernel_stack;
+ kstackt = kstackb + kernel_stack_size;
+
+ /* Load stack frame pointer (EBP on x86) into frameptr */
+ frameptr = __builtin_frame_address(0);
+ site = NULL;
+ while (frameptr != NULL)
+ {
+ /* Verify thread stack bounds */
+ if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;
+
+ /* Next frame pointer is pointed to by the previous one */
+ frameptr_next = (uintptr_t*) *frameptr;
+
+ /* Pull return address from one spot above the frame pointer */
+ retaddr = *(frameptr + 1);
+
+ if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
+ {
+ site = OSKextGetAllocationSiteForCaller(retaddr);
+ break;
+ }
+
+ frameptr = frameptr_next;
+ }
+ return (site ? site->tag : VM_KERN_MEMORY_NONE);
+}
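+
+/*
+ * Editorial note on the walk above: it relies on the standard frame-pointer
+ * layout (true for x86_64 and arm64 frame records when frame pointers are
+ * not omitted):
+ *
+ *	frameptr[0]	saved frame pointer of the caller
+ *	frameptr[1]	return address into the caller
+ *
+ * Each iteration checks that both slots still lie within [kstackb, kstackt),
+ * reads the return address, and follows frameptr[0] up the stack until it
+ * finds a return address outside the kernel's own text, which it hands to
+ * OSKextGetAllocationSiteForCaller() to resolve the owning kext's tag.
+ */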
+
+static uint64_t free_tag_bits[256/64];
+
+void
+vm_tag_alloc_locked(vm_allocation_site_t * site)
+{
+ vm_tag_t tag;
+ uint64_t avail;
+ uint64_t idx;
+
+ if (site->tag) return;
+
+ idx = 0;
+ while (TRUE)
+ {
+ avail = free_tag_bits[idx];
+ if (avail)
+ {
+ tag = __builtin_clzll(avail);
+ avail &= ~(1ULL << (63 - tag));
+ free_tag_bits[idx] = avail;
+ tag += (idx << 6);
+ break;
+ }
+ idx++;
+ if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0])))
+ {
+ tag = VM_KERN_MEMORY_ANY;
+ break;
+ }
+ }
+ site->tag = tag;
+ if (VM_KERN_MEMORY_ANY != tag)
+ {
+ assert(!vm_allocation_sites[tag]);
+ vm_allocation_sites[tag] = site;
+ }
+}
+
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+ uint64_t avail;
+ uint32_t idx;
+ uint64_t bit;
+
+ if (VM_KERN_MEMORY_ANY == tag) return;
+
+ idx = (tag >> 6);
+ avail = free_tag_bits[idx];
+ tag &= 63;
+ bit = (1ULL << (63 - tag));
+ assert(!(avail & bit));
+ free_tag_bits[idx] = (avail | bit);
+}
+
+static void
+vm_tag_init(void)
+{
+ vm_tag_t tag;
+ for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+ {
+ vm_tag_free_locked(tag);
+ }
+}
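+
+/*
+ * Worked example for the bitmap above (editorial sketch): tag 't' is free
+ * when bit (63 - (t & 63)) of free_tag_bits[t >> 6] is set, so the lowest
+ * free tag within a word is exactly __builtin_clzll() of that word.  If,
+ * say, only tags 5 and 9 of word 0 are free:
+ *
+ *	avail = (1ULL << (63 - 5)) | (1ULL << (63 - 9));
+ *	tag   = __builtin_clzll(avail);         // == 5, the lowest free tag
+ *	avail &= ~(1ULL << (63 - tag));         // tag 5 is now allocated
+ *
+ * vm_tag_init() seeds the bitmap with every tag in
+ * [VM_KERN_MEMORY_FIRST_DYNAMIC, VM_KERN_MEMORY_ANY) marked free.
+ */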
+
+vm_tag_t
+vm_tag_alloc(vm_allocation_site_t * site)
+{
+ vm_tag_t tag;
+
+ if (VM_TAG_BT & site->flags)
+ {
+ tag = vm_tag_bt();
+ if (VM_KERN_MEMORY_NONE != tag) return (tag);
+ }
+
+ if (!site->tag)
+ {
+ lck_spin_lock(&vm_allocation_sites_lock);
+ vm_tag_alloc_locked(site);
+ lck_spin_unlock(&vm_allocation_sites_lock);
+ }
+
+ return (site->tag);
+}
+
+static void
+vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object)
+{
+ if (!object->wired_page_count) return;
+ if (object != kernel_object)
+ {
+ assert(object->wire_tag < num_sites);
+ sites[object->wire_tag].size += ptoa_64(object->wired_page_count);
+ }
+}
+
+typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites,
+ unsigned int num_sites, vm_object_t object);
+
+static void
+vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites,
+ vm_page_iterate_proc proc, purgeable_q_t queue,
+ int group)
+{
+ vm_object_t object;
+
+ for (object = (vm_object_t) queue_first(&queue->objq[group]);
+ !queue_end(&queue->objq[group], (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq))
+ {
+ proc(sites, num_sites, object);
+ }
+}
+
+static void
+vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites,
+ vm_page_iterate_proc proc)
+{
+ purgeable_q_t volatile_q;
+ queue_head_t * nonvolatile_q;
+ vm_object_t object;
+ int group;
+
+ lck_spin_lock(&vm_objects_wired_lock);
+ queue_iterate(&vm_objects_wired,
+ object,
+ vm_object_t,
+ objq)
+ {
+ proc(sites, num_sites, object);
+ }
+ lck_spin_unlock(&vm_objects_wired_lock);
+
+ lck_mtx_lock(&vm_purgeable_queue_lock);
+ nonvolatile_q = &purgeable_nonvolatile_queue;
+ for (object = (vm_object_t) queue_first(nonvolatile_q);
+ !queue_end(nonvolatile_q, (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq))
+ {
+ proc(sites, num_sites, object);
+ }
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+ vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0);
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+ for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+ {
+ vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
+ }
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+ for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+ {
+ vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group);
+ }
+ lck_mtx_unlock(&vm_purgeable_queue_lock);
+}
+
+static uint64_t
+process_account(mach_memory_info_t * sites, unsigned int __unused num_sites)
+{
+ uint64_t found;
+ unsigned int idx;
+ vm_allocation_site_t * site;
+
+ assert(num_sites >= VM_KERN_MEMORY_COUNT);
+ found = 0;
+ for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++)
+ {
+ found += sites[idx].size;
+ if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
+ {
+ sites[idx].site = idx;
+ sites[idx].flags |= VM_KERN_SITE_TAG;
+ if (VM_KERN_MEMORY_ZONE == idx) sites[idx].flags |= VM_KERN_SITE_HIDE;
+ else sites[idx].flags |= VM_KERN_SITE_WIRED;
+ continue;
+ }
+ lck_spin_lock(&vm_allocation_sites_lock);
+ if ((site = vm_allocation_sites[idx]))
+ {
+ if (sites[idx].size)
+ {
+ sites[idx].flags |= VM_KERN_SITE_WIRED;
+ if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags))
+ {
+ sites[idx].site = OSKextGetKmodIDForSite(site);
+ sites[idx].flags |= VM_KERN_SITE_KMOD;
+ }
+ else
+ {
+ sites[idx].site = VM_KERNEL_UNSLIDE(site);
+ sites[idx].flags |= VM_KERN_SITE_KERNEL;
+ }
+ site = NULL;
+ }
+ else
+ {
+ vm_tag_free_locked(site->tag);
+ site->tag = VM_KERN_MEMORY_NONE;
+ vm_allocation_sites[idx] = NULL;
+ if (!(VM_TAG_UNLOAD & site->flags)) site = NULL;
+ }
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+ if (site) OSKextFreeSite(site);
+ }
+ return (found);
+}
+
+kern_return_t
+vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites)
+{
+ enum { kMaxKernelDepth = 1 };
+ vm_map_t maps [kMaxKernelDepth];
+ vm_map_entry_t entries[kMaxKernelDepth];
+ vm_map_t map;
+ vm_map_entry_t entry;
+ vm_object_offset_t offset;
+ vm_page_t page;
+ int stackIdx, count;
+ uint64_t wired_size;
+ uint64_t wired_managed_size;
+ uint64_t wired_reserved_size;
+ mach_memory_info_t * counts;
+
+ bzero(sites, num_sites * sizeof(mach_memory_info_t));
+
+ vm_page_iterate_objects(sites, num_sites, &vm_page_count_object);
+
+ wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
+ wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
+ wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
+
+ assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT));
+ counts = &sites[VM_KERN_MEMORY_COUNT];
+
+#define SET_COUNT(xcount, xsize, xflags) \
+ counts[xcount].site = (xcount); \
+ counts[xcount].size = (xsize); \
+ counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
+
+ SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
+ SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
+ SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
+ SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
+
+#define SET_MAP(xcount, xsize, xfree, xlargest) \
+ counts[xcount].site = (xcount); \
+ counts[xcount].size = (xsize); \
+ counts[xcount].free = (xfree); \
+ counts[xcount].largest = (xlargest); \
+ counts[xcount].flags = VM_KERN_SITE_COUNTER;
+
+ vm_map_size_t map_size, map_free, map_largest;
+
+ vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
+
+ vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
+
+ vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
+
+ map = kernel_map;
+ stackIdx = 0;
+ while (map)
+ {
+ vm_map_lock(map);
+ for (entry = map->hdr.links.next; map; entry = entry->links.next)
+ {
+ if (entry->is_sub_map)
+ {
+ assert(stackIdx < kMaxKernelDepth);
+ maps[stackIdx] = map;
+ entries[stackIdx] = entry;
+ stackIdx++;
+ map = VME_SUBMAP(entry);
+ entry = NULL;
+ break;
+ }
+ if (VME_OBJECT(entry) == kernel_object)
+ {
+ count = 0;
+ vm_object_lock(VME_OBJECT(entry));
+ for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
+ {
+ page = vm_page_lookup(VME_OBJECT(entry), offset);
+ if (page && VM_PAGE_WIRED(page)) count++;
+ }
+ vm_object_unlock(VME_OBJECT(entry));
+
+ if (count)
+ {
+ assert(VME_ALIAS(entry) < num_sites);
+ sites[VME_ALIAS(entry)].size += ptoa_64(count);
+ }
+ }
+ if (entry == vm_map_last_entry(map))
+ {
+ vm_map_unlock(map);
+ if (!stackIdx) map = NULL;
+ else
+ {
+ --stackIdx;
+ map = maps[stackIdx];
+ entry = entries[stackIdx];
+ }
+ }
+ }
+ }
+
+ process_account(sites, num_sites);
+
+ return (KERN_SUCCESS);
+}
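+
+/*
+ * Usage sketch (editorial, hypothetical caller): per the assert above, the
+ * caller must supply at least VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT
+ * entries, e.g.:
+ *
+ *	unsigned int num = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
+ *	mach_memory_info_t *info = kalloc(num * sizeof(mach_memory_info_t));
+ *	if (info != NULL) {
+ *		vm_page_diagnose(info, num);
+ *		// ... report per-tag sizes and counters from info[] ...
+ *		kfree(info, num * sizeof(mach_memory_info_t));
+ *	}
+ *
+ * The host-level interface that actually exposes this data sizes and frees
+ * its buffer itself; that call site is outside this diff.
+ */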