X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/39236c6e673c41db228275375ab7fdb0f837b292..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/vm/vm_resident.c

diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c
index 2bb8118c2..25c1edb26 100644
--- a/osfmk/vm/vm_resident.c
+++ b/osfmk/vm/vm_resident.c
@@ -64,6 +64,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -76,6 +77,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -84,6 +86,7 @@
 #include		/* kernel_memory_allocate() */
 #include
 #include
+#include
 #include
 #include
@@ -92,6 +95,10 @@
 #include
 #include
 
+#if CONFIG_PHANTOM_CACHE
+#include
+#endif
+
 #include
 #include
@@ -119,8 +126,9 @@ __private_extern__ void		vm_page_init_lck_grp(void);
 static void		vm_page_free_prepare(vm_page_t	page);
 static vm_page_t	vm_page_grab_fictitious_common(ppnum_t phys_addr);
 
+static void vm_tag_init(void);
 
-
+uint64_t	vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
 
 /*
  *	Associated with page of user-allocatable memory is a
@@ -146,7 +154,7 @@ uint32_t	vm_page_pages;
  *	or VP, table.]
  */
 typedef struct {
-	vm_page_t	pages;
+	vm_page_packed_t page_list;
 #if	MACH_PAGE_HASH_STATS
 	int		cur_count;		/* current count */
 	int		hi_count;		/* high water mark */
@@ -164,6 +172,18 @@ uint32_t	vm_page_bucket_hash;	/* Basic bucket hash */
 unsigned int	vm_page_bucket_lock_count = 0;		/* How big is array of locks? */
 
 lck_spin_t	*vm_page_bucket_locks;
+lck_spin_t	vm_objects_wired_lock;
+lck_spin_t	vm_allocation_sites_lock;
+
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets;	/* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+extern int not_in_kdp;
 
 #if	MACH_PAGE_HASH_STATS
@@ -238,14 +258,13 @@ ppnum_t vm_page_lowest = 0;
 unsigned int	vm_colors;
 unsigned int	vm_color_mask;			/* mask is == (vm_colors-1) */
 unsigned int	vm_cache_geometry_colors = 0;	/* set by hw dependent code during startup */
+unsigned int	vm_free_magazine_refill_limit = 0;
 queue_head_t	vm_page_queue_free[MAX_COLORS];
 unsigned int	vm_page_free_wanted;
 unsigned int	vm_page_free_wanted_privileged;
 unsigned int	vm_page_free_count;
 unsigned int	vm_page_fictitious_count;
 
-unsigned int	vm_page_free_count_minimum;	/* debugging */
-
 /*
  *	Occasionally, the virtual memory system uses
  *	resident page structures that do not refer to
@@ -303,21 +322,28 @@ queue_head_t	vm_page_queue_inactive;
 queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
 queue_head_t	vm_page_queue_throttled;
 
+queue_head_t	vm_objects_wired;
+
 unsigned int	vm_page_active_count;
 unsigned int	vm_page_inactive_count;
 unsigned int	vm_page_anonymous_count;
 unsigned int	vm_page_throttled_count;
 unsigned int	vm_page_speculative_count;
+
 unsigned int	vm_page_wire_count;
+unsigned int	vm_page_stolen_count;
 unsigned int	vm_page_wire_count_initial;
+unsigned int	vm_page_pages_initial;
 unsigned int	vm_page_gobble_count = 0;
-unsigned int	vm_page_wire_count_warning = 0;
-unsigned int	vm_page_gobble_count_warning = 0;
+
+#define	VM_PAGE_WIRE_COUNT_WARNING	0
+#define	VM_PAGE_GOBBLE_COUNT_WARNING	0
 
 unsigned int	vm_page_purgeable_count = 0; /* # of pages purgeable now */
 unsigned int	vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
 uint64_t	vm_page_purged_count = 0;    /* total count of purged pages */
 
+unsigned int	vm_page_xpmapped_external_count = 0;
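Note on the vm_page_packed_t change above: the hash buckets stop storing a full 64-bit vm_page_t head and keep a 32-bit packed value instead, which works because every struct vm_page is 64 bytes, 64-byte aligned, and lives in a bounded range. The following is a minimal userspace sketch of that packing idea in plain C; the pack/unpack helpers, the base-relative arithmetic, and the +1 encoding of NULL are illustrative assumptions, not the kernel's actual VM_PAGE_PACK_PTR implementation.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PACK_SHIFT 6                 /* elements are 64-byte aligned */
    static uintptr_t pack_base;          /* start of the page array */

    typedef uint32_t packed_ptr_t;

    static packed_ptr_t pack_ptr(void *p) {
        if (p == NULL)
            return 0;
        uintptr_t off = (uintptr_t)p - pack_base;
        assert((off & ((1u << PACK_SHIFT) - 1)) == 0); /* must be aligned */
        return (packed_ptr_t)(off >> PACK_SHIFT) + 1;  /* +1 so 0 means NULL */
    }

    static void *unpack_ptr(packed_ptr_t pp) {
        if (pp == 0)
            return NULL;
        return (void *)(pack_base + ((uintptr_t)(pp - 1) << PACK_SHIFT));
    }

    struct fake_page { char pad[64]; } __attribute__((aligned(64)));

    int main(void) {
        static struct fake_page pages[4];
        pack_base = (uintptr_t)&pages[0];

        /* round-trip check, analogous to the panics in pmap_startup() */
        for (int i = 0; i < 4; i++)
            assert(unpack_ptr(pack_ptr(&pages[i])) == &pages[i]);
        assert(unpack_ptr(pack_ptr(NULL)) == NULL);
        printf("packed %zu-byte pointers into %zu bytes\n",
               sizeof(void *), sizeof(packed_ptr_t));
        return 0;
    }

The same trick is why pmap_startup() below asserts sizeof(struct vm_page) == 64: any change to the struct size would silently break the shift-based packing.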
 unsigned int	vm_page_external_count = 0;
 unsigned int	vm_page_internal_count = 0;
 unsigned int	vm_page_pageable_external_count = 0;
@@ -347,7 +373,6 @@ ppnum_t		max_valid_low_ppnum = 0xffffffff;
 unsigned int	vm_page_free_target = 0;
 unsigned int	vm_page_free_min = 0;
 unsigned int	vm_page_throttle_limit = 0;
-uint32_t	vm_page_creation_throttle = 0;
 unsigned int	vm_page_inactive_target = 0;
 unsigned int	vm_page_anonymous_min = 0;
 unsigned int	vm_page_inactive_min = 0;
@@ -379,7 +404,9 @@ struct vm_page_stats_reusable vm_page_stats_reusable;
 void
 vm_set_page_size(void)
 {
-	page_mask = page_size - 1;
+	page_size  = PAGE_SIZE;
+	page_mask  = PAGE_MASK;
+	page_shift = PAGE_SHIFT;
 
 	if ((page_mask & page_size) != 0)
 		panic("vm_set_page_size: page size not a power of two");
@@ -389,6 +416,8 @@ vm_set_page_size(void)
 			break;
 }
 
+#define COLOR_GROUPS_TO_STEAL	4
+
 /*
  * Called once during startup, once the cache geometry is known.
  */
@@ -414,6 +443,8 @@ vm_page_set_colors( void )
 
 	vm_colors = n;
 	vm_color_mask = n - 1;
+
+	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
 }
 
@@ -511,7 +542,7 @@ vm_page_bootstrap(
 	m->pageq.prev = NULL;
 	m->listq.next = NULL;
 	m->listq.prev = NULL;
-	m->next = VM_PAGE_NULL;
+	m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 
 	m->object = VM_OBJECT_NULL;		/* reset later */
 	m->offset = (vm_object_offset_t) -1;	/* reset later */
@@ -535,6 +566,7 @@ vm_page_bootstrap(
 	m->busy = TRUE;
 	m->wanted = FALSE;
 	m->tabled = FALSE;
+	m->hashed = FALSE;
 	m->fictitious = FALSE;
 	m->pmapped = FALSE;
 	m->wpmapped = FALSE;
@@ -552,12 +584,13 @@ vm_page_bootstrap(
 	m->encrypted_cleaning = FALSE;
 	m->cs_validated = FALSE;
 	m->cs_tainted = FALSE;
+	m->cs_nx = FALSE;
 	m->no_cache = FALSE;
 	m->reusable = FALSE;
 	m->slid = FALSE;
-	m->was_dirty = FALSE;
 	m->xpmapped = FALSE;
 	m->compressor = FALSE;
+	m->written_by_kernel = FALSE;
 	m->__unused_object_bits = 0;
 
 	/*
@@ -584,6 +617,8 @@ vm_page_bootstrap(
 		purgeable_queues[i].debug_count_objects = 0;
 #endif
 	};
+	purgeable_nonvolatile_count = 0;
+	queue_init(&purgeable_nonvolatile_queue);
 
 	for (i = 0; i < MAX_COLORS; i++ )
 		queue_init(&vm_page_queue_free[i]);
@@ -594,6 +629,7 @@ vm_page_bootstrap(
 	queue_init(&vm_page_queue_cleaned);
 	queue_init(&vm_page_queue_throttled);
 	queue_init(&vm_page_queue_anonymous);
+	queue_init(&vm_objects_wired);
 
 	for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) {
 		queue_init(&vm_page_queue_speculative[i].age_q);
@@ -610,7 +646,9 @@ vm_page_bootstrap(
 	/*
 	 *	Steal memory for the map and zone subsystems.
 	 */
+	kernel_debug_string_simple("zone_steal_memory");
 	zone_steal_memory();
+	kernel_debug_string_simple("vm_map_steal_memory");
 	vm_map_steal_memory();
 
 	/*
@@ -656,10 +694,36 @@ vm_page_bootstrap(
 	if (vm_page_hash_mask & vm_page_bucket_count)
 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+	/*
+	 * Allocate a decoy set of page buckets, to detect
+	 * any stomping there.
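The "strange page hash" warning above fires when vm_page_hash_mask overlaps vm_page_bucket_count, which can only happen if the bucket count is not a power of two (the mask is meant to be count - 1). A small standalone illustration of that invariant, assuming a hypothetical pages-to-buckets ratio of 4:

    #include <stdio.h>

    static unsigned int round_up_pow2(unsigned int n) {
        unsigned int p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void) {
        unsigned int npages  = 100000;            /* hypothetical page count */
        unsigned int buckets = round_up_pow2(npages / 4);
        unsigned int mask    = buckets - 1;

        /* mirrors the "strange page hash" sanity check in vm_page_bootstrap */
        if (mask & buckets)
            printf("WARNING -- strange page hash\n");
        else
            printf("%u buckets, mask 0x%x\n", buckets, mask);
        return 0;
    }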
+	 */
+	vm_page_fake_buckets = (vm_page_bucket_t *)
+		pmap_steal_memory(vm_page_bucket_count *
+				  sizeof(vm_page_bucket_t));
+	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+	vm_page_fake_buckets_end =
+		vm_map_round_page((vm_page_fake_buckets_start +
+				   (vm_page_bucket_count *
+				    sizeof (vm_page_bucket_t))),
+				  PAGE_MASK);
+	char *cp;
+	for (cp = (char *)vm_page_fake_buckets_start;
+	     cp < (char *)vm_page_fake_buckets_end;
+	     cp++) {
+		*cp = 0x5a;
+	}
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+	kernel_debug_string_simple("vm_page_buckets");
 	vm_page_buckets = (vm_page_bucket_t *)
 		pmap_steal_memory(vm_page_bucket_count *
 				  sizeof(vm_page_bucket_t));
 
+	kernel_debug_string_simple("vm_page_bucket_locks");
 	vm_page_bucket_locks = (lck_spin_t *)
 		pmap_steal_memory(vm_page_bucket_lock_count *
 				  sizeof(lck_spin_t));
@@ -667,7 +731,7 @@ vm_page_bootstrap(
 	for (i = 0; i < vm_page_bucket_count; i++) {
 		register vm_page_bucket_t *bucket = &vm_page_buckets[i];
 
-		bucket->pages = VM_PAGE_NULL;
+		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 #if	MACH_PAGE_HASH_STATS
 		bucket->cur_count = 0;
 		bucket->hi_count = 0;
@@ -677,6 +741,14 @@ vm_page_bootstrap(
 	for (i = 0; i < vm_page_bucket_lock_count; i++)
 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
 
+	lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+	lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
+	vm_tag_init();
+
+#if VM_PAGE_BUCKETS_CHECK
+	vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
 	/*
 	 *	Machine-dependent code allocates the resident page table.
 	 *	It uses vm_page_init to initialize the page frames.
@@ -685,6 +757,7 @@ vm_page_bootstrap(
 	 *	to get the alignment right.
 	 */
 
+	kernel_debug_string_simple("pmap_startup");
 	pmap_startup(&virtual_space_start, &virtual_space_end);
 	virtual_space_start = round_page(virtual_space_start);
 	virtual_space_end = trunc_page(virtual_space_end);
@@ -702,11 +775,12 @@ vm_page_bootstrap(
 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count;	/* initial value */
 	vm_page_wire_count_initial = vm_page_wire_count;
-	vm_page_free_count_minimum = vm_page_free_count;
+	vm_page_pages_initial = vm_page_pages;
 
 	printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
 	       vm_page_free_count, vm_page_wire_count);
 
+	kernel_debug_string_simple("vm_page_bootstrap complete");
 	simple_lock_init(&vm_paging_lock, 0);
 }
 
@@ -781,12 +855,13 @@ pmap_steal_memory(
 		 *	Account for newly stolen memory
 		 */
 		vm_page_wire_count++;
-
+		vm_page_stolen_count++;
 	}
 
 	return (void *) addr;
}
 
+void vm_page_release_startup(vm_page_t mem);
 void
 pmap_startup(
 	vm_offset_t *startp,
@@ -796,6 +871,22 @@ pmap_startup(
 	ppnum_t		phys_page;
 	addr64_t	tmpaddr;
 
+
+#if defined(__LP64__)
+	/*
+	 * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use
+	 */
+	assert(sizeof(struct vm_page) == 64);
+
+	/*
+	 * make sure we are aligned on a 64 byte boundary
+	 * for VM_PAGE_PACK_PTR (it clips off the low-order
+	 * 6 bits of the pointer)
+	 */
+	if (virtual_space_start != virtual_space_end)
+		virtual_space_start = round_page(virtual_space_start);
+#endif
+
 	/*
 	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
@@ -810,6 +901,7 @@ pmap_startup(
 	/*
 	 *	Initialize the page frames.
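The decoy buckets stolen just before the real bucket array are filled with 0x5a so that a later integrity pass can detect stray writes through stale bucket pointers. A userspace sketch of the same arm-and-scan pattern follows; the decoy_check() routine is hypothetical (xnu's equivalent sits behind VM_PAGE_BUCKETS_CHECK):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define SENTINEL 0x5a

    static unsigned char decoy[4096];

    static void decoy_arm(void) {
        memset(decoy, SENTINEL, sizeof(decoy));   /* like the 0x5a fill above */
    }

    static int decoy_check(void) {
        for (size_t i = 0; i < sizeof(decoy); i++)
            if (decoy[i] != SENTINEL)
                return (int)i;    /* first corrupted offset */
        return -1;                /* clean */
    }

    int main(void) {
        decoy_arm();
        decoy[123] = 0;           /* simulate a stray write */
        int off = decoy_check();
        if (off >= 0)
            printf("stomp detected at offset %d\n", off);
        return 0;
    }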
 	 */
+	kernel_debug_string_simple("Initialize the page frames");
 	for (i = 0, pages_initialized = 0; i < npages; i++) {
 		if (!pmap_next_page(&phys_page))
 			break;
@@ -822,6 +914,15 @@ pmap_startup(
 	}
 	vm_pages_count = pages_initialized;
 
+#if defined(__LP64__)
+
+	if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0])
+		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
+
+	if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1])
+		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]);
+#endif
+	kernel_debug_string_simple("page fill/release");
 	/*
 	 * Check if we want to initialize pages to a known value
 	 */
@@ -844,7 +945,7 @@ pmap_startup(
 		// free low -> high so high is preferred
 		for (i = 1; i <= pages_initialized; i++) {
 			if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
-			vm_page_release(&vm_pages[i - 1]);
+			vm_page_release_startup(&vm_pages[i - 1]);
 		}
 	}
 	else
@@ -858,9 +959,11 @@ pmap_startup(
 	 */
 	for (i = pages_initialized; i > 0; i--) {
 		if(fill) fillPage(vm_pages[i - 1].phys_page, fillval);		/* Fill the page with a known value if requested at boot */
-		vm_page_release(&vm_pages[i - 1]);
+		vm_page_release_startup(&vm_pages[i - 1]);
 	}
 
+	VM_CHECK_MEMORYSTATUS;
+
 #if 0
 	{
 		vm_page_t xx, xxo, xxl;
@@ -924,6 +1027,7 @@ pmap_startup(
 void
 vm_page_module_init(void)
 {
+	uint64_t vm_page_zone_pages, vm_page_zone_data_size;
 	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
 			     0, PAGE_SIZE, "vm pages");
 
@@ -936,13 +1040,18 @@ vm_page_module_init(void)
 	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
 	zone_change(vm_page_zone, Z_FOREIGN, TRUE);
 	zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE);
-	/*
-	 * Adjust zone statistics to account for the real pages allocated
-	 * in vm_page_create(). [Q: is this really what we want?]
-	 */
-	vm_page_zone->count += vm_page_pages;
-	vm_page_zone->sum_count += vm_page_pages;
-	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
+	/*
+	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
+	 */
+	vm_page_zone->count += vm_page_pages;
+	vm_page_zone->sum_count += vm_page_pages;
+	vm_page_zone_data_size = vm_page_pages * vm_page_zone->elem_size;
+	vm_page_zone->cur_size += vm_page_zone_data_size;
+	vm_page_zone_pages = ((round_page(vm_page_zone_data_size)) / PAGE_SIZE);
+	OSAddAtomic64(vm_page_zone_pages, &(vm_page_zone->page_count));
+	/* since zone accounts for these, take them out of stolen */
+	VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
 }
 
 /*
@@ -1003,7 +1112,17 @@ vm_page_insert(
 	vm_object_t		object,
 	vm_object_offset_t	offset)
 {
-	vm_page_insert_internal(mem, object, offset, FALSE, TRUE, FALSE);
+	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
+}
+
+void
+vm_page_insert_wired(
+	vm_page_t		mem,
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	vm_tag_t		tag)
+{
+	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
 }
 
 void
@@ -1011,13 +1130,17 @@ vm_page_insert_internal(
 	vm_page_t		mem,
 	vm_object_t		object,
 	vm_object_offset_t	offset,
+	vm_tag_t		tag,
 	boolean_t		queues_lock_held,
 	boolean_t		insert_in_hash,
-	boolean_t		batch_pmap_op)
+	boolean_t		batch_pmap_op,
+	boolean_t		batch_accounting,
+	uint64_t		*delayed_ledger_update)
 {
-	vm_page_bucket_t *bucket;
-	lck_spin_t	*bucket_lock;
-	int	hash_id;
+	vm_page_bucket_t	*bucket;
+	lck_spin_t		*bucket_lock;
+	int			hash_id;
+	task_t			owner;
 
	XPR(XPR_VM_PAGE,
		"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
@@ -1032,10 +1155,10 @@ vm_page_insert_internal(
 
 	assert(page_aligned(offset));
 
-	if (object == vm_submap_object) {
-		/* the vm_submap_object is only a placeholder for submaps */
-		panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset);
-	}
+	assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE));
+
+	/* the vm_submap_object is only a placeholder for submaps */
+	assert(object != vm_submap_object);
 
 	vm_object_lock_assert_exclusive(object);
 #if DEBUG
@@ -1043,9 +1166,9 @@ vm_page_insert_internal(
 		      queues_lock_held ? LCK_MTX_ASSERT_OWNED
 					: LCK_MTX_ASSERT_NOTOWNED);
 #endif	/* DEBUG */
-	
+
 	if (insert_in_hash == TRUE) {
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
 		if (mem->tabled || mem->object != VM_OBJECT_NULL)
 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
 			      "already in (obj=%p,off=0x%llx)",
@@ -1055,7 +1178,14 @@ vm_page_insert_internal(
 
 		/* only insert "pageout" pages into "pageout" objects,
 		 * and normal pages into normal objects */
+#if 00
+		/*
+		 * For some reason, this assertion gets tripped
+		 * but it's mostly harmless, so let's disable it
+		 * for now.
+		 */
 		assert(object->pageout == mem->pageout);
+#endif /* 00 */
 
 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
 
@@ -1075,13 +1205,15 @@ vm_page_insert_internal(
 
 		lck_spin_lock(bucket_lock);
 
-		mem->next = bucket->pages;
-		bucket->pages = mem;
+		mem->next_m = bucket->page_list;
+		bucket->page_list = VM_PAGE_PACK_PTR(mem);
+		assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list));
+
 #if	MACH_PAGE_HASH_STATS
 		if (++bucket->cur_count > bucket->hi_count)
 			bucket->hi_count = bucket->cur_count;
 #endif /* MACH_PAGE_HASH_STATS */
-
+		mem->hashed = TRUE;
 		lck_spin_unlock(bucket_lock);
 	}
 
@@ -1097,7 +1229,8 @@ vm_page_insert_internal(
 	/*
 	 *	Now link into the object's list of backed pages.
 	 */
-	VM_PAGE_INSERT(mem, object);
+	queue_enter(&object->memq, mem, vm_page_t, listq);
+	object->memq_hint = mem;
 	mem->tabled = TRUE;
 
 	/*
@@ -1106,14 +1239,25 @@ vm_page_insert_internal(
 
 	object->resident_page_count++;
 	if (VM_PAGE_WIRED(mem)) {
-		object->wired_page_count++;
+	    if (!mem->private && !mem->fictitious)
+	    {
+		if (!object->wired_page_count)
+		{
+		    assert(VM_KERN_MEMORY_NONE != tag);
+		    object->wire_tag = tag;
+		    VM_OBJECT_WIRED(object);
+		}
+	    }
+	    object->wired_page_count++;
 	}
 	assert(object->resident_page_count >= object->wired_page_count);
 
-	if (object->internal) {
-		OSAddAtomic(1, &vm_page_internal_count);
-	} else {
-		OSAddAtomic(1, &vm_page_external_count);
+	if (batch_accounting == FALSE) {
+		if (object->internal) {
+			OSAddAtomic(1, &vm_page_internal_count);
+		} else {
+			OSAddAtomic(1, &vm_page_external_count);
+		}
 	}
 
 	/*
@@ -1133,11 +1277,43 @@ vm_page_insert_internal(
 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
 	}
 
+	if (object->purgable == VM_PURGABLE_DENY) {
+		owner = TASK_NULL;
+	} else {
+		owner = object->vo_purgeable_owner;
+	}
+	if (owner &&
+	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
+	     VM_PAGE_WIRED(mem))) {
+
+		if (delayed_ledger_update)
+			*delayed_ledger_update += PAGE_SIZE;
+		else {
+			/* more non-volatile bytes */
+			ledger_credit(owner->ledger,
+				      task_ledgers.purgeable_nonvolatile,
+				      PAGE_SIZE);
+			/* more footprint */
+			ledger_credit(owner->ledger,
+				      task_ledgers.phys_footprint,
+				      PAGE_SIZE);
+		}
+
+	} else if (owner &&
+		   (object->purgable == VM_PURGABLE_VOLATILE ||
+		    object->purgable == VM_PURGABLE_EMPTY)) {
+		assert(! VM_PAGE_WIRED(mem));
+		/* more volatile bytes */
+		ledger_credit(owner->ledger,
+			      task_ledgers.purgeable_volatile,
+			      PAGE_SIZE);
+	}
+
 	if (object->purgable == VM_PURGABLE_VOLATILE) {
 		if (VM_PAGE_WIRED(mem)) {
-			OSAddAtomic(1, &vm_page_purgeable_wired_count);
+			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
 		} else {
-			OSAddAtomic(1, &vm_page_purgeable_count);
+			OSAddAtomic(+1, &vm_page_purgeable_count);
 		}
 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
 		   mem->throttled) {
@@ -1155,6 +1331,25 @@ vm_page_insert_internal(
 		if (queues_lock_held == FALSE)
 			vm_page_unlock_queues();
 	}
+
+#if VM_OBJECT_TRACKING_OP_MODIFIED
+	if (vm_object_tracking_inited &&
+	    object->internal &&
+	    object->resident_page_count == 0 &&
+	    object->pager == NULL &&
+	    object->shadow != NULL &&
+	    object->shadow->copy == object) {
+		void *bt[VM_OBJECT_TRACKING_BTDEPTH];
+		int numsaved = 0;
+
+		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
+		btlog_add_entry(vm_object_tracking_btlog,
+				object,
+				VM_OBJECT_TRACKING_OP_MODIFIED,
+				bt,
+				numsaved);
+	}
+#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
 }
 
 /*
@@ -1184,7 +1379,7 @@ vm_page_replace(
 	VM_PAGE_CHECK(mem);
 #endif
 	vm_object_lock_assert_exclusive(object);
-#if DEBUG
+#if DEBUG || VM_PAGE_CHECK_BUCKETS
 	if (mem->tabled || mem->object != VM_OBJECT_NULL)
 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
 		      "already in (obj=%p,off=0x%llx)",
@@ -1209,31 +1404,33 @@ vm_page_replace(
 
 	lck_spin_lock(bucket_lock);
 
-	if (bucket->pages) {
-		vm_page_t *mp = &bucket->pages;
-		vm_page_t m = *mp;
+	if (bucket->page_list) {
+		vm_page_packed_t *mp = &bucket->page_list;
+		vm_page_t m = VM_PAGE_UNPACK_PTR(*mp);
 
 		do {
 			if (m->object == object && m->offset == offset) {
 				/*
 				 * Remove old page from hash list
 				 */
-				*mp = m->next;
+				*mp = m->next_m;
+				m->hashed = FALSE;
 
 				found_m = m;
 				break;
 			}
-			mp = &m->next;
-		} while ((m = *mp));
+			mp = &m->next_m;
+		} while ((m = VM_PAGE_UNPACK_PTR(*mp)));
 
-		mem->next = bucket->pages;
+		mem->next_m = bucket->page_list;
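The vm_tag_t argument threaded through vm_page_insert_internal above records, when an object's first page is wired, which subsystem owns that wired memory (object->wire_tag plus VM_OBJECT_WIRED). A toy model of that first-wire tagging, with invented tag numbers and a per-tag byte counter standing in for the kernel's allocation-site accounting:

    #include <assert.h>
    #include <stdio.h>

    #define TAG_NONE   0        /* stand-in for VM_KERN_MEMORY_NONE */
    #define TAG_MAX    8
    #define PAGE_BYTES 4096

    struct object { unsigned int wired_page_count; int wire_tag; };

    static unsigned long wired_bytes_by_tag[TAG_MAX];

    static void wire_page(struct object *o, int tag) {
        if (o->wired_page_count == 0) {
            assert(tag != TAG_NONE);  /* first wire must carry a real tag */
            o->wire_tag = tag;        /* object joins that tag's accounting */
        }
        o->wired_page_count++;
        wired_bytes_by_tag[o->wire_tag] += PAGE_BYTES;
    }

    static void unwire_page(struct object *o) {
        wired_bytes_by_tag[o->wire_tag] -= PAGE_BYTES;
        if (--o->wired_page_count == 0)
            o->wire_tag = TAG_NONE;   /* analogous to VM_OBJECT_UNWIRED */
    }

    int main(void) {
        struct object obj = { 0, TAG_NONE };
        wire_page(&obj, 3);
        wire_page(&obj, 5);           /* later wires keep the first tag */
        unwire_page(&obj);
        unwire_page(&obj);
        printf("tag 3 wired bytes now: %lu\n", wired_bytes_by_tag[3]);
        return 0;
    }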
 	} else {
-		mem->next = VM_PAGE_NULL;
+		mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
 	}
 	/*
 	 * insert new page at head of hash list
 	 */
-	bucket->pages = mem;
+	bucket->page_list = VM_PAGE_PACK_PTR(mem);
+	mem->hashed = TRUE;
 
 	lck_spin_unlock(bucket_lock);
 
@@ -1245,7 +1442,7 @@ vm_page_replace(
 		 */
 		vm_page_free_unlocked(found_m, FALSE);
 	}
-	vm_page_insert_internal(mem, object, offset, FALSE, FALSE, FALSE);
+	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
 }
 
 /*
@@ -1266,6 +1463,7 @@ vm_page_remove(
 	vm_page_t	this;
 	lck_spin_t	*bucket_lock;
 	int		hash_id;
+	task_t		owner;
 
	XPR(XPR_VM_PAGE,
		"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
@@ -1293,30 +1491,30 @@ vm_page_remove(
 
 		lck_spin_lock(bucket_lock);
 
-		if ((this = bucket->pages) == mem) {
+		if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) {
 			/* optimize for common case */
 
-			bucket->pages = mem->next;
+			bucket->page_list = mem->next_m;
 		} else {
-			vm_page_t	*prev;
+			vm_page_packed_t	*prev;
 
-			for (prev = &this->next;
-			     (this = *prev) != mem;
-			     prev = &this->next)
+			for (prev = &this->next_m;
+			     (this = VM_PAGE_UNPACK_PTR(*prev)) != mem;
+			     prev = &this->next_m)
 				continue;
-			*prev = this->next;
+			*prev = this->next_m;
 		}
 #if	MACH_PAGE_HASH_STATS
 		bucket->cur_count--;
 #endif /* MACH_PAGE_HASH_STATS */
-
+		mem->hashed = FALSE;
 		lck_spin_unlock(bucket_lock);
 	}
 	/*
 	 *	Now remove from the object's list of backed pages.
 	 */
 
-	VM_PAGE_REMOVE(mem);
+	vm_page_remove_internal(mem);
 
 	/*
 	 *	And show that the object has one fewer resident
@@ -1327,11 +1525,19 @@ vm_page_remove(
 	mem->object->resident_page_count--;
 
 	if (mem->object->internal) {
+#if DEBUG
 		assert(vm_page_internal_count);
+#endif /* DEBUG */
+
 		OSAddAtomic(-1, &vm_page_internal_count);
 	} else {
 		assert(vm_page_external_count);
 		OSAddAtomic(-1, &vm_page_external_count);
+
+		if (mem->xpmapped) {
+			assert(vm_page_xpmapped_external_count);
+			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
+		}
 	}
 	if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) {
 		if (mem->object->resident_page_count == 0)
@@ -1341,6 +1547,9 @@ vm_page_remove(
 	if (VM_PAGE_WIRED(mem)) {
 		assert(mem->object->wired_page_count > 0);
 		mem->object->wired_page_count--;
+		if (!mem->object->wired_page_count) {
+		    VM_OBJECT_UNWIRED(mem->object);
+		}
 	}
 	assert(mem->object->resident_page_count >=
 	       mem->object->wired_page_count);
@@ -1357,6 +1566,31 @@ vm_page_remove(
 		vm_page_stats_reusable.reused_remove++;
 	}
 
+	if (mem->object->purgable == VM_PURGABLE_DENY) {
+		owner = TASK_NULL;
+	} else {
+		owner = mem->object->vo_purgeable_owner;
+	}
+	if (owner &&
+	    (mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+	     VM_PAGE_WIRED(mem))) {
+		/* less non-volatile bytes */
+		ledger_debit(owner->ledger,
+			     task_ledgers.purgeable_nonvolatile,
+			     PAGE_SIZE);
+		/* less footprint */
+		ledger_debit(owner->ledger,
+			     task_ledgers.phys_footprint,
+			     PAGE_SIZE);
+	} else if (owner &&
+		   (mem->object->purgable == VM_PURGABLE_VOLATILE ||
+		    mem->object->purgable == VM_PURGABLE_EMPTY)) {
+		assert(! VM_PAGE_WIRED(mem));
+		/* less volatile bytes */
+		ledger_debit(owner->ledger,
+			     task_ledgers.purgeable_volatile,
+			     PAGE_SIZE);
+	}
 	if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
 		if (VM_PAGE_WIRED(mem)) {
 			assert(vm_page_purgeable_wired_count > 0);
@@ -1384,13 +1618,55 @@ vm_page_remove(
 *	The object must be locked.  No side effects.
 */
 
-unsigned long vm_page_lookup_hint = 0;
-unsigned long vm_page_lookup_hint_next = 0;
-unsigned long vm_page_lookup_hint_prev = 0;
-unsigned long vm_page_lookup_hint_miss = 0;
-unsigned long vm_page_lookup_bucket_NULL = 0;
-unsigned long vm_page_lookup_miss = 0;
+#define	VM_PAGE_HASH_LOOKUP_THRESHOLD	10
+
+#if DEBUG_VM_PAGE_LOOKUP
+
+struct {
+	uint64_t	vpl_total;
+	uint64_t	vpl_empty_obj;
+	uint64_t	vpl_bucket_NULL;
+	uint64_t	vpl_hit_hint;
+	uint64_t	vpl_hit_hint_next;
+	uint64_t	vpl_hit_hint_prev;
+	uint64_t	vpl_fast;
+	uint64_t	vpl_slow;
+	uint64_t	vpl_hit;
+	uint64_t	vpl_miss;
+
+	uint64_t	vpl_fast_elapsed;
+	uint64_t	vpl_slow_elapsed;
+} vm_page_lookup_stats __attribute__((aligned(8)));
+
+#endif
+
+#define	KDP_VM_PAGE_WALK_MAX	1000
+
+vm_page_t
+kdp_vm_page_lookup(
+	vm_object_t		object,
+	vm_object_offset_t	offset)
+{
+	vm_page_t	cur_page;
+	int		num_traversed = 0;
+
+	if (not_in_kdp) {
+		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
+	}
+
+	queue_iterate(&object->memq, cur_page, vm_page_t, listq) {
+		if (cur_page->offset == offset) {
+			return cur_page;
+		}
+		num_traversed++;
+
+		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
+			return VM_PAGE_NULL;
+		}
+	}
+
+	return VM_PAGE_NULL;
+}
 
 vm_page_t
 vm_page_lookup(
@@ -1400,18 +1676,32 @@ vm_page_lookup(
 	vm_page_t	mem;
 	vm_page_bucket_t *bucket;
 	queue_entry_t	qe;
-	lck_spin_t	*bucket_lock;
+	lck_spin_t	*bucket_lock = NULL;
 	int		hash_id;
+#if DEBUG_VM_PAGE_LOOKUP
+	uint64_t	start, elapsed;
 
+	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
+#endif
 	vm_object_lock_assert_held(object);
+
+	if (object->resident_page_count == 0) {
+#if DEBUG_VM_PAGE_LOOKUP
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
+#endif
+		return (VM_PAGE_NULL);
+	}
+
 	mem = object->memq_hint;
 
 	if (mem != VM_PAGE_NULL) {
 		assert(mem->object == object);
 
 		if (mem->offset == offset) {
-			vm_page_lookup_hint++;
-			return mem;
+#if DEBUG_VM_PAGE_LOOKUP
+			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
+#endif
			return (mem);
 		}
 		qe = queue_next(&mem->listq);
 
@@ -1422,9 +1712,11 @@ vm_page_lookup(
 			assert(next_page->object == object);
 
 			if (next_page->offset == offset) {
-				vm_page_lookup_hint_next++;
 				object->memq_hint = next_page; /* new hint */
+#if DEBUG_VM_PAGE_LOOKUP
+				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
+#endif
				return (next_page);
 			}
 		}
 		qe = queue_prev(&mem->listq);
@@ -1436,9 +1728,11 @@ vm_page_lookup(
 			assert(prev_page->object == object);
 
 			if (prev_page->offset == offset) {
-				vm_page_lookup_hint_prev++;
 				object->memq_hint = prev_page; /* new hint */
+#if DEBUG_VM_PAGE_LOOKUP
+				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
+#endif
				return (prev_page);
 			}
 		}
 	}
@@ -1456,38 +1750,73 @@ vm_page_lookup(
 	 * at outside the scope of the hash bucket lock... this is a
 	 * really cheap optimization to avoid taking the lock
 	 */
-	if (bucket->pages == VM_PAGE_NULL) {
-		vm_page_lookup_bucket_NULL++;
-
+	if (!bucket->page_list) {
+#if DEBUG_VM_PAGE_LOOKUP
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
+#endif
 		return (VM_PAGE_NULL);
 	}
-	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 
-	lck_spin_lock(bucket_lock);
-
-	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
-#if 0
+#if DEBUG_VM_PAGE_LOOKUP
+	start = mach_absolute_time();
+#endif
+	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
 		/*
-		 * we don't hold the page queue lock
-		 * so this check isn't safe to make
+		 * on average, it's roughly 3 times faster to run a short memq list
+		 * than to take the spin lock and go through the hash list
 		 */
-		VM_PAGE_CHECK(mem);
+		mem = (vm_page_t)queue_first(&object->memq);
+
+		while (!queue_end(&object->memq, (queue_entry_t)mem)) {
+
+			if (mem->offset == offset)
+				break;
+
+			mem = (vm_page_t)queue_next(&mem->listq);
+		}
+		if (queue_end(&object->memq, (queue_entry_t)mem))
+			mem = NULL;
+	} else {
+
+		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
+
+		lck_spin_lock(bucket_lock);
+
+		for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) {
+#if 0
+			/*
+			 * we don't hold the page queue lock
+			 * so this check isn't safe to make
+			 */
+			VM_PAGE_CHECK(mem);
 #endif
-		if ((mem->object == object) && (mem->offset == offset))
-			break;
+			if ((mem->object == object) && (mem->offset == offset))
+				break;
+		}
+		lck_spin_unlock(bucket_lock);
 	}
-	lck_spin_unlock(bucket_lock);
 
+#if DEBUG_VM_PAGE_LOOKUP
+	elapsed = mach_absolute_time() - start;
+
+	if (bucket_lock) {
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
+		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
+	} else {
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
+		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
+	}
+	if (mem != VM_PAGE_NULL)
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
+	else
+		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
+#endif
 	if (mem != VM_PAGE_NULL) {
-		if (object->memq_hint != VM_PAGE_NULL) {
-			vm_page_lookup_hint_miss++;
-		}
 		assert(mem->object == object);
-		object->memq_hint = mem;
-	} else
-		vm_page_lookup_miss++;
 
-	return(mem);
+		object->memq_hint = mem;
+	}
	return (mem);
 }
 
 
@@ -1506,10 +1835,13 @@ vm_page_rename(
 	vm_object_offset_t		new_offset,
 	boolean_t			encrypted_ok)
 {
-	boolean_t	internal_to_external, external_to_internal;
+	boolean_t	internal_to_external, external_to_internal;
+	vm_tag_t	tag;
 
 	assert(mem->object != new_object);
 
+	assert(mem->object);
+
 	/*
 	 * ENCRYPTED SWAP:
 	 * The encryption key is based on the page's memory object
@@ -1548,7 +1880,7 @@ vm_page_rename(
 	 * up there anyway, and we don't do vm_page_rename's frequently
 	 * enough for this to matter.
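The rewritten vm_page_lookup above picks between two paths: objects with at most VM_PAGE_HASH_LOOKUP_THRESHOLD (10) resident pages are searched by walking the short memq list without any bucket lock, while larger objects go through the hash table under the bucket spin lock. A simplified stand-in for that decision, with toy page and object types (a real hash chain would also match on the owning object, and the kernel takes a bucket lock on the slow path):

    #include <stddef.h>
    #include <stdio.h>

    #define LOOKUP_THRESHOLD 10     /* mirrors VM_PAGE_HASH_LOOKUP_THRESHOLD */
    #define NBUCKETS 64

    struct page {
        unsigned long offset;
        struct page *listq;         /* object's resident-page list */
        struct page *hash_next;     /* global hash chain */
    };

    struct object {
        struct page *memq;
        unsigned int resident_page_count;
    };

    static struct page *buckets[NBUCKETS];

    static struct page *page_lookup(struct object *o, unsigned long off) {
        if (o->resident_page_count == 0)
            return NULL;                         /* cheap early out */
        if (o->resident_page_count <= LOOKUP_THRESHOLD) {
            /* short list walk: no bucket lock needed */
            for (struct page *p = o->memq; p; p = p->listq)
                if (p->offset == off)
                    return p;
            return NULL;
        }
        /* large object: the kernel would hold the bucket spin lock here */
        for (struct page *p = buckets[off % NBUCKETS]; p; p = p->hash_next)
            if (p->offset == off)
                return p;
        return NULL;
    }

    int main(void) {
        struct page p2 = { 2, NULL, NULL };
        struct page p1 = { 1, &p2, NULL };
        struct object o = { &p1, 2 };
        printf("found offset 2: %s\n", page_lookup(&o, 2) ? "yes" : "no");
        return 0;
    }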
 	 */
-	VM_PAGE_QUEUES_REMOVE(mem);
+	vm_page_queues_remove(mem);
 		vm_page_activate(mem);
 	}
 	if (mem->active || mem->inactive || mem->speculative) {
@@ -1560,8 +1892,9 @@ vm_page_rename(
 		}
 	}
 
+	tag = mem->object->wire_tag;
 	vm_page_remove(mem, TRUE);
-	vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE, FALSE);
+	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
 
 	if (internal_to_external) {
 		vm_page_pageable_internal_count--;
@@ -1740,7 +2073,7 @@ void vm_page_more_fictitious(void)
 	retval = kernel_memory_allocate(zone_map,
 					&addr, PAGE_SIZE, VM_PROT_ALL,
-					KMA_KOBJECT|KMA_NOPAGEWAIT);
+					KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE);
 	if (retval != KERN_SUCCESS) { 
 		/*
 		 * No page was available. Drop the
@@ -1752,9 +2085,6 @@ void vm_page_more_fictitious(void)
 		return;
 	}
 
-	/* Increment zone page count. We account for all memory managed by the zone in z->page_count */
-	OSAddAtomic64(1, &(vm_page_zone->page_count));
-
 	zcram(vm_page_zone, addr, PAGE_SIZE);
 
 	lck_mtx_unlock(&vm_page_alloc_lock);
@@ -1781,7 +2111,7 @@ vm_pool_low(void)
  * this is an interface to support bring-up of drivers
  * on platforms with physical memory > 4G...
  */
-int		vm_himemory_mode = 0;
+int		vm_himemory_mode = 2;
 
 
 /*
@@ -1874,8 +2204,6 @@ vm_page_grablo(void)
  * request from the per-cpu queue.
 */
 
-#define COLOR_GROUPS_TO_STEAL	4
-
 vm_page_t
 vm_page_grab( void )
 {
@@ -1889,9 +2217,9 @@ vm_page_grab( void )
 return_page_from_cpu_list:
		PROCESSOR_DATA(current_processor(), page_grab_count) += 1;
		PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next;
-		mem->pageq.next = NULL;
 
		enable_preemption();
+		mem->pageq.next = NULL;
 
 		assert(mem->listq.next == NULL && mem->listq.prev == NULL);
 		assert(mem->tabled == FALSE);
@@ -1918,19 +2246,18 @@ return_page_from_cpu_list:
 	 *	Optionally produce warnings if the wire or gobble
 	 *	counts exceed some threshold.
 	 */
-	if (vm_page_wire_count_warning > 0
-	    && vm_page_wire_count >= vm_page_wire_count_warning) {
+#if VM_PAGE_WIRE_COUNT_WARNING
+	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
 		printf("mk: vm_page_grab(): high wired page count of %d\n",
 			vm_page_wire_count);
-		assert(vm_page_wire_count < vm_page_wire_count_warning);
 	}
-	if (vm_page_gobble_count_warning > 0
-	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
+#endif
+#if VM_PAGE_GOBBLE_COUNT_WARNING
+	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
 			vm_page_gobble_count);
-		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
 	}
-
+#endif
 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
 
 	/*
@@ -1975,17 +2302,17 @@ return_page_from_cpu_list:
 		if (vm_page_free_count <= vm_page_free_reserved)
 			pages_to_steal = 1;
 		else {
-			pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors;
-		
-			if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved))
+			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved))
+				pages_to_steal = vm_free_magazine_refill_limit;
+			else
 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
 		}
 		color = PROCESSOR_DATA(current_processor(), start_color);
 		head = tail = NULL;
 
+		vm_page_free_count -= pages_to_steal;
+
 		while (pages_to_steal--) {
-			if (--vm_page_free_count < vm_page_free_count_minimum)
-				vm_page_free_count_minimum = vm_page_free_count;
 
 			while (queue_empty(&vm_page_queue_free[color]))
 				color = (color + 1) & vm_color_mask;
@@ -2010,7 +2337,6 @@ return_page_from_cpu_list:
 			tail->pageq.next = (queue_t)mem;
			tail = mem;
 
-			mem->pageq.prev = NULL;
 			assert(mem->listq.next == NULL && mem->listq.prev == NULL);
 			assert(mem->tabled == FALSE);
 			assert(mem->object == VM_OBJECT_NULL);
@@ -2026,6 +2352,8 @@ return_page_from_cpu_list:
 			assert(!mem->wpmapped);
 			assert(!pmap_is_noencrypt(mem->phys_page));
 		}
+		lck_mtx_unlock(&vm_page_queue_free_lock);
+
 		PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next;
 		PROCESSOR_DATA(current_processor(), start_color) = color;
 
@@ -2036,8 +2364,6 @@ return_page_from_cpu_list:
 		mem = head;
 		mem->pageq.next = NULL;
 
-		lck_mtx_unlock(&vm_page_queue_free_lock);
-
 		enable_preemption();
 	}
 	/*
@@ -2167,6 +2493,32 @@ vm_page_release(
 
 	VM_CHECK_MEMORYSTATUS;
 }
 
+/*
+ * This version of vm_page_release() is used only at startup
+ * when we are single-threaded and pages are being released
+ * for the first time. Hence, no locking or unnecessary checks are made.
+ * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
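The refill path above now decrements vm_page_free_count once, up front, and pulls up to vm_free_magazine_refill_limit pages for the per-CPU magazine, rotating through the color queues. A minimal sketch of that round-robin steal over per-color free lists (data structures simplified, locking omitted):

    #include <stdio.h>

    #define NCOLORS 4                           /* vm_colors stand-in */

    struct page { struct page *next; int id; };

    static struct page *color_queue[NCOLORS];   /* per-color free lists */

    static struct page *steal_pages(unsigned int want, unsigned int *start_color) {
        struct page *head = NULL, *tail = NULL;
        unsigned int color = *start_color;

        while (want--) {
            int tries = 0;
            while (!color_queue[color] && tries++ < NCOLORS)
                color = (color + 1) % NCOLORS;  /* skip empty colors */
            if (!color_queue[color])
                break;                          /* all queues drained */
            struct page *m = color_queue[color];
            color_queue[color] = m->next;
            color = (color + 1) % NCOLORS;      /* round-robin the colors */
            m->next = NULL;
            if (!head)
                head = m;
            else
                tail->next = m;
            tail = m;
        }
        *start_color = color;                   /* resume point, like start_color */
        return head;
    }

    int main(void) {
        static struct page pages[8];
        for (int i = 0; i < 8; i++) {           /* two pages per color */
            pages[i].id = i;
            pages[i].next = color_queue[i % NCOLORS];
            color_queue[i % NCOLORS] = &pages[i];
        }
        unsigned int start = 0;
        for (struct page *m = steal_pages(5, &start); m; m = m->next)
            printf("stole page %d\n", m->id);
        return 0;
    }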
+ */
+void
+vm_page_release_startup(
+	register vm_page_t	mem)
+{
+	queue_t	queue_free;
+
+	if (vm_lopage_free_count < vm_lopage_free_limit &&
+	    mem->phys_page < max_valid_low_ppnum) {
+		mem->lopage = TRUE;
+		vm_lopage_free_count++;
+		queue_free = &vm_lopage_queue_free;
+	} else {
+		mem->lopage = FALSE;
+		mem->free = TRUE;
+		vm_page_free_count++;
+		queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask];
+	}
+	queue_enter_first(queue_free, mem, vm_page_t, pageq);
+}
+
 /*
  *	vm_page_wait:
  *
@@ -2256,24 +2608,6 @@ vm_page_alloc(
 	return(mem);
 }
 
-vm_page_t
-vm_page_alloclo(
-	vm_object_t	object,
-	vm_object_offset_t	offset)
-{
-	register vm_page_t	mem;
-
-	vm_object_lock_assert_exclusive(object);
-	mem = vm_page_grablo();
-	if (mem == VM_PAGE_NULL)
-		return VM_PAGE_NULL;
-
-	vm_page_insert(mem, object, offset);
-
-	return(mem);
-}
-
-
 /*
  *	vm_page_alloc_guard:
  *
@@ -2326,11 +2660,12 @@ vm_page_free_prepare_queues(
 	VM_PAGE_CHECK(mem);
 	assert(!mem->free);
 	assert(!mem->cleaning);
-#if DEBUG
+
+#if MACH_ASSERT || DEBUG
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 	if (mem->free)
 		panic("vm_page_free: freeing page on free list\n");
-#endif
+#endif /* MACH_ASSERT || DEBUG */
 	if (mem->object) {
 		vm_object_lock_assert_exclusive(mem->object);
 	}
@@ -2346,12 +2681,16 @@ vm_page_free_prepare_queues(
 
 		counter(++c_laundry_pages_freed);
 	}
-	
-	VM_PAGE_QUEUES_REMOVE(mem);	/* clears local/active/inactive/throttled/speculative */
+	vm_page_queues_remove(mem);	/* clears local/active/inactive/throttled/speculative */
 
 	if (VM_PAGE_WIRED(mem)) {
 		if (mem->object) {
 			assert(mem->object->wired_page_count > 0);
 			mem->object->wired_page_count--;
+			if (!mem->object->wired_page_count) {
+			    VM_OBJECT_UNWIRED(mem->object);
+			}
+
 			assert(mem->object->resident_page_count >=
 			       mem->object->wired_page_count);
 
@@ -2360,6 +2699,30 @@ vm_page_free_prepare_queues(
 				assert(vm_page_purgeable_wired_count > 0);
 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
 			}
+			if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+			     mem->object->purgable == VM_PURGABLE_EMPTY) &&
+			    mem->object->vo_purgeable_owner != TASK_NULL) {
+				task_t owner;
+
+				owner = mem->object->vo_purgeable_owner;
+				/*
+				 * While wired, this page was accounted
+				 * as "non-volatile" but it should now
+				 * be accounted as "volatile".
+				 */
+				/* one less "non-volatile"... */
+				ledger_debit(owner->ledger,
+					     task_ledgers.purgeable_nonvolatile,
+					     PAGE_SIZE);
+				/* ... and "phys_footprint" */
+				ledger_debit(owner->ledger,
+					     task_ledgers.phys_footprint,
+					     PAGE_SIZE);
+				/* one more "volatile" */
+				ledger_credit(owner->ledger,
+					      task_ledgers.purgeable_volatile,
+					      PAGE_SIZE);
+			}
 		}
 		if (!mem->private && !mem->fictitious)
 			vm_page_wire_count--;
@@ -2604,9 +2967,13 @@ vm_page_free_list(
 *
 *	The page's object and the page queues must be locked.
 */
+
+
 void
 vm_page_wire(
-	register vm_page_t	mem)
+	register vm_page_t	mem,
+	vm_tag_t		tag,
+	boolean_t		check_memorystatus)
 {
 
 //	dbgLog(current_thread(), mem->offset, mem->object, 1);	/* (TEST/DEBUG) */
@@ -2634,10 +3001,21 @@ vm_page_wire(
 			mem->pageout = FALSE;
 			vm_pageout_throttle_up(mem);
 		}
-		VM_PAGE_QUEUES_REMOVE(mem);
+		vm_page_queues_remove(mem);
 
 		if (mem->object) {
+
+			if (!mem->private && !mem->fictitious)
+			{
+			    if (!mem->object->wired_page_count)
+			    {
+				assert(VM_KERN_MEMORY_NONE != tag);
+				mem->object->wire_tag = tag;
+				VM_OBJECT_WIRED(mem->object);
+			    }
+			}
 			mem->object->wired_page_count++;
+
 			assert(mem->object->resident_page_count >=
 			       mem->object->wired_page_count);
 			if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
@@ -2645,6 +3023,25 @@ vm_page_wire(
 				OSAddAtomic(-1, &vm_page_purgeable_count);
 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
 			}
+			if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+			     mem->object->purgable == VM_PURGABLE_EMPTY) &&
+			    mem->object->vo_purgeable_owner != TASK_NULL) {
+				task_t owner;
+
+				owner = mem->object->vo_purgeable_owner;
+				/* less volatile bytes */
+				ledger_debit(owner->ledger,
+					     task_ledgers.purgeable_volatile,
+					     PAGE_SIZE);
+				/* more not-quite-volatile bytes */
+				ledger_credit(owner->ledger,
+					      task_ledgers.purgeable_nonvolatile,
+					      PAGE_SIZE);
+				/* more footprint */
+				ledger_credit(owner->ledger,
+					      task_ledgers.phys_footprint,
+					      PAGE_SIZE);
+			}
 			if (mem->object->all_reusable) {
 				/*
 				 * Wired pages are not counted as "re-usable"
@@ -2671,8 +3068,9 @@ vm_page_wire(
 		vm_page_gobble_count--;
 		mem->gobbled = FALSE;
 
-		VM_CHECK_MEMORYSTATUS;
-		
+		if (check_memorystatus == TRUE) {
+			VM_CHECK_MEMORYSTATUS;
+		}
 		/*
 		 * ENCRYPTED SWAP:
 		 * The page could be encrypted, but
@@ -2688,32 +3086,6 @@ vm_page_wire(
 	VM_PAGE_CHECK(mem);
 }
 
-/*
- *      vm_page_gobble:
- *
- *      Mark this page as consumed by the vm/ipc/xmm subsystems.
- *
- *      Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
- */
-void
-vm_page_gobble(
-        register vm_page_t      mem)
-{
-        vm_page_lockspin_queues();
-        VM_PAGE_CHECK(mem);
-
-	assert(!mem->gobbled);
-	assert( !VM_PAGE_WIRED(mem));
-
-        if (!mem->gobbled && !VM_PAGE_WIRED(mem)) {
-                if (!mem->private && !mem->fictitious)
-                        vm_page_wire_count++;
-        }
-	vm_page_gobble_count++;
-        mem->gobbled = TRUE;
-        vm_page_unlock_queues();
-}
-
 /*
  *	vm_page_unwire:
  *
@@ -2732,16 +3104,21 @@ vm_page_unwire(
 	VM_PAGE_CHECK(mem);
 	assert(VM_PAGE_WIRED(mem));
+	assert(!mem->gobbled);
 	assert(mem->object != VM_OBJECT_NULL);
 #if DEBUG
 	vm_object_lock_assert_exclusive(mem->object);
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 #endif
 	if (--mem->wire_count == 0) {
-		assert(!mem->private && !mem->fictitious);
-		vm_page_wire_count--;
+		if (!mem->private && !mem->fictitious) {
+			vm_page_wire_count--;
+		}
 		assert(mem->object->wired_page_count > 0);
 		mem->object->wired_page_count--;
+		if (!mem->object->wired_page_count) {
+		    VM_OBJECT_UNWIRED(mem->object);
+		}
 		assert(mem->object->resident_page_count >=
 		       mem->object->wired_page_count);
 		if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
@@ -2749,7 +3126,25 @@ vm_page_unwire(
 			assert(vm_page_purgeable_wired_count > 0);
 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
 		}
-		assert(!mem->laundry);
+		if ((mem->object->purgable == VM_PURGABLE_VOLATILE ||
+		     mem->object->purgable == VM_PURGABLE_EMPTY) &&
+		    mem->object->vo_purgeable_owner != TASK_NULL) {
+			task_t owner;
+
+			owner = mem->object->vo_purgeable_owner;
+			/* more volatile bytes */
+			ledger_credit(owner->ledger,
+				      task_ledgers.purgeable_volatile,
+				      PAGE_SIZE);
+			/* less not-quite-volatile bytes */
+			ledger_debit(owner->ledger,
+				     task_ledgers.purgeable_nonvolatile,
+				     PAGE_SIZE);
+			/* less footprint */
+			ledger_debit(owner->ledger,
+				     task_ledgers.phys_footprint,
+				     PAGE_SIZE);
+		}
 		assert(mem->object != kernel_object);
 		assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
 
@@ -2817,13 +3212,13 @@ vm_page_deactivate_internal(
 	}
 	/*
 	 * if this page is currently on the pageout queue, we can't do the
-	 * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
 	 * and we can't remove it manually since we would need the object lock
 	 * (which is not required here) to decrement the activity_in_progress
 	 * reference which is held on the object while the page is in the pageout queue...
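The ledger operations added to vm_page_wire/vm_page_unwire move a volatile page's accounting while it is wired: bytes leave the purgeable_volatile ledger and are billed as purgeable_nonvolatile plus phys_footprint, and the unwire path reverses the move. A toy ledger showing the two transfers; the struct and TOY_PAGE_SIZE are stand-ins, not the kernel's ledger API:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096

    struct toy_ledger {
        int64_t purgeable_volatile;
        int64_t purgeable_nonvolatile;
        int64_t phys_footprint;
    };

    static void wire_volatile_page(struct toy_ledger *l) {
        /* page leaves the "volatile" bucket... */
        l->purgeable_volatile -= TOY_PAGE_SIZE;
        /* ...and is billed as non-volatile memory with real footprint */
        l->purgeable_nonvolatile += TOY_PAGE_SIZE;
        l->phys_footprint += TOY_PAGE_SIZE;
    }

    static void unwire_volatile_page(struct toy_ledger *l) {
        /* exact inverse of the wire move */
        l->purgeable_nonvolatile -= TOY_PAGE_SIZE;
        l->phys_footprint -= TOY_PAGE_SIZE;
        l->purgeable_volatile += TOY_PAGE_SIZE;
    }

    int main(void) {
        struct toy_ledger l = { TOY_PAGE_SIZE, 0, 0 };
        wire_volatile_page(&l);
        unwire_volatile_page(&l);   /* ledgers return to the initial state */
        printf("volatile=%lld nonvolatile=%lld footprint=%lld\n",
               (long long)l.purgeable_volatile,
               (long long)l.purgeable_nonvolatile,
               (long long)l.phys_footprint);
        return 0;
    }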
 	 * just let the normal laundry processing proceed
 	 */
-	if (m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
+	if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m)))
 		return;
 
 	if (!m->absent && clear_hw_reference == TRUE)
@@ -2833,13 +3228,14 @@ vm_page_deactivate_internal(
 	m->no_cache = FALSE;
 
 	if (!m->inactive) {
-		VM_PAGE_QUEUES_REMOVE(m);
+		vm_page_queues_remove(m);
 
 		if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
 		    m->dirty && m->object->internal &&
 		    (m->object->purgable == VM_PURGABLE_DENY ||
 		     m->object->purgable == VM_PURGABLE_NONVOLATILE ||
 		     m->object->purgable == VM_PURGABLE_VOLATILE)) {
+			vm_page_check_pageable_safe(m);
 			queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
 			m->throttled = TRUE;
 			vm_page_throttled_count++;
@@ -2850,7 +3246,7 @@ vm_page_deactivate_internal(
 				vm_page_speculative_recreated++;
 #endif
 		} else {
-			VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+			vm_page_enqueue_inactive(m, FALSE);
 		}
 	}
 }
@@ -2883,17 +3279,18 @@ void vm_page_enqueue_cleaned(vm_page_t m)
 	}
 	/*
 	 * if this page is currently on the pageout queue, we can't do the
-	 * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
 	 * and we can't remove it manually since we would need the object lock
 	 * (which is not required here) to decrement the activity_in_progress
 	 * reference which is held on the object while the page is in the pageout queue...
 	 * just let the normal laundry processing proceed
 	 */
-	if (m->clean_queue || m->pageout_queue || m->private || m->fictitious)
+	if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious)
 		return;
 
-	VM_PAGE_QUEUES_REMOVE(m);
+	vm_page_queues_remove(m);
 
+	vm_page_check_pageable_safe(m);
 	queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq);
 	m->clean_queue = TRUE;
 	vm_page_cleaned_count++;
@@ -2917,12 +3314,6 @@ void vm_page_enqueue_cleaned(vm_page_t m)
 *	The page queues must be locked.
 */
 
-#if CONFIG_JETSAM
-#if LATENCY_JETSAM
-extern struct vm_page	jetsam_latency_page[NUM_OF_JETSAM_LATENCY_TOKENS];
-#endif	/* LATENCY_JETSAM */
-#endif	/* CONFIG_JETSAM */
-
 void
 vm_page_activate(
 	register vm_page_t	m)
@@ -2946,13 +3337,13 @@ vm_page_activate(
 	}
 	/*
 	 * if this page is currently on the pageout queue, we can't do the
-	 * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
 	 * and we can't remove it manually since we would need the object lock
 	 * (which is not required here) to decrement the activity_in_progress
 	 * reference which is held on the object while the page is in the pageout queue...
 	 * just let the normal laundry processing proceed
 	 */
-	if (m->pageout_queue || m->private || m->fictitious || m->compressor)
+	if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
 		return;
 
 #if DEBUG
@@ -2965,10 +3356,10 @@ vm_page_activate(
 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
 	}
 	
-	VM_PAGE_QUEUES_REMOVE(m);
+	vm_page_queues_remove(m);
 
 	if ( !VM_PAGE_WIRED(m)) {
-
+		vm_page_check_pageable_safe(m);
 		if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) &&
 		    m->dirty && m->object->internal &&
 		    (m->object->purgable == VM_PURGABLE_DENY ||
@@ -2986,37 +3377,6 @@ vm_page_activate(
 			} else {
 				vm_page_pageable_external_count++;
 			}
-#if LATENCY_JETSAM
-			if (jlp_init) {
-				uint64_t now = mach_absolute_time();
-				uint64_t delta = now - jlp_time;
-				clock_sec_t jl_secs = 0;
-				clock_usec_t jl_usecs = 0;
-				vm_page_t jlp;
-
-				absolutetime_to_microtime(delta, &jl_secs, &jl_usecs);
-
-				jl_usecs += jl_secs * USEC_PER_SEC;
-				if (jl_usecs >= JETSAM_LATENCY_TOKEN_AGE) {
-
-					jlp = &jetsam_latency_page[jlp_current];
-					if (jlp->active) {
-						queue_remove(&vm_page_queue_active, jlp, vm_page_t, pageq);
-					}
-					queue_enter(&vm_page_queue_active, jlp, vm_page_t, pageq);
-
-					jlp->active = TRUE;
-
-					jlp->offset = now;
-					jlp_time = jlp->offset;
-
-					if(++jlp_current == NUM_OF_JETSAM_LATENCY_TOKENS) {
-						jlp_current = 0;
-					}
-
-				}
-			}
-#endif /* LATENCY_JETSAM */
 		}
 		m->reference = TRUE;
 		m->no_cache = FALSE;
@@ -3040,7 +3400,8 @@ vm_page_speculate(
 	struct vm_speculative_age_q	*aq;
 
 	VM_PAGE_CHECK(m);
-	assert(m->object != kernel_object);
+	vm_page_check_pageable_safe(m);
+
 	assert(m->phys_page != vm_page_guard_addr);
 #if DEBUG
 	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
@@ -3049,16 +3410,16 @@ vm_page_speculate(
 
 	/*
 	 * if this page is currently on the pageout queue, we can't do the
-	 * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
 	 * and we can't remove it manually since we would need the object lock
 	 * (which is not required here) to decrement the activity_in_progress
 	 * reference which is held on the object while the page is in the pageout queue...
 	 * just let the normal laundry processing proceed
 	 */
-	if (m->pageout_queue || m->private || m->fictitious || m->compressor)
+	if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor)
 		return;
 
-	VM_PAGE_QUEUES_REMOVE(m);
+	vm_page_queues_remove(m);
 
 	if ( !VM_PAGE_WIRED(m)) {
 		mach_timespec_t		ts;
@@ -3183,20 +3544,20 @@ vm_page_lru(
 #endif
 	/*
 	 * if this page is currently on the pageout queue, we can't do the
-	 * VM_PAGE_QUEUES_REMOVE (which doesn't handle the pageout queue case)
+	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
 	 * and we can't remove it manually since we would need the object lock
 	 * (which is not required here) to decrement the activity_in_progress
 	 * reference which is held on the object while the page is in the pageout queue...
 	 * just let the normal laundry processing proceed
 	 */
-	if (m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
+	if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m)))
 		return;
 
 	m->no_cache = FALSE;
 
-	VM_PAGE_QUEUES_REMOVE(m);
+	vm_page_queues_remove(m);
 
-	VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+	vm_page_enqueue_inactive(m, FALSE);
 }
 
 
@@ -3314,6 +3675,7 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
 
 	queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) {
 		VM_PAGE_CHECK(m);
+		vm_page_check_pageable_safe(m);
 		assert(m->local);
 		assert(!m->active);
 		assert(!m->inactive);
@@ -3554,7 +3916,7 @@ _vm_page_print(
 	printf("vm_page %p: \n", p);
 	printf("  pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev);
 	printf("  listq: next=%p prev=%p\n", p->listq.next, p->listq.prev);
-	printf("  next=%p\n", p->next);
+	printf("  next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m));
 	printf("  object=%p offset=0x%llx\n", p->object, p->offset);
 	printf("  wire_count=%u\n", p->wire_count);
 
@@ -3592,9 +3954,10 @@ _vm_page_print(
 	       (p->unusual ? "" : "!"),
 	       (p->encrypted ? "" : "!"),
 	       (p->encrypted_cleaning ? "" : "!"));
-	printf("  %scs_validated, %scs_tainted, %sno_cache\n",
+	printf("  %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n",
 	       (p->cs_validated ? "" : "!"),
 	       (p->cs_tainted ? "" : "!"),
+	       (p->cs_nx ? "" : "!"),
 	       (p->no_cache ? "" : "!"));
 
 	printf("phys_page=0x%x\n", p->phys_page);
@@ -3637,6 +4000,7 @@ vm_page_verify_contiguous(
 /*
 *	Check the free lists for proper length etc.
 */
+static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
 static unsigned int
 vm_page_verify_free_list(
 	queue_head_t	*vm_page_queue,
@@ -3649,6 +4013,9 @@ vm_page_verify_free_list(
 	vm_page_t	prev_m;
 	boolean_t	found_page;
 
+	if (! vm_page_verify_this_free_list_enabled)
+		return 0;
+
 	found_page = FALSE;
 	npages = 0;
 	prev_m = (vm_page_t) vm_page_queue;
@@ -3706,18 +4073,32 @@ vm_page_verify_free_list(
 	return npages;
 }
 
-static boolean_t vm_page_verify_free_lists_enabled = FALSE;
+static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
 static void
 vm_page_verify_free_lists( void )
 {
 	unsigned int	color, npages, nlopages;
+	boolean_t	toggle = TRUE;
 
-	if (! vm_page_verify_free_lists_enabled)
+	if (! vm_page_verify_all_free_lists_enabled)
 		return;
 
 	npages = 0;
 
 	lck_mtx_lock(&vm_page_queue_free_lock);
+	
+	if (vm_page_verify_this_free_list_enabled == TRUE) {
+		/*
+		 * This variable has been set globally for extra checking of
+		 * each free list Q. Since we didn't set it, we don't own it
+		 * and we shouldn't toggle it.
 		 */
+		toggle = FALSE;
+	}
+
+	if (toggle == TRUE) {
+		vm_page_verify_this_free_list_enabled = TRUE;
+	}
 
 	for( color = 0; color < vm_colors; color++ ) {
 		npages += vm_page_verify_free_list(&vm_page_queue_free[color],
@@ -3731,6 +4112,10 @@ vm_page_verify_free_lists( void )
 		      "npages %u free_count %d nlopages %u lo_free_count %u",
 		      npages, vm_page_free_count, nlopages, vm_lopage_free_count);
 
+	if (toggle == TRUE) {
+		vm_page_verify_this_free_list_enabled = FALSE;
+	}
+
 	lck_mtx_unlock(&vm_page_queue_free_lock);
 }
 
@@ -3758,6 +4143,11 @@ vm_page_queues_assert(
 #endif	/* MACH_ASSERT */
 
 
+
+
+
+extern boolean_t (* volatile consider_buffer_cache_collect)(int);
+
 /*
 *	CONTIGUOUS PAGE ALLOCATION
 *
@@ -3840,21 +4230,23 @@ vm_page_find_contiguous(
 	unsigned int	idx_last_contig_page_found = 0;
 	int		free_considered, free_available;
 	int		substitute_needed;
-	boolean_t	wrapped;
+	boolean_t	wrapped, zone_gc_called = FALSE;
 #if DEBUG
 	clock_sec_t	tv_start_sec, tv_end_sec;
 	clock_usec_t	tv_start_usec, tv_end_usec;
 #endif
-#if MACH_ASSERT
+
 	int		yielded = 0;
 	int		dumped_run = 0;
 	int		stolen_pages = 0;
 	int		compressed_pages = 0;
-#endif
+
 
 	if (contig_pages == 0)
 		return VM_PAGE_NULL;
 
+full_scan_again:
+
 #if MACH_ASSERT
 	vm_page_verify_free_lists();
 #endif
@@ -3864,6 +4256,8 @@ vm_page_find_contiguous(
 	PAGE_REPLACEMENT_ALLOWED(TRUE);
 
 	vm_page_lock_queues();
+
+
 	lck_mtx_lock(&vm_page_queue_free_lock);
 
 	RESET_STATE_OF_RUN();
@@ -4033,9 +4427,9 @@ did_consider:
 				 */
 				free_available = vm_page_free_count - vm_page_free_reserved;
 				considered = 0;
-#if MACH_ASSERT
+
 				yielded++;
-#endif
+
 				goto retry;
 			}
 			considered++;
@@ -4116,12 +4510,6 @@ did_consider:
 					vm_page_free_count--;
 			}
 		}
-		/*
-		 * adjust global freelist counts
-		 */
-		if (vm_page_free_count < vm_page_free_count_minimum)
-			vm_page_free_count_minimum = vm_page_free_count;
-
 		if( flags & KMA_LOMEM)
 			vm_page_lomem_find_contiguous_last_idx = page_idx;
 		else
@@ -4245,7 +4633,7 @@ did_consider:
 					assert(!m1->gobbled);
 					assert(!m1->private);
 					m2->no_cache	= m1->no_cache;
-					m2->xpmapped	= m1->xpmapped;
+					m2->xpmapped	= 0;
 					assert(!m1->busy);
 					assert(!m1->wanted);
 					assert(!m1->fictitious);
@@ -4265,6 +4653,7 @@ did_consider:
 					assert(!m1->encrypted_cleaning);
 					m2->cs_validated = m1->cs_validated;
 					m2->cs_tainted	= m1->cs_tainted;
+					m2->cs_nx	= m1->cs_nx;
 
 					/*
 					 * If m1 had really been reusable,
@@ -4278,9 +4667,16 @@ did_consider:
 					assert(!m1->lopage);
 					m2->slid	= m1->slid;
-					m2->was_dirty	= m1->was_dirty;
 					m2->compressor	= m1->compressor;
 
+					/*
+					 * page may need to be flushed if
+					 * it is marshalled into a UPL
+					 * that is going to be used by a device
+					 * that doesn't support coherency
+					 */
+					m2->written_by_kernel = TRUE;
+
 					/*
 					 * make sure we clear the ref/mod state
					 * from the pmap layer... else we risk
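The toggle logic above makes vm_page_verify_free_lists enable the per-list check flag only when nobody set it globally, so it never clears a developer's setting. The ownership pattern in isolation, as a small self-contained sketch:

    #include <stdio.h>

    /* may also be enabled globally by a developer; we must not clobber that */
    static int per_list_check_enabled = 0;

    static unsigned int verify_one_list(void) {
        if (!per_list_check_enabled)
            return 0;                  /* checking disabled: report nothing */
        /* ... walk one free list and count/verify pages here ... */
        return 1;
    }

    static void verify_all_lists(void) {
        int toggle = 1;

        if (per_list_check_enabled)    /* someone else set it; we don't own it */
            toggle = 0;
        if (toggle)
            per_list_check_enabled = 1;

        unsigned int npages = verify_one_list();
        printf("verified, npages=%u\n", npages);

        if (toggle)                    /* only clear what we ourselves set */
            per_list_check_enabled = 0;
    }

    int main(void) {
        verify_all_lists();            /* enables the check just for this walk */
        per_list_check_enabled = 1;    /* simulate the global debug setting */
        verify_all_lists();            /* leaves the flag untouched */
        return 0;
    }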
 					 * inheriting state from the last time
@@ -4309,7 +4705,7 @@ did_consider:
 					 * now put the substitute page
 					 * on the object
 					 */
-					vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE, FALSE);
+					vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
 
 					if (m2->compressor) {
 						m2->pmapped = TRUE;
 
 						PMAP_ENTER(kernel_pmap, m2->offset, m2,
 							   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
-#if MACH_ASSERT
+
 						compressed_pages++;
-#endif
+
 					} else {
 						if (m2->reference)
 							vm_page_activate(m2);
@@ -4340,9 +4736,9 @@ did_consider:
 					 */
 					vm_page_free_prepare(m1);
 				}
-#if MACH_ASSERT
+
 				stolen_pages++;
-#endif
+
 			}
 			m1->pageq.next = (queue_entry_t) m;
 			m1->pageq.prev = NULL;
@@ -4357,9 +4753,9 @@ did_consider:
 			if (m != VM_PAGE_NULL) {
 				vm_page_free_list(m, FALSE);
 			}
-#if MACH_ASSERT
+
 			dumped_run++;
-#endif
+
 			/*
 			 * want the index of the last
 			 * page in this run that was
@@ -4445,6 +4841,23 @@ done_scanning:
 #if MACH_ASSERT
 	vm_page_verify_free_lists();
 #endif
+	if (m == NULL && zone_gc_called == FALSE) {
+		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
+		       __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
+		       scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
+
+		if (consider_buffer_cache_collect != NULL) {
+			(void)(*consider_buffer_cache_collect)(1);
+		}
+
+		consider_zone_gc(TRUE);
+
+		zone_gc_called = TRUE;
+
+		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
+		goto full_scan_again;
+	}
+
 	return m;
 }
 
@@ -4526,6 +4939,7 @@ unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
 void
 vm_page_do_delayed_work(
 	vm_object_t	object,
+	vm_tag_t	tag,
 	struct vm_page_delayed_work	*dwp,
 	int		dw_count)
 {
@@ -4566,13 +4980,16 @@ vm_page_do_delayed_work(
 
 			if (dwp->dw_mask & DW_vm_pageout_throttle_up)
 				vm_pageout_throttle_up(m);
-
+#if CONFIG_PHANTOM_CACHE
+			if (dwp->dw_mask & DW_vm_phantom_cache_update)
+				vm_phantom_cache_update(m);
+#endif
 			if (dwp->dw_mask & DW_vm_page_wire)
-				vm_page_wire(m);
+				vm_page_wire(m, tag, FALSE);
 			else if (dwp->dw_mask & DW_vm_page_unwire) {
 				boolean_t	queueit;
 
-				queueit = (dwp->dw_mask & DW_vm_page_free) ? FALSE : TRUE;
+				queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
 
 				vm_page_unwire(m, queueit);
 			}
@@ -4626,7 +5043,7 @@ vm_page_do_delayed_work(
 				vm_page_lru(m);
 			else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
 				if ( !m->pageout_queue)
-					VM_PAGE_QUEUES_REMOVE(m);
+					vm_page_queues_remove(m);
 			}
 			if (dwp->dw_mask & DW_set_reference)
 				m->reference = TRUE;
@@ -4635,11 +5052,11 @@ vm_page_do_delayed_work(
 
 			if (dwp->dw_mask & DW_move_page) {
 				if ( !m->pageout_queue) {
-					VM_PAGE_QUEUES_REMOVE(m);
+					vm_page_queues_remove(m);
 
 					assert(m->object != kernel_object);
 
-					VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
+					vm_page_enqueue_inactive(m, FALSE);
 				}
 			}
 			if (dwp->dw_mask & DW_clear_busy)
@@ -4722,8 +5139,6 @@ vm_page_get_phys_page(vm_page_t page)
 
 static vm_page_t hibernate_gobble_queue;
 
-extern boolean_t (* volatile consider_buffer_cache_collect)(int);
-
 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
 static int  hibernate_flush_dirty_pages(int);
 static int  hibernate_flush_queue(queue_head_t *, int);
@@ -4762,6 +5177,7 @@ struct hibernate_statistics {
 	int cd_found_laundry;
 	int cd_found_dirty;
 	int cd_found_xpmapped;
+	int cd_skipped_xpmapped;
 	int cd_local_free;
 	int cd_total_free;
 	int cd_vm_page_wire_count;
@@ -4772,6 +5188,13 @@ struct hibernate_statistics {
 
 } hibernate_stats;
 
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define	HIBERNATE_XPMAPPED_LIMIT	40000
+
 
 static int
 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
@@ -4874,7 +5297,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 
 					goto reenter_pg_on_q;
 				}
-				vm_pageout_scan_wants_object = m_object;
 
 				vm_page_unlock_queues();
 				mutex_pause(try_failed_count++);
@@ -4884,7 +5306,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 				continue;
 			} else {
 				l_object = m_object;
-				vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 			}
 		}
 		if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
@@ -4950,7 +5371,6 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 				vm_object_unlock(l_object);
 				l_object = NULL;
 			}
-			vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
 			while (retval == 0) {
 
@@ -4993,16 +5413,16 @@ hibernate_flush_queue(queue_head_t *q, int qcount)
 			/*
			 * we've already factored out pages in the laundry which
			 * means this page can't be on the pageout queue so it's
-			 * safe to do the VM_PAGE_QUEUES_REMOVE
+			 * safe to do the vm_page_queues_remove
			 */
			assert(!m->pageout_queue);
 
-			VM_PAGE_QUEUES_REMOVE(m);
+			vm_page_queues_remove(m);
 
-			if (COMPRESSED_PAGER_IS_ACTIVE)
-				pmap_disconnect(m->phys_page);
+			if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE)
+				pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
 
-			vm_pageout_cluster(m, FALSE);
+			(void)vm_pageout_cluster(m, FALSE, FALSE, FALSE);
 
 			hibernate_stats.hibernate_found_dirty++;
 
@@ -5023,7 +5443,6 @@ next_pg:
 		vm_object_unlock(l_object);
 		l_object = NULL;
 	}
-	vm_pageout_scan_wants_object = VM_OBJECT_NULL;
 
 	vm_page_unlock_queues();
 
@@ -5039,8 +5458,6 @@ hibernate_flush_dirty_pages(int pass)
 	struct vm_speculative_age_q	*aq;
 	uint32_t	i;
 
-	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
-
 	if (vm_page_local_q) {
 		for (i = 0; i < vm_page_local_q_count; i++)
 			vm_page_reactivate_local(i, TRUE, FALSE);
@@ -5104,6 +5521,13 @@ hibernate_flush_dirty_pages(int pass)
 }
 
 
+void
+hibernate_reset_stats()
+{
+	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
+
 int
 hibernate_flush_memory()
 {
@@ -5118,16 +5542,13 @@ hibernate_flush_memory()
 
 	if (COMPRESSED_PAGER_IS_ACTIVE) {
 
-		if ((retval = hibernate_flush_dirty_pages(2)) == 0) {
-
 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
 
 			vm_compressor_flush();
 
 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
-		}
 	}
-	if (retval == 0 && consider_buffer_cache_collect != NULL) {
+	if (consider_buffer_cache_collect != NULL) {
 		unsigned int orig_wire_count;
 
 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
@@ -5191,38 +5612,6 @@ hibernate_page_list_zero(hibernate_page_list_t *list)
 	}
 }
 
-void
-hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
-{
-	uint32_t i;
-	vm_page_t m;
-	uint64_t start, end, timeout, nsec;
-	clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
-	clock_get_uptime(&start);
-
-	for (i = 0; i < gobble_count; i++)
-	{
-		while (VM_PAGE_NULL == (m = vm_page_grab()))
-		{
-			clock_get_uptime(&end);
-			if (end >= timeout)
-				break;
-			VM_PAGE_WAIT();
-		}
-		if (!m)
-			break;
-		m->busy = FALSE;
-		vm_page_gobble(m);
-
-		m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
-		hibernate_gobble_queue = m;
-	}
-
-	clock_get_uptime(&end);
-	absolutetime_to_nanoseconds(end - start, &nsec);
-	HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
-}
-
 void
 hibernate_free_gobble_pages(void)
 {
@@ -5313,10 +5702,15 @@ hibernate_consider_discard(vm_page_t m, boolean_t preflight)
 		if (discard == FALSE) {
 			if (!preflight)
 				hibernate_stats.cd_found_dirty++;
-		} else if (m->xpmapped && m->reference) {
-			if (!preflight)
-				hibernate_stats.cd_found_xpmapped++;
-			discard = FALSE;
+		} else if (m->xpmapped && m->reference && !object->internal) {
+			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+				if (!preflight)
+					hibernate_stats.cd_found_xpmapped++;
+				discard = FALSE;
+			} else {
+				if (!preflight)
+					hibernate_stats.cd_skipped_xpmapped++;
+			}
 		}
 	}
 	while (FALSE);
@@ -5337,14 +5731,14 @@ hibernate_discard_page(vm_page_t m)
 	 */
 		return;
 
-#if DEBUG
+#if MACH_ASSERT || DEBUG
 	vm_object_t object = m->object;
 	if (!vm_object_lock_try(m->object))
 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
 #else
 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
 	   makes sure these locks are uncontended before sleep */
-#endif /* !DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
 
 	if (m->pmapped == TRUE)
 	{
@@ -5368,13 +5762,26 @@ hibernate_discard_page(vm_page_t m)
 			vm_purgeable_token_delete_first(old_queue);
 		}
 		m->object->purgable = VM_PURGABLE_EMPTY;
+
+		/*
+		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
+		 * accounted in the "volatile" ledger, so no change here.
+		 * We have to update vm_page_purgeable_count, though, since we're
+		 * effectively purging this object.
+ */ + unsigned int delta; + assert(m->object->resident_page_count >= m->object->wired_page_count); + delta = (m->object->resident_page_count - m->object->wired_page_count); + assert(vm_page_purgeable_count >= delta); + assert(delta > 0); + OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count); } vm_page_free(m); -#if DEBUG +#if MACH_ASSERT || DEBUG vm_object_unlock(object); -#endif /* DEBUG */ +#endif /* MACH_ASSERT || DEBUG */ } /* @@ -5447,7 +5854,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, boolean_t discard_all; boolean_t discard; - HIBLOG("hibernate_page_list_setall(preflight %d) start %p, %p\n", preflight, page_list, page_list_wired); + HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight); if (preflight) { page_list = NULL; @@ -5458,7 +5865,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, discard_all = will_discard; } -#if DEBUG +#if MACH_ASSERT || DEBUG if (!preflight) { vm_page_lock_queues(); @@ -5470,7 +5877,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, } } } -#endif /* DEBUG */ +#endif /* MACH_ASSERT || DEBUG */ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0); @@ -5603,8 +6010,8 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, m = next; } - m = (vm_page_t) queue_first(&vm_page_queue_inactive); - while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m)) + m = (vm_page_t) queue_first(&vm_page_queue_cleaned); + while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m)) { next = (vm_page_t) m->pageq.next; discard = FALSE; @@ -5615,19 +6022,42 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, if (m->dirty) count_discard_purgeable++; else - count_discard_inactive++; + count_discard_cleaned++; discard = discard_all; } else - count_inactive++; + count_cleaned++; count_wire--; if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); if (discard) hibernate_discard_page(m); m = next; } - m = (vm_page_t) queue_first(&vm_page_queue_cleaned); - while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m)) + m = (vm_page_t) queue_first(&vm_page_queue_active); + while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + discard = FALSE; + if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) + && hibernate_consider_discard(m, preflight)) + { + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); + if (m->dirty) + count_discard_purgeable++; + else + count_discard_active++; + discard = discard_all; + } + else + count_active++; + count_wire--; + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (discard) hibernate_discard_page(m); + m = next; + } + + m = (vm_page_t) queue_first(&vm_page_queue_inactive); + while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m)) { next = (vm_page_t) m->pageq.next; discard = FALSE; @@ -5638,11 +6068,11 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, if (m->dirty) count_discard_purgeable++; else - count_discard_cleaned++; + count_discard_inactive++; discard = discard_all; } else - count_cleaned++; + count_inactive++; count_wire--; if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); if (discard) hibernate_discard_page(m); @@ -5672,29 +6102,6 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, } } - m = (vm_page_t) queue_first(&vm_page_queue_active); - while (m && !queue_end(&vm_page_queue_active, 
(queue_entry_t)m)) - { - next = (vm_page_t) m->pageq.next; - discard = FALSE; - if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); - if (m->dirty) - count_discard_purgeable++; - else - count_discard_active++; - discard = discard_all; - } - else - count_active++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); - if (discard) hibernate_discard_page(m); - m = next; - } - queue_iterate(&compressor_object->memq, m, vm_page_t, listq) { count_compressor++; @@ -5748,11 +6155,14 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, discard_all ? "did" : "could", count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); + if (hibernate_stats.cd_skipped_xpmapped) + HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped); + *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned; if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active; -#if DEBUG +#if MACH_ASSERT || DEBUG if (!preflight) { if (vm_page_local_q) { @@ -5764,7 +6174,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, } vm_page_unlock_queues(); } -#endif /* DEBUG */ +#endif /* MACH_ASSERT || DEBUG */ if (preflight) { lck_mtx_unlock(&vm_page_queue_free_lock); @@ -5789,7 +6199,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) uint32_t count_discard_speculative = 0; -#if DEBUG +#if MACH_ASSERT || DEBUG vm_page_lock_queues(); if (vm_page_local_q) { for (i = 0; i < vm_page_local_q_count; i++) { @@ -5798,7 +6208,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) VPL_LOCK(&lq->vpl_lock); } } -#endif /* DEBUG */ +#endif /* MACH_ASSERT || DEBUG */ clock_get_uptime(&start); @@ -5877,7 +6287,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) m = next; } -#if DEBUG +#if MACH_ASSERT || DEBUG if (vm_page_local_q) { for (i = 0; i < vm_page_local_q_count; i++) { struct vpl *lq; @@ -5886,7 +6296,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) } } vm_page_unlock_queues(); -#endif /* DEBUG */ +#endif /* MACH_ASSERT || DEBUG */ clock_get_uptime(&end); absolutetime_to_nanoseconds(end - start, &nsec); @@ -6007,7 +6417,7 @@ hibernate_hash_insert_page(vm_page_t mem) vm_page_bucket_t *bucket; int hash_id; - assert(mem->tabled); + assert(mem->hashed); assert(mem->object); assert(mem->offset != (vm_object_offset_t) -1); @@ -6017,8 +6427,8 @@ hibernate_hash_insert_page(vm_page_t mem) hash_id = vm_page_hash(mem->object, mem->offset); bucket = &vm_page_buckets[hash_id]; - mem->next = bucket->pages; - bucket->pages = mem; + mem->next_m = bucket->page_list; + bucket->page_list = VM_PAGE_PACK_PTR(mem); } @@ -6078,8 +6488,8 @@ hibernate_rebuild_vm_structs(void) * hibernate_teardown_vm_structs leaves the location where * this vm_page_t must be located in "next". 
*/ - tmem = mem->next; - mem->next = NULL; + tmem = VM_PAGE_UNPACK_PTR(mem->next_m); + mem->next_m = VM_PAGE_PACK_PTR(NULL); sindx = (int)(tmem - &vm_pages[0]); @@ -6091,7 +6501,7 @@ hibernate_rebuild_vm_structs(void) *tmem = *mem; mem = tmem; } - if (mem->tabled) + if (mem->hashed) hibernate_hash_insert_page(mem); /* * the 'hole' between this vm_page_t and the previous @@ -6108,14 +6518,14 @@ hibernate_rebuild_vm_structs(void) assert(vm_page_free_count == hibernate_teardown_vm_page_free_count); /* - * process the list of vm_page_t's that were tabled in the hash, + * process the list of vm_page_t's that were entered in the hash, * but were not located in the vm_pages array... these are * vm_page_t's that were created on the fly (i.e. fictitious) */ for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) { - mem_next = mem->next; + mem_next = VM_PAGE_UNPACK_PTR(mem->next_m); - mem->next = NULL; + mem->next_m = VM_PAGE_PACK_PTR(NULL); hibernate_hash_insert_page(mem); } hibernate_rebuild_hash_list = NULL; @@ -6160,14 +6570,13 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l bucket = &vm_page_buckets[i]; - for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) { - - assert(mem->tabled); + for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) { + assert(mem->hashed); - mem_next = mem->next; + mem_next = VM_PAGE_UNPACK_PTR(mem->next_m); if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) { - mem->next = hibernate_rebuild_hash_list; + mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list); hibernate_rebuild_hash_list = mem; } } @@ -6211,7 +6620,7 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l * as an indicator to the rebuild function that * we don't have to move it */ - mem->next = mem; + mem->next_m = VM_PAGE_PACK_PTR(mem); if (vm_pages[compact_target_indx].free) { /* @@ -6285,7 +6694,7 @@ vm_page_info( bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK]; lck_spin_lock(bucket_lock); - for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next) + for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m)) bucket_count++; lck_spin_unlock(bucket_lock); @@ -6297,3 +6706,654 @@ vm_page_info( return vm_page_bucket_count; } #endif /* MACH_VM_DEBUG */ + +#if VM_PAGE_BUCKETS_CHECK +void +vm_page_buckets_check(void) +{ + unsigned int i; + vm_page_t p; + unsigned int p_hash; + vm_page_bucket_t *bucket; + lck_spin_t *bucket_lock; + + if (!vm_page_buckets_check_ready) { + return; + } + +#if HIBERNATION + if (hibernate_rebuild_needed || + hibernate_rebuild_hash_list) { + panic("BUCKET_CHECK: hibernation in progress: " + "rebuild_needed=%d rebuild_hash_list=%p\n", + hibernate_rebuild_needed, + hibernate_rebuild_hash_list); + } +#endif /* HIBERNATION */ + +#if VM_PAGE_FAKE_BUCKETS + char *cp; + for (cp = (char *) vm_page_fake_buckets_start; + cp < (char *) vm_page_fake_buckets_end; + cp++) { + if (*cp != 0x5a) { + panic("BUCKET_CHECK: corruption at %p in fake buckets " + "[0x%llx:0x%llx]\n", + cp, + (uint64_t) vm_page_fake_buckets_start, + (uint64_t) vm_page_fake_buckets_end); + } + } +#endif /* VM_PAGE_FAKE_BUCKETS */ + + for (i = 0; i < vm_page_bucket_count; i++) { + bucket = &vm_page_buckets[i]; + if (!bucket->page_list) { + continue; + } + + bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK]; + lck_spin_lock(bucket_lock); + p = VM_PAGE_UNPACK_PTR(bucket->page_list); + while (p != VM_PAGE_NULL) { + p_hash = vm_page_hash(p->object, p->offset); + if (!p->hashed) {
panic("BUCKET_CHECK: page %p (%p,0x%llx) " + "hash %d in bucket %d at %p " + "is not hashed\n", + p, p->object, p->offset, + p_hash, i, bucket); + } + p_hash = vm_page_hash(p->object, p->offset); + if (p_hash != i) { + panic("BUCKET_CHECK: corruption in bucket %d " + "at %p: page %p object %p offset 0x%llx " + "hash %d\n", + i, bucket, p, p->object, p->offset, + p_hash); + } + p = VM_PAGE_UNPACK_PTR(p->next_m); + } + lck_spin_unlock(bucket_lock); + } + +// printf("BUCKET_CHECK: checked buckets\n"); +} +#endif /* VM_PAGE_BUCKETS_CHECK */ + +/* + * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the + * local queues if they exist... its the only spot in the system where we add pages + * to those queues... once on those queues, those pages can only move to one of the + * global page queues or the free queues... they NEVER move from local q to local q. + * the 'local' state is stable when vm_page_queues_remove is called since we're behind + * the global vm_page_queue_lock at this point... we still need to take the local lock + * in case this operation is being run on a different CPU then the local queue's identity, + * but we don't have to worry about the page moving to a global queue or becoming wired + * while we're grabbing the local lock since those operations would require the global + * vm_page_queue_lock to be held, and we already own it. + * + * this is why its safe to utilze the wire_count field in the vm_page_t as the local_id... + * 'wired' and local are ALWAYS mutually exclusive conditions. + */ +void +vm_page_queues_remove(vm_page_t mem) +{ + boolean_t was_pageable; + + VM_PAGE_QUEUES_ASSERT(mem, 1); + assert(!mem->pageout_queue); + /* + * if (mem->pageout_queue) + * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue... 
+ +/* + * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the + * local queues if they exist... it's the only spot in the system where we add pages + * to those queues... once on those queues, those pages can only move to one of the + * global page queues or the free queues... they NEVER move from local q to local q. + * the 'local' state is stable when vm_page_queues_remove is called since we're behind + * the global vm_page_queue_lock at this point... we still need to take the local lock + * in case this operation is being run on a different CPU than the local queue's identity, + * but we don't have to worry about the page moving to a global queue or becoming wired + * while we're grabbing the local lock since those operations would require the global + * vm_page_queue_lock to be held, and we already own it. + * + * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id... + * 'wired' and local are ALWAYS mutually exclusive conditions. + */ +void +vm_page_queues_remove(vm_page_t mem) +{ + boolean_t was_pageable; + + VM_PAGE_QUEUES_ASSERT(mem, 1); + assert(!mem->pageout_queue); + /* + * if (mem->pageout_queue) + * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue... + * the caller is responsible for determining if the page is on that queue, and if so, must + * either first remove it (it needs both the page queues lock and the object lock to do + * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove + */ + if (mem->local) { + struct vpl *lq; + assert(mem->object != kernel_object); + assert(mem->object != compressor_object); + assert(!mem->inactive && !mem->speculative); + assert(!mem->active && !mem->throttled); + assert(!mem->clean_queue); + assert(!mem->fictitious); + lq = &vm_page_local_q[mem->local_id].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + queue_remove(&lq->vpl_queue, + mem, vm_page_t, pageq); + mem->local = FALSE; + mem->local_id = 0; + lq->vpl_count--; + if (mem->object->internal) { + lq->vpl_internal_count--; + } else { + lq->vpl_external_count--; + } + VPL_UNLOCK(&lq->vpl_lock); + was_pageable = FALSE; + } + + else if (mem->active) { + assert(mem->object != kernel_object); + assert(mem->object != compressor_object); + assert(!mem->inactive && !mem->speculative); + assert(!mem->clean_queue); + assert(!mem->throttled); + assert(!mem->fictitious); + queue_remove(&vm_page_queue_active, + mem, vm_page_t, pageq); + mem->active = FALSE; + vm_page_active_count--; + was_pageable = TRUE; + } + + else if (mem->inactive) { + assert(mem->object != kernel_object); + assert(mem->object != compressor_object); + assert(!mem->active && !mem->speculative); + assert(!mem->throttled); + assert(!mem->fictitious); + vm_page_inactive_count--; + if (mem->clean_queue) { + queue_remove(&vm_page_queue_cleaned, + mem, vm_page_t, pageq); + mem->clean_queue = FALSE; + vm_page_cleaned_count--; + } else { + if (mem->object->internal) { + queue_remove(&vm_page_queue_anonymous, + mem, vm_page_t, pageq); + vm_page_anonymous_count--; + } else { + queue_remove(&vm_page_queue_inactive, + mem, vm_page_t, pageq); + } + vm_purgeable_q_advance_all(); + } + mem->inactive = FALSE; + was_pageable = TRUE; + } + + else if (mem->throttled) { + assert(mem->object != compressor_object); + assert(!mem->active && !mem->inactive); + assert(!mem->speculative); + assert(!mem->fictitious); + queue_remove(&vm_page_queue_throttled, + mem, vm_page_t, pageq); + mem->throttled = FALSE; + vm_page_throttled_count--; + was_pageable = FALSE; + } + + else if (mem->speculative) { + assert(mem->object != compressor_object); + assert(!mem->active && !mem->inactive); + assert(!mem->throttled); + assert(!mem->fictitious); + remque(&mem->pageq); + mem->speculative = FALSE; + vm_page_speculative_count--; + was_pageable = TRUE; + } + + else if (mem->pageq.next || mem->pageq.prev) { + was_pageable = FALSE; + panic("vm_page_queues_remove: unmarked page on Q"); + } else { + was_pageable = FALSE; + } + + mem->pageq.next = NULL; + mem->pageq.prev = NULL; + VM_PAGE_QUEUES_ASSERT(mem, 0); + if (was_pageable) { + if (mem->object->internal) { + vm_page_pageable_internal_count--; + } else { + vm_page_pageable_external_count--; + } + } +} + +void +vm_page_remove_internal(vm_page_t page) +{ + vm_object_t __object = page->object; + if (page == __object->memq_hint) { + vm_page_t __new_hint; + queue_entry_t __qe; + __qe = queue_next(&page->listq); + if (queue_end(&__object->memq, __qe)) { + __qe = queue_prev(&page->listq); + if (queue_end(&__object->memq, __qe)) { + __qe = NULL; + } + } + __new_hint = (vm_page_t) __qe; + __object->memq_hint = __new_hint; + } + queue_remove(&__object->memq, page, vm_page_t, listq); +}
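vm_page_queues_remove above and vm_page_enqueue_inactive below are normally used as a pair under the page-queues lock; the DW_move_page handling earlier in this diff does exactly this. A condensed sketch of that pattern (assuming, as the NOTE comment above requires, that the caller has already ruled out pageout_queue membership):

	/* sketch: demote a page to the inactive queue, as in DW_move_page */
	vm_page_lockspin_queues();
	if ( !m->pageout_queue) {
		vm_page_queues_remove(m);		/* pull m off whatever queue it is on */
		assert(m->object != kernel_object);
		vm_page_enqueue_inactive(m, FALSE);	/* FALSE == enqueue at the tail */
	}
	vm_page_unlock_queues();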
+ +void +vm_page_enqueue_inactive(vm_page_t mem, boolean_t first) +{ + VM_PAGE_QUEUES_ASSERT(mem, 0); + assert(!mem->fictitious); + assert(!mem->laundry); + assert(!mem->pageout_queue); + vm_page_check_pageable_safe(mem); + if (mem->object->internal) { + if (first == TRUE) + queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq); + else + queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq); + vm_page_anonymous_count++; + vm_page_pageable_internal_count++; + } else { + if (first == TRUE) + queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq); + else + queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq); + vm_page_pageable_external_count++; + } + mem->inactive = TRUE; + vm_page_inactive_count++; + token_new_pagecount++; +} + +/* + * Pages from special kernel objects shouldn't + * be placed on pageable queues. + */ +void +vm_page_check_pageable_safe(vm_page_t page) +{ + if (page->object == kernel_object) { + panic("vm_page_check_pageable_safe: trying to add page " \ "from kernel object (%p) to pageable queue", kernel_object); + } + + if (page->object == compressor_object) { + panic("vm_page_check_pageable_safe: trying to add page " \ "from compressor object (%p) to pageable queue", compressor_object); + } + + if (page->object == vm_submap_object) { + panic("vm_page_check_pageable_safe: trying to add page " \ "from submap object (%p) to pageable queue", vm_submap_object); + } +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * wired page diagnose + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include <libkern/OSKextLibPrivate.h> + +vm_allocation_site_t * +vm_allocation_sites[VM_KERN_MEMORY_COUNT]; + +vm_tag_t +vm_tag_bt(void) +{ + uintptr_t* frameptr; + uintptr_t* frameptr_next; + uintptr_t retaddr; + uintptr_t kstackb, kstackt; + const vm_allocation_site_t * site; + thread_t cthread; + + cthread = current_thread(); + if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK; + + kstackb = cthread->kernel_stack; + kstackt = kstackb + kernel_stack_size; + + /* Load stack frame pointer (EBP on x86) into frameptr */ + frameptr = __builtin_frame_address(0); + site = NULL; + while (frameptr != NULL) + { + /* Verify thread stack bounds */ + if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break; + + /* Next frame pointer is pointed to by the previous one */ + frameptr_next = (uintptr_t*) *frameptr; + + /* Pull return address from one spot above the frame pointer */ + retaddr = *(frameptr + 1); + + if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) + { + site = OSKextGetAllocationSiteForCaller(retaddr); + break; + } + + frameptr = frameptr_next; + } + return (site ? site->tag : VM_KERN_MEMORY_NONE); +}
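vm_tag_bt above resolves a tag by walking saved frame pointers; the allocator that follows hands out dynamic tags from a 256-bit free map, using __builtin_clzll so that the lowest-numbered free tag in a word is found in one instruction. A standalone model of that bitmap trick (tag_alloc, tag_free, and the 256-tag limit here are illustrative stand-ins for vm_tag_alloc_locked and vm_tag_free_locked):

#include <assert.h>
#include <stdint.h>

static uint64_t free_bits[256 / 64];	/* bit set == tag is free */

static void tag_free(unsigned int tag)
{
	/* tag t lives in word t/64, counted from the MSB down */
	free_bits[tag >> 6] |= (1ULL << (63 - (tag & 63)));
}

static unsigned int tag_alloc(void)
{
	unsigned int idx;
	for (idx = 0; idx < 256 / 64; idx++) {
		uint64_t avail = free_bits[idx];
		if (avail) {
			/* clzll finds the leftmost set bit == smallest tag in this word */
			unsigned int tag = (unsigned int)__builtin_clzll(avail);
			free_bits[idx] = avail & ~(1ULL << (63 - tag));
			return tag + (idx << 6);
		}
	}
	return 256;	/* exhausted; xnu falls back to VM_KERN_MEMORY_ANY */
}

int main(void)
{
	tag_free(5); tag_free(70);
	assert(tag_alloc() == 5);	/* lowest free tag wins */
	assert(tag_alloc() == 70);
	assert(tag_alloc() == 256);
	return 0;
}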
+ +static uint64_t free_tag_bits[256/64]; + +void +vm_tag_alloc_locked(vm_allocation_site_t * site) +{ + vm_tag_t tag; + uint64_t avail; + uint64_t idx; + + if (site->tag) return; + + idx = 0; + while (TRUE) + { + avail = free_tag_bits[idx]; + if (avail) + { + tag = __builtin_clzll(avail); + avail &= ~(1ULL << (63 - tag)); + free_tag_bits[idx] = avail; + tag += (idx << 6); + break; + } + idx++; + if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0]))) + { + tag = VM_KERN_MEMORY_ANY; + break; + } + } + site->tag = tag; + if (VM_KERN_MEMORY_ANY != tag) + { + assert(!vm_allocation_sites[tag]); + vm_allocation_sites[tag] = site; + } +} + +static void +vm_tag_free_locked(vm_tag_t tag) +{ + uint64_t avail; + uint32_t idx; + uint64_t bit; + + if (VM_KERN_MEMORY_ANY == tag) return; + + idx = (tag >> 6); + avail = free_tag_bits[idx]; + tag &= 63; + bit = (1ULL << (63 - tag)); + assert(!(avail & bit)); + free_tag_bits[idx] = (avail | bit); +} + +static void +vm_tag_init(void) +{ + vm_tag_t tag; + for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) + { + vm_tag_free_locked(tag); + } +} + +vm_tag_t +vm_tag_alloc(vm_allocation_site_t * site) +{ + vm_tag_t tag; + + if (VM_TAG_BT & site->flags) + { + tag = vm_tag_bt(); + if (VM_KERN_MEMORY_NONE != tag) return (tag); + } + + if (!site->tag) + { + lck_spin_lock(&vm_allocation_sites_lock); + vm_tag_alloc_locked(site); + lck_spin_unlock(&vm_allocation_sites_lock); + } + + return (site->tag); +} + +static void +vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object) +{ + if (!object->wired_page_count) return; + if (object != kernel_object) + { + assert(object->wire_tag < num_sites); + sites[object->wire_tag].size += ptoa_64(object->wired_page_count); + } +} + +typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites, + unsigned int num_sites, vm_object_t object); + +static void +vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites, + vm_page_iterate_proc proc, purgeable_q_t queue, + int group) +{ + vm_object_t object; + + for (object = (vm_object_t) queue_first(&queue->objq[group]); + !queue_end(&queue->objq[group], (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) + { + proc(sites, num_sites, object); + } +} + +static void +vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites, + vm_page_iterate_proc proc) +{ + purgeable_q_t volatile_q; + queue_head_t * nonvolatile_q; + vm_object_t object; + int group; + + lck_spin_lock(&vm_objects_wired_lock); + queue_iterate(&vm_objects_wired, + object, + vm_object_t, + objq) + { + proc(sites, num_sites, object); + } + lck_spin_unlock(&vm_objects_wired_lock); + + lck_mtx_lock(&vm_purgeable_queue_lock); + nonvolatile_q = &purgeable_nonvolatile_queue; + for (object = (vm_object_t) queue_first(nonvolatile_q); + !queue_end(nonvolatile_q, (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) + { + proc(sites, num_sites, object); + } + + volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; + vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0); + + volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + { + vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group); + } + + volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + { +
vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group); + } + lck_mtx_unlock(&vm_purgeable_queue_lock); +} + +static uint64_t +process_account(mach_memory_info_t * sites, unsigned int __unused num_sites) +{ + uint64_t found; + unsigned int idx; + vm_allocation_site_t * site; + + assert(num_sites >= VM_KERN_MEMORY_COUNT); + found = 0; + for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++) + { + found += sites[idx].size; + if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) + { + sites[idx].site = idx; + sites[idx].flags |= VM_KERN_SITE_TAG; + if (VM_KERN_MEMORY_ZONE == idx) sites[idx].flags |= VM_KERN_SITE_HIDE; + else sites[idx].flags |= VM_KERN_SITE_WIRED; + continue; + } + lck_spin_lock(&vm_allocation_sites_lock); + if ((site = vm_allocation_sites[idx])) + { + if (sites[idx].size) + { + sites[idx].flags |= VM_KERN_SITE_WIRED; + if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags)) + { + sites[idx].site = OSKextGetKmodIDForSite(site); + sites[idx].flags |= VM_KERN_SITE_KMOD; + } + else + { + sites[idx].site = VM_KERNEL_UNSLIDE(site); + sites[idx].flags |= VM_KERN_SITE_KERNEL; + } + site = NULL; + } + else + { + vm_tag_free_locked(site->tag); + site->tag = VM_KERN_MEMORY_NONE; + vm_allocation_sites[idx] = NULL; + if (!(VM_TAG_UNLOAD & site->flags)) site = NULL; + } + } + lck_spin_unlock(&vm_allocation_sites_lock); + if (site) OSKextFreeSite(site); + } + return (found); +} + +kern_return_t +vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites) +{ + enum { kMaxKernelDepth = 1 }; + vm_map_t maps [kMaxKernelDepth]; + vm_map_entry_t entries[kMaxKernelDepth]; + vm_map_t map; + vm_map_entry_t entry; + vm_object_offset_t offset; + vm_page_t page; + int stackIdx, count; + uint64_t wired_size; + uint64_t wired_managed_size; + uint64_t wired_reserved_size; + mach_memory_info_t * counts; + + bzero(sites, num_sites * sizeof(mach_memory_info_t)); + + vm_page_iterate_objects(sites, num_sites, &vm_page_count_object); + + wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count); + wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count); + wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial); + + assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT)); + counts = &sites[VM_KERN_MEMORY_COUNT]; + +#define SET_COUNT(xcount, xsize, xflags) \ + counts[xcount].site = (xcount); \ + counts[xcount].size = (xsize); \ + counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags; + + SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0); + SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0); + SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0); + SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED); + +#define SET_MAP(xcount, xsize, xfree, xlargest) \ + counts[xcount].site = (xcount); \ + counts[xcount].size = (xsize); \ + counts[xcount].free = (xfree); \ + counts[xcount].largest = (xlargest); \ + counts[xcount].flags = VM_KERN_SITE_COUNTER; + + vm_map_size_t map_size, map_free, map_largest; + + vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest); + + vm_map_sizes(zone_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest); + + vm_map_sizes(kalloc_map, 
&map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest); + + map = kernel_map; + stackIdx = 0; + while (map) + { + vm_map_lock(map); + for (entry = map->hdr.links.next; map; entry = entry->links.next) + { + if (entry->is_sub_map) + { + assert(stackIdx < kMaxKernelDepth); + maps[stackIdx] = map; + entries[stackIdx] = entry; + stackIdx++; + map = VME_SUBMAP(entry); + entry = NULL; + break; + } + if (VME_OBJECT(entry) == kernel_object) + { + count = 0; + vm_object_lock(VME_OBJECT(entry)); + for (offset = entry->links.start; offset < entry->links.end; offset += page_size) + { + page = vm_page_lookup(VME_OBJECT(entry), offset); + if (page && VM_PAGE_WIRED(page)) count++; + } + vm_object_unlock(VME_OBJECT(entry)); + + if (count) + { + assert(VME_ALIAS(entry) < num_sites); + sites[VME_ALIAS(entry)].size += ptoa_64(count); + } + } + if (entry == vm_map_last_entry(map)) + { + vm_map_unlock(map); + if (!stackIdx) map = NULL; + else + { + --stackIdx; + map = maps[stackIdx]; + entry = entries[stackIdx]; + } + } + } + } + + process_account(sites, num_sites); + + return (KERN_SUCCESS); +}