X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d1ecb069dfe24481e4a83f44cb5217a2b06746d7..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/vm/vm_resident.c diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c index 9552295d8..25c1edb26 100644 --- a/osfmk/vm/vm_resident.c +++ b/osfmk/vm/vm_resident.c @@ -64,6 +64,7 @@ #include #include +#include #include #include @@ -76,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -84,39 +86,49 @@ #include /* kernel_memory_allocate() */ #include #include +#include #include -#include /* (BRINGUP) */ -#include /* (BRINGUP) */ +#include #include #include #include +#include -#include - - -#if CONFIG_EMBEDDED -#include +#if CONFIG_PHANTOM_CACHE +#include #endif +#include + #include +boolean_t hibernate_cleaning_in_progress = FALSE; boolean_t vm_page_free_verify = TRUE; -int speculative_age_index = 0; -int speculative_steal_index = 0; +uint32_t vm_lopage_free_count = 0; +uint32_t vm_lopage_free_limit = 0; +uint32_t vm_lopage_lowater = 0; +boolean_t vm_lopage_refill = FALSE; +boolean_t vm_lopage_needed = FALSE; + lck_mtx_ext_t vm_page_queue_lock_ext; lck_mtx_ext_t vm_page_queue_free_lock_ext; lck_mtx_ext_t vm_purgeable_queue_lock_ext; +int speculative_age_index = 0; +int speculative_steal_index = 0; struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1]; __private_extern__ void vm_page_init_lck_grp(void); -static void vm_page_free_prepare(vm_page_t page); +static void vm_page_free_prepare(vm_page_t page); +static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr); +static void vm_tag_init(void); +uint64_t vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS; /* * Associated with page of user-allocatable memory is a @@ -131,7 +143,7 @@ static void vm_page_free_prepare(vm_page_t page); vm_offset_t virtual_space_start; vm_offset_t virtual_space_end; -int vm_page_pages; +uint32_t vm_page_pages; /* * The vm_page_lookup() routine, which provides for fast @@ -142,7 +154,7 @@ int vm_page_pages; * or VP, table.] */ typedef struct { - vm_page_t pages; + vm_page_packed_t page_list; #if MACH_PAGE_HASH_STATS int cur_count; /* current count */ int hi_count; /* high water mark */ @@ -160,6 +172,18 @@ uint32_t vm_page_bucket_hash; /* Basic bucket hash */ unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? 
*/ lck_spin_t *vm_page_bucket_locks; +lck_spin_t vm_objects_wired_lock; +lck_spin_t vm_allocation_sites_lock; + +#if VM_PAGE_BUCKETS_CHECK +boolean_t vm_page_buckets_check_ready = FALSE; +#if VM_PAGE_FAKE_BUCKETS +vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */ +vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end; +#endif /* VM_PAGE_FAKE_BUCKETS */ +#endif /* VM_PAGE_BUCKETS_CHECK */ + +extern int not_in_kdp; #if MACH_PAGE_HASH_STATS @@ -224,6 +248,7 @@ struct vm_page vm_page_template; vm_page_t vm_pages = VM_PAGE_NULL; unsigned int vm_pages_count = 0; +ppnum_t vm_page_lowest = 0; /* * Resident pages that represent real memory @@ -233,15 +258,13 @@ unsigned int vm_pages_count = 0; unsigned int vm_colors; unsigned int vm_color_mask; /* mask is == (vm_colors-1) */ unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */ +unsigned int vm_free_magazine_refill_limit = 0; queue_head_t vm_page_queue_free[MAX_COLORS]; -vm_page_t vm_page_queue_fictitious; unsigned int vm_page_free_wanted; unsigned int vm_page_free_wanted_privileged; unsigned int vm_page_free_count; unsigned int vm_page_fictitious_count; -unsigned int vm_page_free_count_minimum; /* debugging */ - /* * Occasionally, the virtual memory system uses * resident page structures that do not refer to @@ -254,6 +277,8 @@ unsigned int vm_page_free_count_minimum; /* debugging */ zone_t vm_page_zone; vm_locks_array_t vm_page_locks; decl_lck_mtx_data(,vm_page_alloc_lock) +lck_mtx_ext_t vm_page_alloc_lock_ext; + unsigned int io_throttle_zero_fill; unsigned int vm_page_local_q_count = 0; @@ -261,6 +286,9 @@ unsigned int vm_page_local_q_soft_limit = 250; unsigned int vm_page_local_q_hard_limit = 500; struct vplq *vm_page_local_q = NULL; +/* N.B. Guard and fictitious pages must not + * be assigned a zero phys_page value. + */ /* * Fictitious pages don't have a physical address, * but we must initialize phys_page to something. @@ -285,38 +313,55 @@ ppnum_t vm_page_guard_addr = (ppnum_t) -2; * system (pageout daemon). These queues are * defined here, but are shared by the pageout * module. 
The inactive queue is broken into - * inactive and zf for convenience as the + * file backed and anonymous for convenience as the * pageout daemon often assignes a higher - * affinity to zf pages + * importance to anonymous pages (less likely to pick) */ queue_head_t vm_page_queue_active; queue_head_t vm_page_queue_inactive; -queue_head_t vm_page_queue_zf; /* inactive memory queue for zero fill */ +queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */ queue_head_t vm_page_queue_throttled; +queue_head_t vm_objects_wired; + unsigned int vm_page_active_count; unsigned int vm_page_inactive_count; +unsigned int vm_page_anonymous_count; unsigned int vm_page_throttled_count; unsigned int vm_page_speculative_count; + unsigned int vm_page_wire_count; +unsigned int vm_page_stolen_count; +unsigned int vm_page_wire_count_initial; +unsigned int vm_page_pages_initial; unsigned int vm_page_gobble_count = 0; -unsigned int vm_page_wire_count_warning = 0; -unsigned int vm_page_gobble_count_warning = 0; + +#define VM_PAGE_WIRE_COUNT_WARNING 0 +#define VM_PAGE_GOBBLE_COUNT_WARNING 0 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */ unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */ uint64_t vm_page_purged_count = 0; /* total count of purged pages */ +unsigned int vm_page_xpmapped_external_count = 0; +unsigned int vm_page_external_count = 0; +unsigned int vm_page_internal_count = 0; +unsigned int vm_page_pageable_external_count = 0; +unsigned int vm_page_pageable_internal_count = 0; + #if DEVELOPMENT || DEBUG unsigned int vm_page_speculative_recreated = 0; unsigned int vm_page_speculative_created = 0; unsigned int vm_page_speculative_used = 0; #endif -ppnum_t vm_lopage_poolstart = 0; -ppnum_t vm_lopage_poolend = 0; -int vm_lopage_poolsize = 0; +queue_head_t vm_page_queue_cleaned; + +unsigned int vm_page_cleaned_count = 0; +unsigned int vm_pageout_enqueued_cleaned = 0; + uint64_t max_valid_dma_address = 0xffffffffffffffffULL; +ppnum_t max_valid_low_ppnum = 0xffffffff; /* @@ -328,12 +373,13 @@ uint64_t max_valid_dma_address = 0xffffffffffffffffULL; unsigned int vm_page_free_target = 0; unsigned int vm_page_free_min = 0; unsigned int vm_page_throttle_limit = 0; -uint32_t vm_page_creation_throttle = 0; unsigned int vm_page_inactive_target = 0; +unsigned int vm_page_anonymous_min = 0; unsigned int vm_page_inactive_min = 0; unsigned int vm_page_free_reserved = 0; unsigned int vm_page_throttle_count = 0; + /* * The VM system has a couple of heuristics for deciding * that pages are "uninteresting" and should be placed @@ -358,7 +404,9 @@ struct vm_page_stats_reusable vm_page_stats_reusable; void vm_set_page_size(void) { - page_mask = page_size - 1; + page_size = PAGE_SIZE; + page_mask = PAGE_MASK; + page_shift = PAGE_SHIFT; if ((page_mask & page_size) != 0) panic("vm_set_page_size: page size not a power of two"); @@ -368,6 +416,8 @@ vm_set_page_size(void) break; } +#define COLOR_GROUPS_TO_STEAL 4 + /* Called once during statup, once the cache geometry is known. 
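
The vm_set_page_size() change above now takes page_size, page_mask and page_shift straight from PAGE_SIZE, PAGE_MASK and PAGE_SHIFT, and still panics if the size is not a power of two. A minimal user-space sketch of that invariant and of how the shift can be derived from the size; the helper name is illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>

static unsigned int
derive_page_shift(uintptr_t page_size)
{
	unsigned int shift = 0;

	/* a power of two has exactly one bit set, so size & (size - 1) == 0;
	 * this is the same test as the (page_mask & page_size) check above */
	assert(page_size != 0 && (page_size & (page_size - 1)) == 0);

	while (((uintptr_t)1 << shift) != page_size)
		shift++;
	return shift;		/* e.g. 4096 -> 12, with mask 0xfff */
}
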
*/ @@ -393,6 +443,8 @@ vm_page_set_colors( void ) vm_colors = n; vm_color_mask = n - 1; + + vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL; } @@ -420,6 +472,9 @@ vm_page_init_lck_grp(void) lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr); lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr); lck_attr_setdefault(&vm_page_lck_attr); + lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr); + + vm_compressor_init_locks(); } void @@ -444,6 +499,8 @@ vm_page_init_local_q() VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr); queue_init(&lq->vpl_queue); lq->vpl_count = 0; + lq->vpl_internal_count = 0; + lq->vpl_external_count = 0; } vm_page_local_q_count = num_cpus; @@ -485,7 +542,7 @@ vm_page_bootstrap( m->pageq.prev = NULL; m->listq.next = NULL; m->listq.prev = NULL; - m->next = VM_PAGE_NULL; + m->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL); m->object = VM_OBJECT_NULL; /* reset later */ m->offset = (vm_object_offset_t) -1; /* reset later */ @@ -509,6 +566,7 @@ vm_page_bootstrap( m->busy = TRUE; m->wanted = FALSE; m->tabled = FALSE; + m->hashed = FALSE; m->fictitious = FALSE; m->pmapped = FALSE; m->wpmapped = FALSE; @@ -524,16 +582,17 @@ vm_page_bootstrap( m->unusual = FALSE; m->encrypted = FALSE; m->encrypted_cleaning = FALSE; - m->list_req_pending = FALSE; - m->dump_cleaning = FALSE; m->cs_validated = FALSE; m->cs_tainted = FALSE; + m->cs_nx = FALSE; m->no_cache = FALSE; - m->zero_fill = FALSE; m->reusable = FALSE; + m->slid = FALSE; + m->xpmapped = FALSE; + m->compressor = FALSE; + m->written_by_kernel = FALSE; m->__unused_object_bits = 0; - /* * Initialize the page queues. */ @@ -558,15 +617,19 @@ vm_page_bootstrap( purgeable_queues[i].debug_count_objects = 0; #endif }; + purgeable_nonvolatile_count = 0; + queue_init(&purgeable_nonvolatile_queue); for (i = 0; i < MAX_COLORS; i++ ) queue_init(&vm_page_queue_free[i]); + queue_init(&vm_lopage_queue_free); - vm_page_queue_fictitious = VM_PAGE_NULL; queue_init(&vm_page_queue_active); queue_init(&vm_page_queue_inactive); + queue_init(&vm_page_queue_cleaned); queue_init(&vm_page_queue_throttled); - queue_init(&vm_page_queue_zf); + queue_init(&vm_page_queue_anonymous); + queue_init(&vm_objects_wired); for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) { queue_init(&vm_page_queue_speculative[i].age_q); @@ -583,9 +646,10 @@ vm_page_bootstrap( /* * Steal memory for the map and zone subsystems. */ - - vm_map_steal_memory(); + kernel_debug_string_simple("zone_steal_memory"); zone_steal_memory(); + kernel_debug_string_simple("vm_map_steal_memory"); + vm_map_steal_memory(); /* * Allocate (and initialize) the virtual-to-physical @@ -630,10 +694,36 @@ vm_page_bootstrap( if (vm_page_hash_mask & vm_page_bucket_count) printf("vm_page_bootstrap: WARNING -- strange page hash\n"); +#if VM_PAGE_BUCKETS_CHECK +#if VM_PAGE_FAKE_BUCKETS + /* + * Allocate a decoy set of page buckets, to detect + * any stomping there. 
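
vm_page_bootstrap() above expects the bucket array size to be a power of two (it warns about a "strange page hash" when the hash mask and the bucket count overlap), and vm_page_bucket_locks stays much smaller than the bucket array because several buckets share one spin lock (BUCKETS_PER_LOCK, used later in vm_page_lookup()). A self-contained sketch of that shape; the mixing function below is purely illustrative, since the real vm_page_hash() is not part of this hunk:

#include <stdint.h>

#define EXAMPLE_BUCKET_COUNT		1024u	/* must be a power of two */
#define EXAMPLE_BUCKETS_PER_LOCK	16u

/* map an (object, offset) pair to a bucket index with a cheap mix and a
 * power-of-two mask; only the masking step mirrors the code above */
static unsigned int
example_page_hash(const void *object, uint64_t offset)
{
	uintptr_t mix = (uintptr_t)object + (uintptr_t)(offset >> 12);

	return (unsigned int)(mix & (EXAMPLE_BUCKET_COUNT - 1));
}

/* many buckets share one spin lock, so the lock array stays small */
static unsigned int
example_bucket_lock_index(unsigned int hash_id)
{
	return hash_id / EXAMPLE_BUCKETS_PER_LOCK;
}
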
+ */ + vm_page_fake_buckets = (vm_page_bucket_t *) + pmap_steal_memory(vm_page_bucket_count * + sizeof(vm_page_bucket_t)); + vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets; + vm_page_fake_buckets_end = + vm_map_round_page((vm_page_fake_buckets_start + + (vm_page_bucket_count * + sizeof (vm_page_bucket_t))), + PAGE_MASK); + char *cp; + for (cp = (char *)vm_page_fake_buckets_start; + cp < (char *)vm_page_fake_buckets_end; + cp++) { + *cp = 0x5a; + } +#endif /* VM_PAGE_FAKE_BUCKETS */ +#endif /* VM_PAGE_BUCKETS_CHECK */ + + kernel_debug_string_simple("vm_page_buckets"); vm_page_buckets = (vm_page_bucket_t *) pmap_steal_memory(vm_page_bucket_count * sizeof(vm_page_bucket_t)); + kernel_debug_string_simple("vm_page_bucket_locks"); vm_page_bucket_locks = (lck_spin_t *) pmap_steal_memory(vm_page_bucket_lock_count * sizeof(lck_spin_t)); @@ -641,7 +731,7 @@ vm_page_bootstrap( for (i = 0; i < vm_page_bucket_count; i++) { register vm_page_bucket_t *bucket = &vm_page_buckets[i]; - bucket->pages = VM_PAGE_NULL; + bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL); #if MACH_PAGE_HASH_STATS bucket->cur_count = 0; bucket->hi_count = 0; @@ -651,6 +741,14 @@ vm_page_bootstrap( for (i = 0; i < vm_page_bucket_lock_count; i++) lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr); + lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); + lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); + vm_tag_init(); + +#if VM_PAGE_BUCKETS_CHECK + vm_page_buckets_check_ready = TRUE; +#endif /* VM_PAGE_BUCKETS_CHECK */ + /* * Machine-dependent code allocates the resident page table. * It uses vm_page_init to initialize the page frames. @@ -659,6 +757,7 @@ vm_page_bootstrap( * to get the alignment right. */ + kernel_debug_string_simple("pmap_startup"); pmap_startup(&virtual_space_start, &virtual_space_end); virtual_space_start = round_page(virtual_space_start); virtual_space_end = trunc_page(virtual_space_end); @@ -674,12 +773,14 @@ vm_page_bootstrap( * all VM managed pages are "free", courtesy of pmap_startup. */ assert((unsigned int) atop_64(max_mem) == atop_64(max_mem)); - vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count; /* initial value */ - vm_page_free_count_minimum = vm_page_free_count; + vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count; /* initial value */ + vm_page_wire_count_initial = vm_page_wire_count; + vm_page_pages_initial = vm_page_pages; printf("vm_page_bootstrap: %d free pages and %d wired pages\n", vm_page_free_count, vm_page_wire_count); + kernel_debug_string_simple("vm_page_bootstrap complete"); simple_lock_init(&vm_paging_lock, 0); } @@ -726,7 +827,7 @@ pmap_steal_memory( addr = virtual_space_start; virtual_space_start += size; - kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */ + //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */ /* * Allocate and map physical pages to back new virtual pages. 
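
The decoy buckets above are filled with 0x5a purely so that a later pass can detect anything stomping on them. A small sketch of that fill-and-verify idea; the kernel's own verification routine lives outside this hunk, so the names below are illustrative:

#include <stdbool.h>

#define CANARY_BYTE 0x5a		/* same fill byte as the decoy buckets */

static void
canary_fill(char *start, char *end)
{
	for (char *cp = start; cp < end; cp++)
		*cp = CANARY_BYTE;
}

/* returns false as soon as any byte of the decoy region has been overwritten */
static bool
canary_intact(const char *start, const char *end)
{
	for (const char *cp = start; cp < end; cp++) {
		if (*cp != CANARY_BYTE)
			return false;
	}
	return true;
}
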
@@ -735,12 +836,8 @@ pmap_steal_memory( for (vaddr = round_page(addr); vaddr < addr + size; vaddr += PAGE_SIZE) { -#if defined(__LP64__) - if (!pmap_next_page_k64(&phys_page)) -#else - if (!pmap_next_page(&phys_page)) -#endif + if (!pmap_next_page_hi(&phys_page)) panic("pmap_steal_memory"); /* @@ -752,18 +849,19 @@ pmap_steal_memory( #endif pmap_enter(kernel_pmap, vaddr, phys_page, - VM_PROT_READ|VM_PROT_WRITE, + VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE); /* * Account for newly stolen memory */ vm_page_wire_count++; - + vm_page_stolen_count++; } return (void *) addr; } +void vm_page_release_startup(vm_page_t mem); void pmap_startup( vm_offset_t *startp, @@ -772,8 +870,22 @@ pmap_startup( unsigned int i, npages, pages_initialized, fill, fillval; ppnum_t phys_page; addr64_t tmpaddr; - unsigned int num_of_lopages = 0; - unsigned int last_index; + + +#if defined(__LP64__) + /* + * struct vm_page must be of size 64 due to VM_PAGE_PACK_PTR use + */ + assert(sizeof(struct vm_page) == 64); + + /* + * make sure we are aligned on a 64 byte boundary + * for VM_PAGE_PACK_PTR (it clips off the low-order + * 6 bits of the pointer) + */ + if (virtual_space_start != virtual_space_end) + virtual_space_start = round_page(virtual_space_start); +#endif /* * We calculate how many page frames we will have @@ -789,64 +901,51 @@ pmap_startup( /* * Initialize the page frames. */ + kernel_debug_string_simple("Initialize the page frames"); for (i = 0, pages_initialized = 0; i < npages; i++) { if (!pmap_next_page(&phys_page)) break; + if (pages_initialized == 0 || phys_page < vm_page_lowest) + vm_page_lowest = phys_page; - vm_page_init(&vm_pages[i], phys_page); + vm_page_init(&vm_pages[i], phys_page, FALSE); vm_page_pages++; pages_initialized++; } vm_pages_count = pages_initialized; +#if defined(__LP64__) + + if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0])) != &vm_pages[0]) + panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]); + + if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1])) != &vm_pages[vm_pages_count-1]) + panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]); +#endif + kernel_debug_string_simple("page fill/release"); /* * Check if we want to initialize pages to a known value */ fill = 0; /* Assume no fill */ if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1; /* Set fill */ - - - /* - * if vm_lopage_poolsize is non-zero, than we need to reserve - * a pool of pages whose addresess are less than 4G... this pool - * is used by drivers whose hardware can't DMA beyond 32 bits... - * - * note that I'm assuming that the page list is ascending and - * ordered w/r to the physical address +#if DEBUG + /* This slows down booting the DEBUG kernel, particularly on + * large memory systems, but is worthwhile in deterministically + * trapping uninitialized memory usage. 
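
The new asserts above exist because VM_PAGE_PACK_PTR clips off the low-order 6 bits of a vm_page pointer, which only works if struct vm_page is exactly 64 bytes and the array is 64-byte aligned. A minimal sketch of that packing idea, assuming a 32-bit packed type and a single fixed base address; pack_base and the helper names are illustrative and the real macros differ in detail (they also accept VM_PAGE_NULL):

#include <assert.h>
#include <stdint.h>

typedef uint32_t page_packed_t;

static uintptr_t pack_base;		/* assumed: set once, 64-byte aligned */

static inline page_packed_t
pack_ptr(const void *p)
{
	uintptr_t delta = (uintptr_t)p - pack_base;

	assert((delta & 0x3f) == 0);	/* low-order 6 bits must be zero */
	return (page_packed_t)(delta >> 6);
}

static inline void *
unpack_ptr(page_packed_t v)
{
	return (void *)(pack_base + ((uintptr_t)v << 6));
}

/* the boot-time checks above verify exactly this round trip on the first
 * and last elements of the vm_pages array */
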
*/ - for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) { - vm_page_t m; - - m = &vm_pages[i]; - - if (m->phys_page >= (1 << (32 - PAGE_SHIFT))) - panic("couldn't reserve the lopage pool: not enough lo pages\n"); - - if (m->phys_page < vm_lopage_poolend) - panic("couldn't reserve the lopage pool: page list out of order\n"); - - vm_lopage_poolend = m->phys_page; - - if (vm_lopage_poolstart == 0) - vm_lopage_poolstart = m->phys_page; - else { - if (m->phys_page < vm_lopage_poolstart) - panic("couldn't reserve the lopage pool: page list out of order\n"); - } - - if (fill) - fillPage(m->phys_page, fillval); /* Fill the page with a know value if requested at boot */ - - vm_page_release(m); - } - last_index = i; - + if (fill == 0) { + fill = 1; + fillval = 0xDEB8F177; + } +#endif + if (fill) + kprintf("Filling vm_pages with pattern: 0x%x\n", fillval); // -debug code remove if (2 == vm_himemory_mode) { // free low -> high so high is preferred - for (i = last_index + 1; i <= pages_initialized; i++) { + for (i = 1; i <= pages_initialized; i++) { if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a know value if requested at boot */ - vm_page_release(&vm_pages[i - 1]); + vm_page_release_startup(&vm_pages[i - 1]); } } else @@ -858,11 +957,13 @@ pmap_startup( * the devices (which must address physical memory) happy if * they require several consecutive pages. */ - for (i = pages_initialized; i > last_index; i--) { + for (i = pages_initialized; i > 0; i--) { if(fill) fillPage(vm_pages[i - 1].phys_page, fillval); /* Fill the page with a know value if requested at boot */ - vm_page_release(&vm_pages[i - 1]); + vm_page_release_startup(&vm_pages[i - 1]); } + VM_CHECK_MEMORYSTATUS; + #if 0 { vm_page_t xx, xxo, xxl; @@ -926,6 +1027,7 @@ pmap_startup( void vm_page_module_init(void) { + uint64_t vm_page_zone_pages, vm_page_zone_data_size; vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page), 0, PAGE_SIZE, "vm pages"); @@ -933,18 +1035,23 @@ vm_page_module_init(void) zone_debug_disable(vm_page_zone); #endif /* ZONE_DEBUG */ + zone_change(vm_page_zone, Z_CALLERACCT, FALSE); zone_change(vm_page_zone, Z_EXPAND, FALSE); zone_change(vm_page_zone, Z_EXHAUST, TRUE); zone_change(vm_page_zone, Z_FOREIGN, TRUE); - - /* - * Adjust zone statistics to account for the real pages allocated - * in vm_page_create(). [Q: is this really what we want?] - */ - vm_page_zone->count += vm_page_pages; - vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size; - - lck_mtx_init(&vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr); + zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE); + /* + * Adjust zone statistics to account for the real pages allocated + * in vm_page_create(). [Q: is this really what we want?] 
+ */ + vm_page_zone->count += vm_page_pages; + vm_page_zone->sum_count += vm_page_pages; + vm_page_zone_data_size = vm_page_pages * vm_page_zone->elem_size; + vm_page_zone->cur_size += vm_page_zone_data_size; + vm_page_zone_pages = ((round_page(vm_page_zone_data_size)) / PAGE_SIZE); + OSAddAtomic64(vm_page_zone_pages, &(vm_page_zone->page_count)); + /* since zone accounts for these, take them out of stolen */ + VM_PAGE_MOVE_STOLEN(vm_page_zone_pages); } /* @@ -967,11 +1074,13 @@ vm_page_create( for (phys_page = start; phys_page < end; phys_page++) { - while ((m = (vm_page_t) vm_page_grab_fictitious()) + while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page)) == VM_PAGE_NULL) vm_page_more_fictitious(); - vm_page_init(m, phys_page); + m->fictitious = FALSE; + pmap_clear_noencrypt(phys_page); + vm_page_pages++; vm_page_release(m); } @@ -1003,7 +1112,17 @@ vm_page_insert( vm_object_t object, vm_object_offset_t offset) { - vm_page_insert_internal(mem, object, offset, FALSE, TRUE); + vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL); +} + +void +vm_page_insert_wired( + vm_page_t mem, + vm_object_t object, + vm_object_offset_t offset, + vm_tag_t tag) +{ + vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL); } void @@ -1011,23 +1130,35 @@ vm_page_insert_internal( vm_page_t mem, vm_object_t object, vm_object_offset_t offset, + vm_tag_t tag, boolean_t queues_lock_held, - boolean_t insert_in_hash) + boolean_t insert_in_hash, + boolean_t batch_pmap_op, + boolean_t batch_accounting, + uint64_t *delayed_ledger_update) { - vm_page_bucket_t *bucket; - lck_spin_t *bucket_lock; - int hash_id; + vm_page_bucket_t *bucket; + lck_spin_t *bucket_lock; + int hash_id; + task_t owner; XPR(XPR_VM_PAGE, "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n", object, offset, mem, 0,0); - +#if 0 + /* + * we may not hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(mem); +#endif - if (object == vm_submap_object) { - /* the vm_submap_object is only a placeholder for submaps */ - panic("vm_page_insert(vm_submap_object,0x%llx)\n", offset); - } + assert(page_aligned(offset)); + + assert(!VM_PAGE_WIRED(mem) || mem->private || mem->fictitious || (tag != VM_KERN_MEMORY_NONE)); + + /* the vm_submap_object is only a placeholder for submaps */ + assert(object != vm_submap_object); vm_object_lock_assert_exclusive(object); #if DEBUG @@ -1035,19 +1166,26 @@ vm_page_insert_internal( queues_lock_held ? LCK_MTX_ASSERT_OWNED : LCK_MTX_ASSERT_NOTOWNED); #endif /* DEBUG */ - + if (insert_in_hash == TRUE) { -#if DEBUG +#if DEBUG || VM_PAGE_CHECK_BUCKETS if (mem->tabled || mem->object != VM_OBJECT_NULL) panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) " "already in (obj=%p,off=0x%llx)", mem, object, offset, mem->object, mem->offset); #endif - assert(!object->internal || offset < object->size); + assert(!object->internal || offset < object->vo_size); /* only insert "pageout" pages into "pageout" objects, * and normal pages into normal objects */ +#if 00 + /* + * For some reason, this assertion gets tripped + * but it's mostly harmless, so let's disable it + * for now. 
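
vm_page_insert_internal() now carries a vm_tag_t, and a little further below it records that tag on the object the first time one of its pages is wired (object->wire_tag plus VM_OBJECT_WIRED). A small sketch of that 0 -> 1 / 1 -> 0 transition accounting, with illustrative names and types rather than the kernel's:

#include <assert.h>

typedef unsigned int tag_t;
#define TAG_NONE 0u

struct obj_wire_accounting {
	unsigned int	wired_page_count;
	tag_t		wire_tag;
};

/* capture the allocation tag only on the first wired page; later pages just
 * bump the count, so the object keeps the tag of whoever wired it first */
static void
obj_wire_page(struct obj_wire_accounting *obj, tag_t tag)
{
	if (obj->wired_page_count == 0) {
		assert(tag != TAG_NONE);
		obj->wire_tag = tag;		/* object would join the wired list here */
	}
	obj->wired_page_count++;
}

static void
obj_unwire_page(struct obj_wire_accounting *obj)
{
	assert(obj->wired_page_count > 0);
	if (--obj->wired_page_count == 0)
		obj->wire_tag = TAG_NONE;	/* object would leave the wired list here */
}
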
+ */ assert(object->pageout == mem->pageout); +#endif /* 00 */ assert(vm_page_lookup(object, offset) == VM_PAGE_NULL); @@ -1067,20 +1205,32 @@ vm_page_insert_internal( lck_spin_lock(bucket_lock); - mem->next = bucket->pages; - bucket->pages = mem; + mem->next_m = bucket->page_list; + bucket->page_list = VM_PAGE_PACK_PTR(mem); + assert(mem == VM_PAGE_UNPACK_PTR(bucket->page_list)); + #if MACH_PAGE_HASH_STATS if (++bucket->cur_count > bucket->hi_count) bucket->hi_count = bucket->cur_count; #endif /* MACH_PAGE_HASH_STATS */ - + mem->hashed = TRUE; lck_spin_unlock(bucket_lock); } + + { + unsigned int cache_attr; + + cache_attr = object->wimg_bits & VM_WIMG_MASK; + + if (cache_attr != VM_WIMG_USE_DEFAULT) { + PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op); + } + } /* * Now link into the object's list of backed pages. */ - - VM_PAGE_INSERT(mem, object); + queue_enter(&object->memq, mem, vm_page_t, listq); + object->memq_hint = mem; mem->tabled = TRUE; /* @@ -1089,17 +1239,81 @@ vm_page_insert_internal( object->resident_page_count++; if (VM_PAGE_WIRED(mem)) { - object->wired_page_count++; + if (!mem->private && !mem->fictitious) + { + if (!object->wired_page_count) + { + assert(VM_KERN_MEMORY_NONE != tag); + object->wire_tag = tag; + VM_OBJECT_WIRED(object); + } + } + object->wired_page_count++; } assert(object->resident_page_count >= object->wired_page_count); + if (batch_accounting == FALSE) { + if (object->internal) { + OSAddAtomic(1, &vm_page_internal_count); + } else { + OSAddAtomic(1, &vm_page_external_count); + } + } + + /* + * It wouldn't make sense to insert a "reusable" page in + * an object (the page would have been marked "reusable" only + * at the time of a madvise(MADV_FREE_REUSABLE) if it was already + * in the object at that time). + * But a page could be inserted in a "all_reusable" object, if + * something faults it in (a vm_read() from another task or a + * "use-after-free" issue in user space, for example). It can + * also happen if we're relocating a page from that object to + * a different physical page during a physically-contiguous + * allocation. + */ assert(!mem->reusable); + if (mem->object->all_reusable) { + OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count); + } + + if (object->purgable == VM_PURGABLE_DENY) { + owner = TASK_NULL; + } else { + owner = object->vo_purgeable_owner; + } + if (owner && + (object->purgable == VM_PURGABLE_NONVOLATILE || + VM_PAGE_WIRED(mem))) { + + if (delayed_ledger_update) + *delayed_ledger_update += PAGE_SIZE; + else { + /* more non-volatile bytes */ + ledger_credit(owner->ledger, + task_ledgers.purgeable_nonvolatile, + PAGE_SIZE); + /* more footprint */ + ledger_credit(owner->ledger, + task_ledgers.phys_footprint, + PAGE_SIZE); + } + + } else if (owner && + (object->purgable == VM_PURGABLE_VOLATILE || + object->purgable == VM_PURGABLE_EMPTY)) { + assert(! 
VM_PAGE_WIRED(mem)); + /* more volatile bytes */ + ledger_credit(owner->ledger, + task_ledgers.purgeable_volatile, + PAGE_SIZE); + } if (object->purgable == VM_PURGABLE_VOLATILE) { if (VM_PAGE_WIRED(mem)) { - OSAddAtomic(1, &vm_page_purgeable_wired_count); + OSAddAtomic(+1, &vm_page_purgeable_wired_count); } else { - OSAddAtomic(1, &vm_page_purgeable_count); + OSAddAtomic(+1, &vm_page_purgeable_count); } } else if (object->purgable == VM_PURGABLE_EMPTY && mem->throttled) { @@ -1117,6 +1331,25 @@ vm_page_insert_internal( if (queues_lock_held == FALSE) vm_page_unlock_queues(); } + +#if VM_OBJECT_TRACKING_OP_MODIFIED + if (vm_object_tracking_inited && + object->internal && + object->resident_page_count == 0 && + object->pager == NULL && + object->shadow != NULL && + object->shadow->copy == object) { + void *bt[VM_OBJECT_TRACKING_BTDEPTH]; + int numsaved = 0; + + numsaved =OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); + btlog_add_entry(vm_object_tracking_btlog, + object, + VM_OBJECT_TRACKING_OP_MODIFIED, + bt, + numsaved); + } +#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */ } /* @@ -1138,9 +1371,15 @@ vm_page_replace( lck_spin_t *bucket_lock; int hash_id; +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(mem); +#endif vm_object_lock_assert_exclusive(object); -#if DEBUG +#if DEBUG || VM_PAGE_CHECK_BUCKETS if (mem->tabled || mem->object != VM_OBJECT_NULL) panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) " "already in (obj=%p,off=0x%llx)", @@ -1165,31 +1404,33 @@ vm_page_replace( lck_spin_lock(bucket_lock); - if (bucket->pages) { - vm_page_t *mp = &bucket->pages; - vm_page_t m = *mp; + if (bucket->page_list) { + vm_page_packed_t *mp = &bucket->page_list; + vm_page_t m = VM_PAGE_UNPACK_PTR(*mp); do { if (m->object == object && m->offset == offset) { /* * Remove old page from hash list */ - *mp = m->next; + *mp = m->next_m; + m->hashed = FALSE; found_m = m; break; } - mp = &m->next; - } while ((m = *mp)); + mp = &m->next_m; + } while ((m = VM_PAGE_UNPACK_PTR(*mp))); - mem->next = bucket->pages; + mem->next_m = bucket->page_list; } else { - mem->next = VM_PAGE_NULL; + mem->next_m = VM_PAGE_PACK_PTR(VM_PAGE_NULL); } /* * insert new page at head of hash list */ - bucket->pages = mem; + bucket->page_list = VM_PAGE_PACK_PTR(mem); + mem->hashed = TRUE; lck_spin_unlock(bucket_lock); @@ -1201,7 +1442,7 @@ vm_page_replace( */ vm_page_free_unlocked(found_m, FALSE); } - vm_page_insert_internal(mem, object, offset, FALSE, FALSE); + vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL); } /* @@ -1222,6 +1463,7 @@ vm_page_remove( vm_page_t this; lck_spin_t *bucket_lock; int hash_id; + task_t owner; XPR(XPR_VM_PAGE, "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n", @@ -1231,8 +1473,14 @@ vm_page_remove( vm_object_lock_assert_exclusive(mem->object); assert(mem->tabled); assert(!mem->cleaning); + assert(!mem->laundry); +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(mem); - +#endif if (remove_from_hash == TRUE) { /* * Remove from the object_object/offset hash table @@ -1243,30 +1491,30 @@ vm_page_remove( lck_spin_lock(bucket_lock); - if ((this = bucket->pages) == mem) { + if ((this = VM_PAGE_UNPACK_PTR(bucket->page_list)) == mem) { /* optimize for common case */ - bucket->pages = mem->next; + bucket->page_list = mem->next_m; } else { - vm_page_t *prev; + vm_page_packed_t *prev; - for (prev = &this->next; - (this = *prev) != mem; - prev = &this->next) 
+ for (prev = &this->next_m; + (this = VM_PAGE_UNPACK_PTR(*prev)) != mem; + prev = &this->next_m) continue; - *prev = this->next; + *prev = this->next_m; } #if MACH_PAGE_HASH_STATS bucket->cur_count--; #endif /* MACH_PAGE_HASH_STATS */ - + mem->hashed = FALSE; lck_spin_unlock(bucket_lock); } /* * Now remove from the object's list of backed pages. */ - VM_PAGE_REMOVE(mem); + vm_page_remove_internal(mem); /* * And show that the object has one fewer resident @@ -1275,9 +1523,33 @@ vm_page_remove( assert(mem->object->resident_page_count > 0); mem->object->resident_page_count--; + + if (mem->object->internal) { +#if DEBUG + assert(vm_page_internal_count); +#endif /* DEBUG */ + + OSAddAtomic(-1, &vm_page_internal_count); + } else { + assert(vm_page_external_count); + OSAddAtomic(-1, &vm_page_external_count); + + if (mem->xpmapped) { + assert(vm_page_xpmapped_external_count); + OSAddAtomic(-1, &vm_page_xpmapped_external_count); + } + } + if (!mem->object->internal && (mem->object->objq.next || mem->object->objq.prev)) { + if (mem->object->resident_page_count == 0) + vm_object_cache_remove(mem->object); + } + if (VM_PAGE_WIRED(mem)) { assert(mem->object->wired_page_count > 0); mem->object->wired_page_count--; + if (!mem->object->wired_page_count) { + VM_OBJECT_UNWIRED(mem->object); + } } assert(mem->object->resident_page_count >= mem->object->wired_page_count); @@ -1294,6 +1566,31 @@ vm_page_remove( vm_page_stats_reusable.reused_remove++; } + if (mem->object->purgable == VM_PURGABLE_DENY) { + owner = TASK_NULL; + } else { + owner = mem->object->vo_purgeable_owner; + } + if (owner && + (mem->object->purgable == VM_PURGABLE_NONVOLATILE || + VM_PAGE_WIRED(mem))) { + /* less non-volatile bytes */ + ledger_debit(owner->ledger, + task_ledgers.purgeable_nonvolatile, + PAGE_SIZE); + /* less footprint */ + ledger_debit(owner->ledger, + task_ledgers.phys_footprint, + PAGE_SIZE); + } else if (owner && + (mem->object->purgable == VM_PURGABLE_VOLATILE || + mem->object->purgable == VM_PURGABLE_EMPTY)) { + assert(! VM_PAGE_WIRED(mem)); + /* less volatile bytes */ + ledger_debit(owner->ledger, + task_ledgers.purgeable_volatile, + PAGE_SIZE); + } if (mem->object->purgable == VM_PURGABLE_VOLATILE) { if (VM_PAGE_WIRED(mem)) { assert(vm_page_purgeable_wired_count > 0); @@ -1303,6 +1600,9 @@ vm_page_remove( OSAddAtomic(-1, &vm_page_purgeable_count); } } + if (mem->object->set_cache_attr == TRUE) + pmap_set_cache_attributes(mem->phys_page, 0); + mem->tabled = FALSE; mem->object = VM_OBJECT_NULL; mem->offset = (vm_object_offset_t) -1; @@ -1318,13 +1618,55 @@ vm_page_remove( * The object must be locked. No side effects. 
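
The rewritten removal loop in vm_page_remove() above walks the hash chain through a pointer-to-pointer ("prev"), so splicing a page out needs no special case for interior nodes; the only change in this diff is that the links are now packed pointers. The same technique on a plain singly linked list, with illustrative types:

#include <stddef.h>

struct node {
	struct node	*next;
	int		key;
};

/* unlink the first node with the given key from a singly linked chain;
 * walking a pointer-to-pointer means head and interior cases look identical */
static struct node *
chain_unlink(struct node **head, int key)
{
	struct node **prev;
	struct node *cur;

	for (prev = head; (cur = *prev) != NULL; prev = &cur->next) {
		if (cur->key == key) {
			*prev = cur->next;	/* splice the node out */
			cur->next = NULL;
			return cur;
		}
	}
	return NULL;
}
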
*/ -unsigned long vm_page_lookup_hint = 0; -unsigned long vm_page_lookup_hint_next = 0; -unsigned long vm_page_lookup_hint_prev = 0; -unsigned long vm_page_lookup_hint_miss = 0; -unsigned long vm_page_lookup_bucket_NULL = 0; -unsigned long vm_page_lookup_miss = 0; +#define VM_PAGE_HASH_LOOKUP_THRESHOLD 10 + +#if DEBUG_VM_PAGE_LOOKUP + +struct { + uint64_t vpl_total; + uint64_t vpl_empty_obj; + uint64_t vpl_bucket_NULL; + uint64_t vpl_hit_hint; + uint64_t vpl_hit_hint_next; + uint64_t vpl_hit_hint_prev; + uint64_t vpl_fast; + uint64_t vpl_slow; + uint64_t vpl_hit; + uint64_t vpl_miss; + + uint64_t vpl_fast_elapsed; + uint64_t vpl_slow_elapsed; +} vm_page_lookup_stats __attribute__((aligned(8))); + +#endif + +#define KDP_VM_PAGE_WALK_MAX 1000 + +vm_page_t +kdp_vm_page_lookup( + vm_object_t object, + vm_object_offset_t offset) +{ + vm_page_t cur_page; + int num_traversed = 0; + + if (not_in_kdp) { + panic("panic: kdp_vm_page_lookup done outside of kernel debugger"); + } + + queue_iterate(&object->memq, cur_page, vm_page_t, listq) { + if (cur_page->offset == offset) { + return cur_page; + } + num_traversed++; + if (num_traversed >= KDP_VM_PAGE_WALK_MAX) { + return VM_PAGE_NULL; + } + } + + return VM_PAGE_NULL; +} vm_page_t vm_page_lookup( @@ -1334,18 +1676,32 @@ vm_page_lookup( vm_page_t mem; vm_page_bucket_t *bucket; queue_entry_t qe; - lck_spin_t *bucket_lock; + lck_spin_t *bucket_lock = NULL; int hash_id; +#if DEBUG_VM_PAGE_LOOKUP + uint64_t start, elapsed; + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total); +#endif vm_object_lock_assert_held(object); + + if (object->resident_page_count == 0) { +#if DEBUG_VM_PAGE_LOOKUP + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj); +#endif + return (VM_PAGE_NULL); + } + mem = object->memq_hint; if (mem != VM_PAGE_NULL) { assert(mem->object == object); if (mem->offset == offset) { - vm_page_lookup_hint++; - return mem; +#if DEBUG_VM_PAGE_LOOKUP + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint); +#endif + return (mem); } qe = queue_next(&mem->listq); @@ -1356,9 +1712,11 @@ vm_page_lookup( assert(next_page->object == object); if (next_page->offset == offset) { - vm_page_lookup_hint_next++; object->memq_hint = next_page; /* new hint */ - return next_page; +#if DEBUG_VM_PAGE_LOOKUP + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next); +#endif + return (next_page); } } qe = queue_prev(&mem->listq); @@ -1370,9 +1728,11 @@ vm_page_lookup( assert(prev_page->object == object); if (prev_page->offset == offset) { - vm_page_lookup_hint_prev++; object->memq_hint = prev_page; /* new hint */ - return prev_page; +#if DEBUG_VM_PAGE_LOOKUP + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev); +#endif + return (prev_page); } } } @@ -1390,32 +1750,73 @@ vm_page_lookup( * at outside the scope of the hash bucket lock... 
this is a * really cheap optimiztion to avoid taking the lock */ - if (bucket->pages == VM_PAGE_NULL) { - vm_page_lookup_bucket_NULL++; - + if (!bucket->page_list) { +#if DEBUG_VM_PAGE_LOOKUP + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL); +#endif return (VM_PAGE_NULL); } - bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK]; - lck_spin_lock(bucket_lock); +#if DEBUG_VM_PAGE_LOOKUP + start = mach_absolute_time(); +#endif + if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) { + /* + * on average, it's roughly 3 times faster to run a short memq list + * than to take the spin lock and go through the hash list + */ + mem = (vm_page_t)queue_first(&object->memq); - for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) { - VM_PAGE_CHECK(mem); - if ((mem->object == object) && (mem->offset == offset)) - break; + while (!queue_end(&object->memq, (queue_entry_t)mem)) { + + if (mem->offset == offset) + break; + + mem = (vm_page_t)queue_next(&mem->listq); + } + if (queue_end(&object->memq, (queue_entry_t)mem)) + mem = NULL; + } else { + + bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK]; + + lck_spin_lock(bucket_lock); + + for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = VM_PAGE_UNPACK_PTR(mem->next_m)) { +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ + VM_PAGE_CHECK(mem); +#endif + if ((mem->object == object) && (mem->offset == offset)) + break; + } + lck_spin_unlock(bucket_lock); } - lck_spin_unlock(bucket_lock); +#if DEBUG_VM_PAGE_LOOKUP + elapsed = mach_absolute_time() - start; + + if (bucket_lock) { + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow); + OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed); + } else { + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast); + OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed); + } + if (mem != VM_PAGE_NULL) + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit); + else + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss); +#endif if (mem != VM_PAGE_NULL) { - if (object->memq_hint != VM_PAGE_NULL) { - vm_page_lookup_hint_miss++; - } assert(mem->object == object); - object->memq_hint = mem; - } else - vm_page_lookup_miss++; - return(mem); + object->memq_hint = mem; + } + return (mem); } @@ -1434,8 +1835,13 @@ vm_page_rename( vm_object_offset_t new_offset, boolean_t encrypted_ok) { + boolean_t internal_to_external, external_to_internal; + vm_tag_t tag; + assert(mem->object != new_object); + assert(mem->object); + /* * ENCRYPTED SWAP: * The encryption key is based on the page's memory object @@ -1464,8 +1870,39 @@ vm_page_rename( */ vm_page_lockspin_queues(); + internal_to_external = FALSE; + external_to_internal = FALSE; + + if (mem->local) { + /* + * it's much easier to get the vm_page_pageable_xxx accounting correct + * if we first move the page to the active queue... it's going to end + * up there anyway, and we don't do vm_page_rename's frequently enough + * for this to matter. 
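
The reworked vm_page_lookup() above first tries the unlocked memq_hint, then, for objects holding at most VM_PAGE_HASH_LOOKUP_THRESHOLD resident pages, walks the object's own list instead of taking a bucket spin lock (the comment notes the short walk is roughly three times faster on average). A self-contained sketch of that size-based choice; the hash mix, the single mutex and all names below are illustrative, since the kernel shards its bucket locks:

#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#define SHORT_LIST_THRESHOLD	10	/* mirrors VM_PAGE_HASH_LOOKUP_THRESHOLD */
#define BUCKET_COUNT		1024u	/* power of two, illustrative */

struct page {
	struct page	*obj_next;	/* object's own resident-page list */
	struct page	*hash_next;	/* global hash-bucket chain */
	const void	*object;
	unsigned long	offset;
};

struct object {
	struct page	*resident_list;
	unsigned int	resident_count;
};

static struct page	*buckets[BUCKET_COUNT];
static pthread_mutex_t	bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static struct page *
page_lookup(struct object *obj, unsigned long offset)
{
	struct page *p;

	if (obj->resident_count <= SHORT_LIST_THRESHOLD) {
		/* small object: a short linear walk avoids the shared lock entirely */
		for (p = obj->resident_list; p != NULL; p = p->obj_next)
			if (p->offset == offset)
				return p;
		return NULL;
	}

	/* large object: go through the global hash under its lock */
	pthread_mutex_lock(&bucket_lock);
	p = buckets[((uintptr_t)obj + (offset >> 12)) & (BUCKET_COUNT - 1)];
	while (p != NULL && !(p->object == obj && p->offset == offset))
		p = p->hash_next;
	pthread_mutex_unlock(&bucket_lock);

	return p;
}
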
+ */ + vm_page_queues_remove(mem); + vm_page_activate(mem); + } + if (mem->active || mem->inactive || mem->speculative) { + if (mem->object->internal && !new_object->internal) { + internal_to_external = TRUE; + } + if (!mem->object->internal && new_object->internal) { + external_to_internal = TRUE; + } + } + + tag = mem->object->wire_tag; vm_page_remove(mem, TRUE); - vm_page_insert_internal(mem, new_object, new_offset, TRUE, TRUE); + vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL); + + if (internal_to_external) { + vm_page_pageable_internal_count--; + vm_page_pageable_external_count++; + } else if (external_to_internal) { + vm_page_pageable_external_count--; + vm_page_pageable_internal_count++; + } vm_page_unlock_queues(); } @@ -1480,11 +1917,40 @@ vm_page_rename( void vm_page_init( vm_page_t mem, - ppnum_t phys_page) + ppnum_t phys_page, + boolean_t lopage) { assert(phys_page); + +#if DEBUG + if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) { + if (!(pmap_valid_page(phys_page))) { + panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page); + } + } +#endif *mem = vm_page_template; mem->phys_page = phys_page; +#if 0 + /* + * we're leaving this turned off for now... currently pages + * come off the free list and are either immediately dirtied/referenced + * due to zero-fill or COW faults, or are used to read or write files... + * in the file I/O case, the UPL mechanism takes care of clearing + * the state of the HW ref/mod bits in a somewhat fragile way. + * Since we may change the way this works in the future (to toughen it up), + * I'm leaving this as a reminder of where these bits could get cleared + */ + + /* + * make sure both the h/w referenced and modified bits are + * clear at this point... we are especially dependent on + * not finding a 'stale' h/w modified in a number of spots + * once this page goes back into use + */ + pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED); +#endif + mem->lopage = lopage; } /* @@ -1494,24 +1960,25 @@ vm_page_init( * Returns VM_PAGE_NULL if there are no free pages. */ int c_vm_page_grab_fictitious = 0; +int c_vm_page_grab_fictitious_failed = 0; int c_vm_page_release_fictitious = 0; int c_vm_page_more_fictitious = 0; -extern vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr); - vm_page_t vm_page_grab_fictitious_common( ppnum_t phys_addr) { - register vm_page_t m; + vm_page_t m; + + if ((m = (vm_page_t)zget(vm_page_zone))) { - m = (vm_page_t)zget(vm_page_zone); - if (m) { - vm_page_init(m, phys_addr); + vm_page_init(m, phys_addr, FALSE); m->fictitious = TRUE; - } - c_vm_page_grab_fictitious++; + c_vm_page_grab_fictitious++; + } else + c_vm_page_grab_fictitious_failed++; + return m; } @@ -1527,35 +1994,30 @@ vm_page_grab_guard(void) return vm_page_grab_fictitious_common(vm_page_guard_addr); } + /* * vm_page_release_fictitious: * - * Release a fictitious page to the free list. + * Release a fictitious page to the zone pool */ - void vm_page_release_fictitious( - register vm_page_t m) + vm_page_t m) { assert(!m->free); - assert(m->busy); assert(m->fictitious); assert(m->phys_page == vm_page_fictitious_addr || m->phys_page == vm_page_guard_addr); c_vm_page_release_fictitious++; -#if DEBUG - if (m->free) - panic("vm_page_release_fictitious"); -#endif - m->free = TRUE; + zfree(vm_page_zone, m); } /* * vm_page_more_fictitious: * - * Add more fictitious pages to the free list. + * Add more fictitious pages to the zone. * Allowed to block. 
This routine is way intimate * with the zones code, for several reasons: * 1. we need to carve some page structures out of physical @@ -1569,23 +2031,13 @@ vm_page_release_fictitious( * permanent allocation of a resource. * 3. To smooth allocation humps, we allocate single pages * with kernel_memory_allocate(), and cram them into the - * zone. This also allows us to initialize the vm_page_t's - * on the way into the zone, so that zget() always returns - * an initialized structure. The zone free element pointer - * and the free page pointer are both the first item in the - * vm_page_t. - * 4. By having the pages in the zone pre-initialized, we need - * not keep 2 levels of lists. The garbage collector simply - * scans our list, and reduces physical memory usage as it - * sees fit. + * zone. */ void vm_page_more_fictitious(void) { - register vm_page_t m; - vm_offset_t addr; - kern_return_t retval; - int i; + vm_offset_t addr; + kern_return_t retval; c_vm_page_more_fictitious++; @@ -1621,10 +2073,10 @@ void vm_page_more_fictitious(void) retval = kernel_memory_allocate(zone_map, &addr, PAGE_SIZE, VM_PROT_ALL, - KMA_KOBJECT|KMA_NOPAGEWAIT); + KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE); if (retval != KERN_SUCCESS) { /* - * No page was available. Tell the pageout daemon, drop the + * No page was available. Drop the * lock to give another thread a chance at it, and * wait for the pageout daemon to make progress. */ @@ -1632,18 +2084,9 @@ void vm_page_more_fictitious(void) vm_page_wait(THREAD_UNINT); return; } - /* - * Initialize as many vm_page_t's as will fit on this page. This - * depends on the zone code disturbing ONLY the first item of - * each zone element. - */ - m = (vm_page_t)addr; - for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) { - vm_page_init(m, vm_page_fictitious_addr); - m->fictitious = TRUE; - m++; - } - zcram(vm_page_zone, (void *) addr, PAGE_SIZE); + + zcram(vm_page_zone, addr, PAGE_SIZE); + lck_mtx_unlock(&vm_page_alloc_lock); } @@ -1668,7 +2111,7 @@ vm_pool_low(void) * this is an interface to support bring-up of drivers * on platforms with physical memory > 4G... */ -int vm_himemory_mode = 0; +int vm_himemory_mode = 2; /* @@ -1676,43 +2119,65 @@ int vm_himemory_mode = 0; * incapable of generating DMAs with more than 32 bits * of address on platforms with physical memory > 4G... */ -unsigned int vm_lopage_free_count = 0; -unsigned int vm_lopage_max_count = 0; +unsigned int vm_lopages_allocated_q = 0; +unsigned int vm_lopages_allocated_cpm_success = 0; +unsigned int vm_lopages_allocated_cpm_failed = 0; queue_head_t vm_lopage_queue_free; vm_page_t vm_page_grablo(void) { - register vm_page_t mem; - unsigned int vm_lopage_alloc_count; + vm_page_t mem; - if (vm_lopage_poolsize == 0) + if (vm_lopage_needed == FALSE) return (vm_page_grab()); lck_mtx_lock_spin(&vm_page_queue_free_lock); - if (! 
queue_empty(&vm_lopage_queue_free)) { - queue_remove_first(&vm_lopage_queue_free, - mem, - vm_page_t, - pageq); - assert(mem->free); - assert(mem->busy); - assert(!mem->pmapped); - assert(!mem->wpmapped); + if ( !queue_empty(&vm_lopage_queue_free)) { + queue_remove_first(&vm_lopage_queue_free, + mem, + vm_page_t, + pageq); + assert(vm_lopage_free_count); - mem->pageq.next = NULL; - mem->pageq.prev = NULL; - mem->free = FALSE; + vm_lopage_free_count--; + vm_lopages_allocated_q++; - vm_lopage_free_count--; - vm_lopage_alloc_count = (vm_lopage_poolend - vm_lopage_poolstart) - vm_lopage_free_count; - if (vm_lopage_alloc_count > vm_lopage_max_count) - vm_lopage_max_count = vm_lopage_alloc_count; + if (vm_lopage_free_count < vm_lopage_lowater) + vm_lopage_refill = TRUE; + + lck_mtx_unlock(&vm_page_queue_free_lock); } else { - mem = VM_PAGE_NULL; + lck_mtx_unlock(&vm_page_queue_free_lock); + + if (cpm_allocate(PAGE_SIZE, &mem, atop(0xffffffff), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) { + + lck_mtx_lock_spin(&vm_page_queue_free_lock); + vm_lopages_allocated_cpm_failed++; + lck_mtx_unlock(&vm_page_queue_free_lock); + + return (VM_PAGE_NULL); + } + mem->busy = TRUE; + + vm_page_lockspin_queues(); + + mem->gobbled = FALSE; + vm_page_gobble_count--; + vm_page_wire_count--; + + vm_lopages_allocated_cpm_success++; + vm_page_unlock_queues(); } - lck_mtx_unlock(&vm_page_queue_free_lock); + assert(mem->busy); + assert(!mem->free); + assert(!mem->pmapped); + assert(!mem->wpmapped); + assert(!pmap_is_noencrypt(mem->phys_page)); + + mem->pageq.next = NULL; + mem->pageq.prev = NULL; return (mem); } @@ -1739,8 +2204,6 @@ vm_page_grablo(void) * request from the per-cpu queue. */ -#define COLOR_GROUPS_TO_STEAL 4 - vm_page_t vm_page_grab( void ) @@ -1754,9 +2217,9 @@ vm_page_grab( void ) return_page_from_cpu_list: PROCESSOR_DATA(current_processor(), page_grab_count) += 1; PROCESSOR_DATA(current_processor(), free_pages) = mem->pageq.next; - mem->pageq.next = NULL; enable_preemption(); + mem->pageq.next = NULL; assert(mem->listq.next == NULL && mem->listq.prev == NULL); assert(mem->tabled == FALSE); @@ -1768,6 +2231,11 @@ return_page_from_cpu_list: assert(!mem->encrypted); assert(!mem->pmapped); assert(!mem->wpmapped); + assert(!mem->active); + assert(!mem->inactive); + assert(!mem->throttled); + assert(!mem->speculative); + assert(!pmap_is_noencrypt(mem->phys_page)); return mem; } @@ -1778,19 +2246,18 @@ return_page_from_cpu_list: * Optionally produce warnings if the wire or gobble * counts exceed some threshold. 
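
vm_page_grab() above serves most requests from a per-processor list of free pages, and only takes the global free-queue lock when that list runs dry, refilling up to vm_free_magazine_refill_limit pages in one go (the loop just below). A user-space sketch of the same two-level scheme; thread-local storage and a mutex stand in for the kernel's per-processor data and free-queue lock, and page coloring is left out:

#include <stddef.h>
#include <pthread.h>

#define MAGAZINE_REFILL 64		/* stands in for vm_free_magazine_refill_limit */

struct page { struct page *next; };

static __thread struct page	*cpu_free_list;		/* per-"CPU" magazine */
static struct page		*global_free_list;
static unsigned int		global_free_count;
static pthread_mutex_t		global_free_lock = PTHREAD_MUTEX_INITIALIZER;

static struct page *
page_grab(void)
{
	struct page *p = cpu_free_list;

	if (p != NULL) {		/* fast path: no shared lock taken */
		cpu_free_list = p->next;
		p->next = NULL;
		return p;
	}

	/* slow path: one lock round trip refills this CPU's list in bulk */
	pthread_mutex_lock(&global_free_lock);
	for (unsigned int n = 0; n < MAGAZINE_REFILL && global_free_list != NULL; n++) {
		struct page *q = global_free_list;

		global_free_list = q->next;
		global_free_count--;
		q->next = cpu_free_list;
		cpu_free_list = q;
	}
	pthread_mutex_unlock(&global_free_lock);

	return (cpu_free_list != NULL) ? page_grab() : NULL;
}
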
*/ - if (vm_page_wire_count_warning > 0 - && vm_page_wire_count >= vm_page_wire_count_warning) { +#if VM_PAGE_WIRE_COUNT_WARNING + if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) { printf("mk: vm_page_grab(): high wired page count of %d\n", vm_page_wire_count); - assert(vm_page_wire_count < vm_page_wire_count_warning); } - if (vm_page_gobble_count_warning > 0 - && vm_page_gobble_count >= vm_page_gobble_count_warning) { +#endif +#if VM_PAGE_GOBBLE_COUNT_WARNING + if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) { printf("mk: vm_page_grab(): high gobbled page count of %d\n", vm_page_gobble_count); - assert(vm_page_gobble_count < vm_page_gobble_count_warning); } - +#endif lck_mtx_lock_spin(&vm_page_queue_free_lock); /* @@ -1835,17 +2302,17 @@ return_page_from_cpu_list: if (vm_page_free_count <= vm_page_free_reserved) pages_to_steal = 1; else { - pages_to_steal = COLOR_GROUPS_TO_STEAL * vm_colors; - - if (pages_to_steal > (vm_page_free_count - vm_page_free_reserved)) + if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) + pages_to_steal = vm_free_magazine_refill_limit; + else pages_to_steal = (vm_page_free_count - vm_page_free_reserved); } color = PROCESSOR_DATA(current_processor(), start_color); head = tail = NULL; + vm_page_free_count -= pages_to_steal; + while (pages_to_steal--) { - if (--vm_page_free_count < vm_page_free_count_minimum) - vm_page_free_count_minimum = vm_page_free_count; while (queue_empty(&vm_page_queue_free[color])) color = (color + 1) & vm_color_mask; @@ -1857,6 +2324,11 @@ return_page_from_cpu_list: mem->pageq.next = NULL; mem->pageq.prev = NULL; + assert(!mem->active); + assert(!mem->inactive); + assert(!mem->throttled); + assert(!mem->speculative); + color = (color + 1) & vm_color_mask; if (head == NULL) @@ -1865,7 +2337,6 @@ return_page_from_cpu_list: tail->pageq.next = (queue_t)mem; tail = mem; - mem->pageq.prev = NULL; assert(mem->listq.next == NULL && mem->listq.prev == NULL); assert(mem->tabled == FALSE); assert(mem->object == VM_OBJECT_NULL); @@ -1879,7 +2350,10 @@ return_page_from_cpu_list: assert(!mem->encrypted); assert(!mem->pmapped); assert(!mem->wpmapped); + assert(!pmap_is_noencrypt(mem->phys_page)); } + lck_mtx_unlock(&vm_page_queue_free_lock); + PROCESSOR_DATA(current_processor(), free_pages) = head->pageq.next; PROCESSOR_DATA(current_processor(), start_color) = color; @@ -1890,8 +2364,6 @@ return_page_from_cpu_list: mem = head; mem->pageq.next = NULL; - lck_mtx_unlock(&vm_page_queue_free_lock); - enable_preemption(); } /* @@ -1905,29 +2377,12 @@ return_page_from_cpu_list: * it doesn't really matter. */ if ((vm_page_free_count < vm_page_free_min) || - ((vm_page_free_count < vm_page_free_target) && - ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min))) - thread_wakeup((event_t) &vm_page_free_wanted); - -#if CONFIG_EMBEDDED - { - int percent_avail; - - /* - * Decide if we need to poke the memorystatus notification thread. 
- */ - percent_avail = - (vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count + vm_page_free_count + - (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 / - atop_64(max_mem); - if (percent_avail <= (kern_memorystatus_level - 5)) { - kern_memorystatus_level = percent_avail; - thread_wakeup((event_t)&kern_memorystatus_wakeup); - } - } -#endif + ((vm_page_free_count < vm_page_free_target) && + ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min))) + thread_wakeup((event_t) &vm_page_free_wanted); + VM_CHECK_MEMORYSTATUS; + // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */ return mem; @@ -1946,29 +2401,21 @@ vm_page_release( unsigned int color; int need_wakeup = 0; int need_priv_wakeup = 0; -#if 0 - unsigned int pindex; - phys_entry *physent; - physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */ - if(physent->ppLink & ppN) { /* (BRINGUP) */ - panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page); - } - physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */ -#endif + assert(!mem->private && !mem->fictitious); if (vm_page_free_verify) { assert(pmap_verify_free(mem->phys_page)); } // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */ + pmap_clear_noencrypt(mem->phys_page); lck_mtx_lock_spin(&vm_page_queue_free_lock); #if DEBUG if (mem->free) panic("vm_page_release"); #endif - mem->free = TRUE; assert(mem->busy); assert(!mem->laundry); @@ -1978,7 +2425,9 @@ vm_page_release( assert(mem->listq.next == NULL && mem->listq.prev == NULL); - if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) { + if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) && + vm_lopage_free_count < vm_lopage_free_limit && + mem->phys_page < max_valid_low_ppnum) { /* * this exists to support hardware controllers * incapable of generating DMAs with more than 32 bits @@ -1989,7 +2438,15 @@ vm_page_release( vm_page_t, pageq); vm_lopage_free_count++; + + if (vm_lopage_free_count >= vm_lopage_free_limit) + vm_lopage_refill = FALSE; + + mem->lopage = TRUE; } else { + mem->lopage = FALSE; + mem->free = TRUE; + color = mem->phys_page & vm_color_mask; queue_enter_first(&vm_page_queue_free[color], mem, @@ -2033,25 +2490,33 @@ vm_page_release( else if (need_wakeup) thread_wakeup_one((event_t) &vm_page_free_count); -#if CONFIG_EMBEDDED - { - int percent_avail; + VM_CHECK_MEMORYSTATUS; +} - /* - * Decide if we need to poke the memorystatus notification thread. - * Locking is not a big issue, as only a single thread delivers these. - */ - percent_avail = - (vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count + vm_page_free_count + - (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 / - atop_64(max_mem); - if (percent_avail >= (kern_memorystatus_level + 5)) { - kern_memorystatus_level = percent_avail; - thread_wakeup((event_t)&kern_memorystatus_wakeup); - } +/* + * This version of vm_page_release() is used only at startup + * when we are single-threaded and pages are being released + * for the first time. Hence, no locking or unnecessary checks are made. + * Note: VM_CHECK_MEMORYSTATUS invoked by the caller. 
+ */ +void +vm_page_release_startup( + register vm_page_t mem) +{ + queue_t queue_free; + + if (vm_lopage_free_count < vm_lopage_free_limit && + mem->phys_page < max_valid_low_ppnum) { + mem->lopage = TRUE; + vm_lopage_free_count++; + queue_free = &vm_lopage_queue_free; + } else { + mem->lopage = FALSE; + mem->free = TRUE; + vm_page_free_count++; + queue_free = &vm_page_queue_free[mem->phys_page & vm_color_mask]; } -#endif + queue_enter_first(queue_free, mem, vm_page_t, pageq); } /* @@ -2103,8 +2568,12 @@ vm_page_wait( if (need_wakeup) thread_wakeup((event_t)&vm_page_free_wanted); - if (wait_result == THREAD_WAITING) + if (wait_result == THREAD_WAITING) { + VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START, + vm_page_free_wanted_privileged, vm_page_free_wanted, 0, 0); wait_result = thread_block(THREAD_CONTINUE_NULL); + VM_DEBUG_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0); + } return(wait_result == THREAD_AWAKENED); } else { @@ -2139,24 +2608,6 @@ vm_page_alloc( return(mem); } -vm_page_t -vm_page_alloclo( - vm_object_t object, - vm_object_offset_t offset) -{ - register vm_page_t mem; - - vm_object_lock_assert_exclusive(object); - mem = vm_page_grablo(); - if (mem == VM_PAGE_NULL) - return VM_PAGE_NULL; - - vm_page_insert(mem, object, offset); - - return(mem); -} - - /* * vm_page_alloc_guard: * @@ -2186,16 +2637,16 @@ vm_page_alloc_guard( counter(unsigned int c_laundry_pages_freed = 0;) /* - * vm_page_free: + * vm_page_free_prepare: * - * Returns the given page to the free list, - * disassociating it with any VM object. + * Removes page from any queue it may be on + * and disassociates it from its VM object. * * Object and page queues must be locked prior to entry. */ static void vm_page_free_prepare( - register vm_page_t mem) + vm_page_t mem) { vm_page_free_prepare_queues(mem); vm_page_free_prepare_object(mem, TRUE); @@ -2209,35 +2660,69 @@ vm_page_free_prepare_queues( VM_PAGE_CHECK(mem); assert(!mem->free); assert(!mem->cleaning); - assert(!mem->pageout); -#if DEBUG + +#if MACH_ASSERT || DEBUG lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); if (mem->free) panic("vm_page_free: freeing page on free list\n"); -#endif +#endif /* MACH_ASSERT || DEBUG */ if (mem->object) { vm_object_lock_assert_exclusive(mem->object); } - if (mem->laundry) { /* * We may have to free a page while it's being laundered * if we lost its pager (due to a forced unmount, for example). - * We need to call vm_pageout_throttle_up() before removing - * the page from its VM object, so that we can find out on - * which pageout queue the page is on. 
+ * We need to call vm_pageout_steal_laundry() before removing + * the page from its VM object, so that we can remove it + * from its pageout queue and adjust the laundry accounting */ - vm_pageout_throttle_up(mem); + vm_pageout_steal_laundry(mem, TRUE); counter(++c_laundry_pages_freed); } - VM_PAGE_QUEUES_REMOVE(mem); /* clears local/active/inactive/throttled/speculative */ + + vm_page_queues_remove(mem); /* clears local/active/inactive/throttled/speculative */ if (VM_PAGE_WIRED(mem)) { if (mem->object) { assert(mem->object->wired_page_count > 0); mem->object->wired_page_count--; + if (!mem->object->wired_page_count) { + VM_OBJECT_UNWIRED(mem->object); + } + assert(mem->object->resident_page_count >= mem->object->wired_page_count); + + if (mem->object->purgable == VM_PURGABLE_VOLATILE) { + OSAddAtomic(+1, &vm_page_purgeable_count); + assert(vm_page_purgeable_wired_count > 0); + OSAddAtomic(-1, &vm_page_purgeable_wired_count); + } + if ((mem->object->purgable == VM_PURGABLE_VOLATILE || + mem->object->purgable == VM_PURGABLE_EMPTY) && + mem->object->vo_purgeable_owner != TASK_NULL) { + task_t owner; + + owner = mem->object->vo_purgeable_owner; + /* + * While wired, this page was accounted + * as "non-volatile" but it should now + * be accounted as "volatile". + */ + /* one less "non-volatile"... */ + ledger_debit(owner->ledger, + task_ledgers.purgeable_nonvolatile, + PAGE_SIZE); + /* ... and "phys_footprint" */ + ledger_debit(owner->ledger, + task_ledgers.phys_footprint, + PAGE_SIZE); + /* one more "volatile" */ + ledger_credit(owner->ledger, + task_ledgers.purgeable_volatile, + PAGE_SIZE); + } } if (!mem->private && !mem->fictitious) vm_page_wire_count--; @@ -2256,10 +2741,6 @@ vm_page_free_prepare_object( vm_page_t mem, boolean_t remove_from_hash) { - if (mem->object) { - vm_object_lock_assert_exclusive(mem->object); - } - if (mem->tabled) vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */ @@ -2270,33 +2751,26 @@ vm_page_free_prepare_object( mem->fictitious = TRUE; mem->phys_page = vm_page_fictitious_addr; } - if (mem->fictitious) { - /* Some of these may be unnecessary */ - mem->gobbled = FALSE; - mem->busy = TRUE; - mem->absent = FALSE; - mem->error = FALSE; - mem->dirty = FALSE; - mem->precious = FALSE; - mem->reference = FALSE; - mem->encrypted = FALSE; - mem->encrypted_cleaning = FALSE; - mem->pmapped = FALSE; - mem->wpmapped = FALSE; - mem->reusable = FALSE; - } else { - if (mem->zero_fill == TRUE) - VM_ZF_COUNT_DECR(); - vm_page_init(mem, mem->phys_page); + if ( !mem->fictitious) { + vm_page_init(mem, mem->phys_page, mem->lopage); } } +/* + * vm_page_free: + * + * Returns the given page to the free list, + * disassociating it with any VM object. + * + * Object and page queues must be locked prior to entry. + */ void vm_page_free( vm_page_t mem) { vm_page_free_prepare(mem); + if (mem->fictitious) { vm_page_release_fictitious(mem); } else { @@ -2323,207 +2797,163 @@ vm_page_free_unlocked( } } + /* * Free a list of pages. The list can be up to several hundred pages, * as blocked up by vm_pageout_scan(). * The big win is not having to take the free list lock once - * per page. We sort the incoming pages into n lists, one for - * each color. + * per page. 
*/ void vm_page_free_list( - vm_page_t mem, + vm_page_t freeq, boolean_t prepare_object) { + vm_page_t mem; vm_page_t nxt; - int pg_count = 0; - int color; - int inuse_list_head = -1; + vm_page_t local_freeq; + int pg_count; - queue_head_t free_list[MAX_COLORS]; - int inuse[MAX_COLORS]; + while (freeq) { - for (color = 0; color < (signed) vm_colors; color++) { - queue_init(&free_list[color]); - } - - while (mem) { - assert(!mem->inactive); - assert(!mem->active); - assert(!mem->throttled); - assert(!mem->free); - assert(!mem->speculative); - assert(mem->pageq.prev == NULL); + pg_count = 0; + local_freeq = VM_PAGE_NULL; + mem = freeq; + + /* + * break up the processing into smaller chunks so + * that we can 'pipeline' the pages onto the + * free list w/o introducing too much + * contention on the global free queue lock + */ + while (mem && pg_count < 64) { + + assert(!mem->inactive); + assert(!mem->active); + assert(!mem->throttled); + assert(!mem->free); + assert(!mem->speculative); + assert(!VM_PAGE_WIRED(mem)); + assert(mem->pageq.prev == NULL); - nxt = (vm_page_t)(mem->pageq.next); + nxt = (vm_page_t)(mem->pageq.next); - if (prepare_object == TRUE) - vm_page_free_prepare_object(mem, TRUE); + if (vm_page_free_verify && !mem->fictitious && !mem->private) { + assert(pmap_verify_free(mem->phys_page)); + } + if (prepare_object == TRUE) + vm_page_free_prepare_object(mem, TRUE); - if (vm_page_free_verify && !mem->fictitious && !mem->private) { - assert(pmap_verify_free(mem->phys_page)); - } - assert(mem->busy); + if (!mem->fictitious) { + assert(mem->busy); - if (!mem->fictitious) { - if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) { - mem->pageq.next = NULL; - vm_page_release(mem); - } else { + if ((mem->lopage == TRUE || vm_lopage_refill == TRUE) && + vm_lopage_free_count < vm_lopage_free_limit && + mem->phys_page < max_valid_low_ppnum) { + mem->pageq.next = NULL; + vm_page_release(mem); + } else { + /* + * IMPORTANT: we can't set the page "free" here + * because that would make the page eligible for + * a physically-contiguous allocation (see + * vm_page_find_contiguous()) right away (we don't + * hold the vm_page_queue_free lock). That would + * cause trouble because the page is not actually + * in the free queue yet... + */ + mem->pageq.next = (queue_entry_t)local_freeq; + local_freeq = mem; + pg_count++; - /* - * IMPORTANT: we can't set the page "free" here - * because that would make the page eligible for - * a physically-contiguous allocation (see - * vm_page_find_contiguous()) right away (we don't - * hold the vm_page_queue_free lock). That would - * cause trouble because the page is not actually - * in the free queue yet... 
- */ - color = mem->phys_page & vm_color_mask; - if (queue_empty(&free_list[color])) { - inuse[color] = inuse_list_head; - inuse_list_head = color; + pmap_clear_noencrypt(mem->phys_page); } - queue_enter_first(&free_list[color], - mem, - vm_page_t, - pageq); - pg_count++; + } else { + assert(mem->phys_page == vm_page_fictitious_addr || + mem->phys_page == vm_page_guard_addr); + vm_page_release_fictitious(mem); } - } else { - assert(mem->phys_page == vm_page_fictitious_addr || - mem->phys_page == vm_page_guard_addr); - vm_page_release_fictitious(mem); + mem = nxt; } - mem = nxt; - } - if (pg_count) { - unsigned int avail_free_count; - unsigned int need_wakeup = 0; - unsigned int need_priv_wakeup = 0; + freeq = mem; + + if ( (mem = local_freeq) ) { + unsigned int avail_free_count; + unsigned int need_wakeup = 0; + unsigned int need_priv_wakeup = 0; - lck_mtx_lock_spin(&vm_page_queue_free_lock); + lck_mtx_lock_spin(&vm_page_queue_free_lock); - color = inuse_list_head; - - while( color != -1 ) { - vm_page_t first, last; - vm_page_t first_free; + while (mem) { + int color; + + nxt = (vm_page_t)(mem->pageq.next); - /* - * Now that we hold the vm_page_queue_free lock, - * it's safe to mark all pages in our local queue - * as "free"... - */ - queue_iterate(&free_list[color], - mem, - vm_page_t, - pageq) { assert(!mem->free); assert(mem->busy); mem->free = TRUE; - } - /* - * ... and insert our local queue at the head of - * the global free queue. - */ - first = (vm_page_t) queue_first(&free_list[color]); - last = (vm_page_t) queue_last(&free_list[color]); - first_free = (vm_page_t) queue_first(&vm_page_queue_free[color]); - if (queue_empty(&vm_page_queue_free[color])) { - queue_last(&vm_page_queue_free[color]) = - (queue_entry_t) last; - } else { - queue_prev(&first_free->pageq) = - (queue_entry_t) last; - } - queue_first(&vm_page_queue_free[color]) = - (queue_entry_t) first; - queue_prev(&first->pageq) = - (queue_entry_t) &vm_page_queue_free[color]; - queue_next(&last->pageq) = - (queue_entry_t) first_free; - - /* next color */ - color = inuse[color]; - } - - vm_page_free_count += pg_count; - avail_free_count = vm_page_free_count; - - if (vm_page_free_wanted_privileged > 0 && - avail_free_count > 0) { - if (avail_free_count < vm_page_free_wanted_privileged) { - need_priv_wakeup = avail_free_count; - vm_page_free_wanted_privileged -= - avail_free_count; - avail_free_count = 0; - } else { - need_priv_wakeup = vm_page_free_wanted_privileged; - vm_page_free_wanted_privileged = 0; - avail_free_count -= - vm_page_free_wanted_privileged; + color = mem->phys_page & vm_color_mask; + queue_enter_first(&vm_page_queue_free[color], + mem, + vm_page_t, + pageq); + mem = nxt; } - } + vm_page_free_count += pg_count; + avail_free_count = vm_page_free_count; - if (vm_page_free_wanted > 0 && - avail_free_count > vm_page_free_reserved) { - unsigned int available_pages; + if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) { + + if (avail_free_count < vm_page_free_wanted_privileged) { + need_priv_wakeup = avail_free_count; + vm_page_free_wanted_privileged -= avail_free_count; + avail_free_count = 0; + } else { + need_priv_wakeup = vm_page_free_wanted_privileged; + vm_page_free_wanted_privileged = 0; + avail_free_count -= vm_page_free_wanted_privileged; + } + } + if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) { + unsigned int available_pages; - available_pages = (avail_free_count - - vm_page_free_reserved); + available_pages = avail_free_count - vm_page_free_reserved; - if 
(available_pages >= vm_page_free_wanted) { - need_wakeup = vm_page_free_wanted; - vm_page_free_wanted = 0; - } else { - need_wakeup = available_pages; - vm_page_free_wanted -= available_pages; + if (available_pages >= vm_page_free_wanted) { + need_wakeup = vm_page_free_wanted; + vm_page_free_wanted = 0; + } else { + need_wakeup = available_pages; + vm_page_free_wanted -= available_pages; + } } - } - lck_mtx_unlock(&vm_page_queue_free_lock); + lck_mtx_unlock(&vm_page_queue_free_lock); - if (need_priv_wakeup != 0) { - /* - * There shouldn't be that many VM-privileged threads, - * so let's wake them all up, even if we don't quite - * have enough pages to satisfy them all. - */ - thread_wakeup((event_t)&vm_page_free_wanted_privileged); - } - if (need_wakeup != 0 && vm_page_free_wanted == 0) { - /* - * We don't expect to have any more waiters - * after this, so let's wake them all up at - * once. - */ - thread_wakeup((event_t) &vm_page_free_count); - } else for (; need_wakeup != 0; need_wakeup--) { - /* - * Wake up one waiter per page we just released. - */ - thread_wakeup_one((event_t) &vm_page_free_count); - } -#if CONFIG_EMBEDDED - { - int percent_avail; + if (need_priv_wakeup != 0) { + /* + * There shouldn't be that many VM-privileged threads, + * so let's wake them all up, even if we don't quite + * have enough pages to satisfy them all. + */ + thread_wakeup((event_t)&vm_page_free_wanted_privileged); + } + if (need_wakeup != 0 && vm_page_free_wanted == 0) { + /* + * We don't expect to have any more waiters + * after this, so let's wake them all up at + * once. + */ + thread_wakeup((event_t) &vm_page_free_count); + } else for (; need_wakeup != 0; need_wakeup--) { + /* + * Wake up one waiter per page we just released. + */ + thread_wakeup_one((event_t) &vm_page_free_count); + } - /* - * Decide if we need to poke the memorystatus notification thread. - */ - percent_avail = - (vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count + vm_page_free_count + - (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 / - atop_64(max_mem); - if (percent_avail >= (kern_memorystatus_level + 5)) { - kern_memorystatus_level = percent_avail; - thread_wakeup((event_t)&kern_memorystatus_wakeup); - } + VM_CHECK_MEMORYSTATUS; } -#endif } } @@ -2537,9 +2967,13 @@ vm_page_free_list( * * The page's object and the page queues must be locked. 
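
The reworked vm_page_free_list() above no longer sorts pages into per-color staging queues up front; instead it peels off batches of at most 64 pages with no lock held, then takes vm_page_queue_free_lock once per batch to mark the pages free, splice them onto the color queues, and work out the privileged and normal wakeups. A rough standalone sketch of that batching shape, using a plain mutex and simplified types in place of the kernel's locks and queue macros:

	#include <pthread.h>

	#define BATCH_LIMIT	64
	#define FREE_COLORS	8

	struct spage { struct spage *next; int color; };

	extern pthread_mutex_t	free_lock;		/* plays the role of vm_page_queue_free_lock */
	extern struct spage	*free_q[FREE_COLORS];
	extern unsigned long	free_count;

	static void
	free_page_list(struct spage *list)
	{
		while (list != NULL) {
			struct spage *local = NULL;
			int n = 0;

			/* phase 1: no lock held -- peel off a bounded batch */
			while (list != NULL && n < BATCH_LIMIT) {
				struct spage *p = list;

				list = p->next;
				p->next = local;	/* not yet marked free: the lock isn't held */
				local = p;
				n++;
			}
			/* phase 2: one lock acquisition covers the whole batch */
			pthread_mutex_lock(&free_lock);
			while (local != NULL) {
				struct spage *p = local;

				local = p->next;
				p->next = free_q[p->color];
				free_q[p->color] = p;
			}
			free_count += n;
			pthread_mutex_unlock(&free_lock);
		}
	}

The bound on the inner loop is the point: the hot global lock is only ever held for one small, predictable burst of splicing at a time.
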
*/ + + void vm_page_wire( - register vm_page_t mem) + register vm_page_t mem, + vm_tag_t tag, + boolean_t check_memorystatus) { // dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */ @@ -2562,10 +2996,26 @@ vm_page_wire( lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); #endif if ( !VM_PAGE_WIRED(mem)) { - VM_PAGE_QUEUES_REMOVE(mem); + + if (mem->pageout_queue) { + mem->pageout = FALSE; + vm_pageout_throttle_up(mem); + } + vm_page_queues_remove(mem); if (mem->object) { + + if (!mem->private && !mem->fictitious) + { + if (!mem->object->wired_page_count) + { + assert(VM_KERN_MEMORY_NONE != tag); + mem->object->wire_tag = tag; + VM_OBJECT_WIRED(mem->object); + } + } mem->object->wired_page_count++; + assert(mem->object->resident_page_count >= mem->object->wired_page_count); if (mem->object->purgable == VM_PURGABLE_VOLATILE) { @@ -2573,6 +3023,25 @@ vm_page_wire( OSAddAtomic(-1, &vm_page_purgeable_count); OSAddAtomic(1, &vm_page_purgeable_wired_count); } + if ((mem->object->purgable == VM_PURGABLE_VOLATILE || + mem->object->purgable == VM_PURGABLE_EMPTY) && + mem->object->vo_purgeable_owner != TASK_NULL) { + task_t owner; + + owner = mem->object->vo_purgeable_owner; + /* less volatile bytes */ + ledger_debit(owner->ledger, + task_ledgers.purgeable_volatile, + PAGE_SIZE); + /* more not-quite-volatile bytes */ + ledger_credit(owner->ledger, + task_ledgers.purgeable_nonvolatile, + PAGE_SIZE); + /* more footprint */ + ledger_credit(owner->ledger, + task_ledgers.phys_footprint, + PAGE_SIZE); + } if (mem->object->all_reusable) { /* * Wired pages are not counted as "re-usable" @@ -2598,28 +3067,10 @@ vm_page_wire( if (mem->gobbled) vm_page_gobble_count--; mem->gobbled = FALSE; - if (mem->zero_fill == TRUE) { - mem->zero_fill = FALSE; - VM_ZF_COUNT_DECR(); - } -#if CONFIG_EMBEDDED - { - int percent_avail; - /* - * Decide if we need to poke the memorystatus notification thread. - */ - percent_avail = - (vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count + vm_page_free_count + - (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 / - atop_64(max_mem); - if (percent_avail <= (kern_memorystatus_level - 5)) { - kern_memorystatus_level = percent_avail; - thread_wakeup((event_t)&kern_memorystatus_wakeup); + if (check_memorystatus == TRUE) { + VM_CHECK_MEMORYSTATUS; } - } -#endif /* * ENCRYPTED SWAP: * The page could be encrypted, but @@ -2635,32 +3086,6 @@ vm_page_wire( VM_PAGE_CHECK(mem); } -/* - * vm_page_gobble: - * - * Mark this page as consumed by the vm/ipc/xmm subsystems. - * - * Called only for freshly vm_page_grab()ed pages - w/ nothing locked. 
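
When vm_page_wire() above wires a page belonging to a volatile (or empty) purgeable object with an owning task, the change is booked as a three-entry ledger move: bytes leave the task's purgeable_volatile entry, enter purgeable_nonvolatile, and are added to phys_footprint; vm_page_unwire() and freeing a wired page reverse the move. A small sketch of that invariant with plain counters standing in for the ledger calls (PAGE_SIZE assumed to be 4096):

	#define PAGE_SIZE 4096

	struct purgeable_ledger {
		long volatile_bytes;		/* like task_ledgers.purgeable_volatile */
		long nonvolatile_bytes;		/* like task_ledgers.purgeable_nonvolatile */
		long phys_footprint;		/* like task_ledgers.phys_footprint */
	};

	static void
	account_wire(struct purgeable_ledger *l)
	{
		l->volatile_bytes    -= PAGE_SIZE;	/* less volatile */
		l->nonvolatile_bytes += PAGE_SIZE;	/* more not-quite-volatile */
		l->phys_footprint    += PAGE_SIZE;	/* wired memory counts against footprint */
	}

	static void
	account_unwire(struct purgeable_ledger *l)
	{
		l->volatile_bytes    += PAGE_SIZE;
		l->nonvolatile_bytes -= PAGE_SIZE;
		l->phys_footprint    -= PAGE_SIZE;
	}
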
- */ -void -vm_page_gobble( - register vm_page_t mem) -{ - vm_page_lockspin_queues(); - VM_PAGE_CHECK(mem); - - assert(!mem->gobbled); - assert( !VM_PAGE_WIRED(mem)); - - if (!mem->gobbled && !VM_PAGE_WIRED(mem)) { - if (!mem->private && !mem->fictitious) - vm_page_wire_count++; - } - vm_page_gobble_count++; - mem->gobbled = TRUE; - vm_page_unlock_queues(); -} - /* * vm_page_unwire: * @@ -2671,23 +3096,29 @@ vm_page_gobble( */ void vm_page_unwire( - register vm_page_t mem) + vm_page_t mem, + boolean_t queueit) { // dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */ VM_PAGE_CHECK(mem); assert(VM_PAGE_WIRED(mem)); + assert(!mem->gobbled); assert(mem->object != VM_OBJECT_NULL); #if DEBUG vm_object_lock_assert_exclusive(mem->object); lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); #endif if (--mem->wire_count == 0) { - assert(!mem->private && !mem->fictitious); - vm_page_wire_count--; + if (!mem->private && !mem->fictitious) { + vm_page_wire_count--; + } assert(mem->object->wired_page_count > 0); mem->object->wired_page_count--; + if (!mem->object->wired_page_count) { + VM_OBJECT_UNWIRED(mem->object); + } assert(mem->object->resident_page_count >= mem->object->wired_page_count); if (mem->object->purgable == VM_PURGABLE_VOLATILE) { @@ -2695,32 +3126,38 @@ vm_page_unwire( assert(vm_page_purgeable_wired_count > 0); OSAddAtomic(-1, &vm_page_purgeable_wired_count); } - assert(!mem->laundry); + if ((mem->object->purgable == VM_PURGABLE_VOLATILE || + mem->object->purgable == VM_PURGABLE_EMPTY) && + mem->object->vo_purgeable_owner != TASK_NULL) { + task_t owner; + + owner = mem->object->vo_purgeable_owner; + /* more volatile bytes */ + ledger_credit(owner->ledger, + task_ledgers.purgeable_volatile, + PAGE_SIZE); + /* less not-quite-volatile bytes */ + ledger_debit(owner->ledger, + task_ledgers.purgeable_nonvolatile, + PAGE_SIZE); + /* less footprint */ + ledger_debit(owner->ledger, + task_ledgers.phys_footprint, + PAGE_SIZE); + } assert(mem->object != kernel_object); assert(mem->pageq.next == NULL && mem->pageq.prev == NULL); - if (mem->object->purgable == VM_PURGABLE_EMPTY) { - vm_page_deactivate(mem); - } else { - vm_page_activate(mem); - } -#if CONFIG_EMBEDDED - { - int percent_avail; - /* - * Decide if we need to poke the memorystatus notification thread. - */ - percent_avail = - (vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count + vm_page_free_count + - (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 / - atop_64(max_mem); - if (percent_avail >= (kern_memorystatus_level + 5)) { - kern_memorystatus_level = percent_avail; - thread_wakeup((event_t)&kern_memorystatus_wakeup); - } + if (queueit == TRUE) { + if (mem->object->purgable == VM_PURGABLE_EMPTY) { + vm_page_deactivate(mem); + } else { + vm_page_activate(mem); + } } -#endif + + VM_CHECK_MEMORYSTATUS; + } VM_PAGE_CHECK(mem); } @@ -2763,6 +3200,8 @@ vm_page_deactivate_internal( * inactive queue. Note wired pages should not have * their reference bit cleared. */ + assert ( !(m->absent && !m->unusual)); + if (m->gobbled) { /* can this happen? 
*/ assert( !VM_PAGE_WIRED(m)); @@ -2771,53 +3210,102 @@ vm_page_deactivate_internal( vm_page_gobble_count--; m->gobbled = FALSE; } - if (m->private || (VM_PAGE_WIRED(m))) + /* + * if this page is currently on the pageout queue, we can't do the + * vm_page_queues_remove (which doesn't handle the pageout queue case) + * and we can't remove it manually since we would need the object lock + * (which is not required here) to decrement the activity_in_progress + * reference which is held on the object while the page is in the pageout queue... + * just let the normal laundry processing proceed + */ + if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor || (VM_PAGE_WIRED(m))) return; - if (!m->fictitious && !m->absent && clear_hw_reference == TRUE) + if (!m->absent && clear_hw_reference == TRUE) pmap_clear_reference(m->phys_page); m->reference = FALSE; m->no_cache = FALSE; if (!m->inactive) { - VM_PAGE_QUEUES_REMOVE(m); + vm_page_queues_remove(m); - assert(!m->laundry); - assert(m->pageq.next == NULL && m->pageq.prev == NULL); - - if (!IP_VALID(memory_manager_default) && + if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && m->dirty && m->object->internal && (m->object->purgable == VM_PURGABLE_DENY || m->object->purgable == VM_PURGABLE_NONVOLATILE || m->object->purgable == VM_PURGABLE_VOLATILE)) { + vm_page_check_pageable_safe(m); queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq); m->throttled = TRUE; vm_page_throttled_count++; } else { - if (!m->fictitious && m->object->named && m->object->ref_count == 1) { + if (m->object->named && m->object->ref_count == 1) { vm_page_speculate(m, FALSE); #if DEVELOPMENT || DEBUG vm_page_speculative_recreated++; #endif - return; } else { - if (m->zero_fill) { - queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq); - vm_zf_queue_count++; - } else { - queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq); - } - } - m->inactive = TRUE; - if (!m->fictitious) { - vm_page_inactive_count++; - token_new_pagecount++; + vm_page_enqueue_inactive(m, FALSE); } } } } +/* + * vm_page_enqueue_cleaned + * + * Put the page on the cleaned queue, mark it cleaned, etc. + * Being on the cleaned queue (and having m->clean_queue set) + * does ** NOT ** guarantee that the page is clean! + * + * Call with the queues lock held. + */ + +void vm_page_enqueue_cleaned(vm_page_t m) +{ + assert(m->phys_page != vm_page_guard_addr); +#if DEBUG + lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); +#endif + assert( !(m->absent && !m->unusual)); + + if (m->gobbled) { + assert( !VM_PAGE_WIRED(m)); + if (!m->private && !m->fictitious) + vm_page_wire_count--; + vm_page_gobble_count--; + m->gobbled = FALSE; + } + /* + * if this page is currently on the pageout queue, we can't do the + * vm_page_queues_remove (which doesn't handle the pageout queue case) + * and we can't remove it manually since we would need the object lock + * (which is not required here) to decrement the activity_in_progress + * reference which is held on the object while the page is in the pageout queue... 
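
The same guard now appears in vm_page_deactivate_internal(), vm_page_enqueue_cleaned(), vm_page_activate(), vm_page_speculate() and vm_page_lru(): pages that are being laundered, already sitting on a pageout queue, private, fictitious, or owned by the compressor are never re-queued from these paths, since pulling them off the pageout queue would require the object lock. The check, written out once as a standalone predicate over the same page bits:

	struct page_bits {
		unsigned laundry:1, pageout_queue:1, private:1,
		         fictitious:1, compressor:1;
	};

	static int
	page_can_change_queue(const struct page_bits *p)
	{
		/* leave such pages alone; normal laundry processing requeues them */
		return !(p->laundry || p->pageout_queue || p->private ||
		         p->fictitious || p->compressor);
	}
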
+ * just let the normal laundry processing proceed + */ + if (m->laundry || m->clean_queue || m->pageout_queue || m->private || m->fictitious) + return; + + vm_page_queues_remove(m); + + vm_page_check_pageable_safe(m); + queue_enter(&vm_page_queue_cleaned, m, vm_page_t, pageq); + m->clean_queue = TRUE; + vm_page_cleaned_count++; + + m->inactive = TRUE; + vm_page_inactive_count++; + if (m->object->internal) { + vm_page_pageable_internal_count++; + } else { + vm_page_pageable_external_count++; + } + + vm_pageout_enqueued_cleaned++; +} + /* * vm_page_activate: * @@ -2838,6 +3326,8 @@ vm_page_activate( #if DEBUG lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); #endif + assert( !(m->absent && !m->unusual)); + if (m->gobbled) { assert( !VM_PAGE_WIRED(m)); if (!m->private && !m->fictitious) @@ -2845,7 +3335,15 @@ vm_page_activate( vm_page_gobble_count--; m->gobbled = FALSE; } - if (m->private) + /* + * if this page is currently on the pageout queue, we can't do the + * vm_page_queues_remove (which doesn't handle the pageout queue case) + * and we can't remove it manually since we would need the object lock + * (which is not required here) to decrement the activity_in_progress + * reference which is held on the object while the page is in the pageout queue... + * just let the normal laundry processing proceed + */ + if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor) return; #if DEBUG @@ -2857,14 +3355,13 @@ vm_page_activate( DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL); DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL); } - - VM_PAGE_QUEUES_REMOVE(m); + + vm_page_queues_remove(m); if ( !VM_PAGE_WIRED(m)) { - assert(!m->laundry); - assert(m->pageq.next == NULL && m->pageq.prev == NULL); - if (!IP_VALID(memory_manager_default) && - !m->fictitious && m->dirty && m->object->internal && + vm_page_check_pageable_safe(m); + if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && + m->dirty && m->object->internal && (m->object->purgable == VM_PURGABLE_DENY || m->object->purgable == VM_PURGABLE_NONVOLATILE || m->object->purgable == VM_PURGABLE_VOLATILE)) { @@ -2874,8 +3371,12 @@ vm_page_activate( } else { queue_enter(&vm_page_queue_active, m, vm_page_t, pageq); m->active = TRUE; - if (!m->fictitious) - vm_page_active_count++; + vm_page_active_count++; + if (m->object->internal) { + vm_page_pageable_internal_count++; + } else { + vm_page_pageable_external_count++; + } } m->reference = TRUE; m->no_cache = FALSE; @@ -2899,13 +3400,26 @@ vm_page_speculate( struct vm_speculative_age_q *aq; VM_PAGE_CHECK(m); - assert(m->object != kernel_object); + vm_page_check_pageable_safe(m); + assert(m->phys_page != vm_page_guard_addr); #if DEBUG lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); #endif + assert( !(m->absent && !m->unusual)); - VM_PAGE_QUEUES_REMOVE(m); + /* + * if this page is currently on the pageout queue, we can't do the + * vm_page_queues_remove (which doesn't handle the pageout queue case) + * and we can't remove it manually since we would need the object lock + * (which is not required here) to decrement the activity_in_progress + * reference which is held on the object while the page is in the pageout queue... 
+ * just let the normal laundry processing proceed + */ + if (m->laundry || m->pageout_queue || m->private || m->fictitious || m->compressor) + return; + + vm_page_queues_remove(m); if ( !VM_PAGE_WIRED(m)) { mach_timespec_t ts; @@ -2926,8 +3440,8 @@ vm_page_speculate( /* * set the timer to begin a new group */ - aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000; - aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC; + aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000; + aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC; ADD_MACH_TIMESPEC(&aq->age_ts, &ts); } else { @@ -2950,8 +3464,8 @@ vm_page_speculate( if (!queue_empty(&aq->age_q)) vm_page_speculate_ageit(aq); - aq->age_ts.tv_sec = VM_PAGE_SPECULATIVE_Q_AGE_MS / 1000; - aq->age_ts.tv_nsec = (VM_PAGE_SPECULATIVE_Q_AGE_MS % 1000) * 1000 * NSEC_PER_USEC; + aq->age_ts.tv_sec = vm_page_speculative_q_age_ms / 1000; + aq->age_ts.tv_nsec = (vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC; ADD_MACH_TIMESPEC(&aq->age_ts, &ts); } @@ -2959,8 +3473,15 @@ vm_page_speculate( enqueue_tail(&aq->age_q, &m->pageq); m->speculative = TRUE; vm_page_speculative_count++; + if (m->object->internal) { + vm_page_pageable_internal_count++; + } else { + vm_page_pageable_external_count++; + } if (new == TRUE) { + vm_object_lock_assert_exclusive(m->object); + m->object->pages_created++; #if DEVELOPMENT || DEBUG vm_page_speculative_created++; @@ -3021,24 +3542,22 @@ vm_page_lru( #if DEBUG lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); #endif - if (m->active || m->reference) - return; - - if (m->private || (VM_PAGE_WIRED(m))) + /* + * if this page is currently on the pageout queue, we can't do the + * vm_page_queues_remove (which doesn't handle the pageout queue case) + * and we can't remove it manually since we would need the object lock + * (which is not required here) to decrement the activity_in_progress + * reference which is held on the object while the page is in the pageout queue... + * just let the normal laundry processing proceed + */ + if (m->laundry || m->pageout_queue || m->private || m->compressor || (VM_PAGE_WIRED(m))) return; m->no_cache = FALSE; - VM_PAGE_QUEUES_REMOVE(m); - - assert(!m->laundry); - assert(m->pageq.next == NULL && m->pageq.prev == NULL); + vm_page_queues_remove(m); - queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq); - m->inactive = TRUE; - - vm_page_inactive_count++; - token_new_pagecount++; + vm_page_enqueue_inactive(m, FALSE); } @@ -3049,8 +3568,14 @@ vm_page_reactivate_all_throttled(void) vm_page_t first_active; vm_page_t m; int extra_active_count; + int extra_internal_count, extra_external_count; + + if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) + return; extra_active_count = 0; + extra_internal_count = 0; + extra_external_count = 0; vm_page_lock_queues(); if (! queue_empty(&vm_page_queue_throttled)) { /* @@ -3063,9 +3588,14 @@ vm_page_reactivate_all_throttled(void) assert(!m->inactive); assert(!m->speculative); assert(!VM_PAGE_WIRED(m)); - if (!m->fictitious) { - extra_active_count++; + + extra_active_count++; + if (m->object->internal) { + extra_internal_count++; + } else { + extra_external_count++; } + m->throttled = FALSE; m->active = TRUE; VM_PAGE_CHECK(m); @@ -3097,6 +3627,8 @@ vm_page_reactivate_all_throttled(void) * Adjust the global page counts. 
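
vm_page_speculate() above stamps each speculative age queue with a deadline derived from vm_page_speculative_q_age_ms, a variable that replaces the old VM_PAGE_SPECULATIVE_Q_AGE_MS compile-time constant. The conversion it performs is just a millisecond-to-timespec split, sketched here on its own:

	#define NSEC_PER_USEC	1000ULL

	struct ts { long tv_sec; long tv_nsec; };

	static struct ts
	age_ms_to_timespec(unsigned int age_ms)
	{
		struct ts t;

		t.tv_sec  = age_ms / 1000;				/* whole seconds */
		t.tv_nsec = (age_ms % 1000) * 1000 * NSEC_PER_USEC;	/* remainder, in nanoseconds */
		return t;
	}
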
*/ vm_page_active_count += extra_active_count; + vm_page_pageable_internal_count += extra_internal_count; + vm_page_pageable_external_count += extra_external_count; vm_page_throttled_count = 0; } assert(vm_page_throttled_count == 0); @@ -3143,6 +3675,7 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) queue_iterate(&lq->vpl_queue, m, vm_page_t, pageq) { VM_PAGE_CHECK(m); + vm_page_check_pageable_safe(m); assert(m->local); assert(!m->active); assert(!m->inactive); @@ -3185,7 +3718,11 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) * Adjust the global page counts. */ vm_page_active_count += lq->vpl_count; + vm_page_pageable_internal_count += lq->vpl_internal_count; + vm_page_pageable_external_count += lq->vpl_external_count; lq->vpl_count = 0; + lq->vpl_internal_count = 0; + lq->vpl_external_count = 0; } assert(queue_empty(&lq->vpl_queue)); @@ -3200,18 +3737,26 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) * * Zero-fill a part of the page. */ +#define PMAP_ZERO_PART_PAGE_IMPLEMENTED void vm_page_part_zero_fill( vm_page_t m, vm_offset_t m_pa, vm_size_t len) { - vm_page_t tmp; +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(m); +#endif + #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED pmap_zero_part_page(m->phys_page, m_pa, len); #else + vm_page_t tmp; while (1) { tmp = vm_page_grab(); if (tmp == VM_PAGE_NULL) { @@ -3246,8 +3791,13 @@ vm_page_zero_fill( XPR(XPR_VM_PAGE, "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n", m->object, m->offset, m, 0,0); - +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(m); +#endif // dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */ pmap_zero_page(m->phys_page); @@ -3267,9 +3817,14 @@ vm_page_part_copy( vm_offset_t dst_pa, vm_size_t len) { +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(src_m); VM_PAGE_CHECK(dst_m); - +#endif pmap_copy_part_page(src_m->phys_page, src_pa, dst_m->phys_page, dst_pa, len); } @@ -3297,9 +3852,15 @@ vm_page_copy( src_m->object, src_m->offset, dest_m->object, dest_m->offset, 0); - +#if 0 + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ VM_PAGE_CHECK(src_m); VM_PAGE_CHECK(dest_m); +#endif + vm_object_lock_assert_held(src_m->object); /* * ENCRYPTED SWAP: @@ -3323,6 +3884,17 @@ vm_page_copy( vm_page_copy_cs_validations++; vm_page_validate_cs(src_m); } + + if (vm_page_is_slideable(src_m)) { + boolean_t was_busy = src_m->busy; + src_m->busy = TRUE; + (void) vm_page_slide(src_m, 0); + assert(src_m->busy); + if (!was_busy) { + PAGE_WAKEUP_DONE(src_m); + } + } + /* * Propagate the cs_tainted bit to the copy page. Do not propagate * the cs_validated bit. @@ -3331,7 +3903,8 @@ vm_page_copy( if (dest_m->cs_tainted) { vm_page_copy_cs_tainted++; } - + dest_m->slid = src_m->slid; + dest_m->error = src_m->error; /* sliding src_m might have failed... */ pmap_copy_page(src_m->phys_page, dest_m->phys_page); } @@ -3343,7 +3916,7 @@ _vm_page_print( printf("vm_page %p: \n", p); printf(" pageq: next=%p prev=%p\n", p->pageq.next, p->pageq.prev); printf(" listq: next=%p prev=%p\n", p->listq.next, p->listq.prev); - printf(" next=%p\n", p->next); + printf(" next=%p\n", VM_PAGE_UNPACK_PTR(p->next_m)); printf(" object=%p offset=0x%llx\n", p->object, p->offset); printf(" wire_count=%u\n", p->wire_count); @@ -3381,14 +3954,11 @@ _vm_page_print( (p->unusual ? 
"" : "!"), (p->encrypted ? "" : "!"), (p->encrypted_cleaning ? "" : "!")); - printf(" %slist_req_pending, %sdump_cleaning, %scs_validated, %scs_tainted, %sno_cache\n", - (p->list_req_pending ? "" : "!"), - (p->dump_cleaning ? "" : "!"), + printf(" %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n", (p->cs_validated ? "" : "!"), (p->cs_tainted ? "" : "!"), + (p->cs_nx ? "" : "!"), (p->no_cache ? "" : "!")); - printf(" %szero_fill\n", - (p->zero_fill ? "" : "!")); printf("phys_page=0x%x\n", p->phys_page); } @@ -3412,7 +3982,7 @@ vm_page_verify_contiguous( if (m->phys_page != prev_addr + 1) { printf("m %p prev_addr 0x%lx, current addr 0x%x\n", m, (long)prev_addr, m->phys_page); - printf("pages %p page_count %d\n", pages, page_count); + printf("pages %p page_count %d npages %d\n", pages, page_count, npages); panic("vm_page_verify_contiguous: not contiguous!"); } prev_addr = m->phys_page; @@ -3430,6 +4000,7 @@ vm_page_verify_contiguous( /* * Check the free lists for proper length etc. */ +static boolean_t vm_page_verify_this_free_list_enabled = FALSE; static unsigned int vm_page_verify_free_list( queue_head_t *vm_page_queue, @@ -3442,6 +4013,9 @@ vm_page_verify_free_list( vm_page_t prev_m; boolean_t found_page; + if (! vm_page_verify_this_free_list_enabled) + return 0; + found_page = FALSE; npages = 0; prev_m = (vm_page_t) vm_page_queue; @@ -3449,21 +4023,24 @@ vm_page_verify_free_list( m, vm_page_t, pageq) { + if (m == look_for_page) { found_page = TRUE; } if ((vm_page_t) m->pageq.prev != prev_m) panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n", color, npages, m, m->pageq.prev, prev_m); - if ( ! m->free ) - panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n", - color, npages, m); if ( ! m->busy ) panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n", color, npages, m); - if ( color != (unsigned int) -1 && (m->phys_page & vm_color_mask) != color) - panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n", - color, npages, m, m->phys_page & vm_color_mask, color); + if (color != (unsigned int) -1) { + if ((m->phys_page & vm_color_mask) != color) + panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n", + color, npages, m, m->phys_page & vm_color_mask, color); + if ( ! m->free ) + panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not free\n", + color, npages, m); + } ++npages; prev_m = m; } @@ -3480,13 +4057,12 @@ vm_page_verify_free_list( if (other_color == color) continue; vm_page_verify_free_list(&vm_page_queue_free[other_color], - other_color, look_for_page, FALSE); + other_color, look_for_page, FALSE); } - if (color != (unsigned int) -1) { + if (color == (unsigned int) -1) { vm_page_verify_free_list(&vm_lopage_queue_free, (unsigned int) -1, look_for_page, FALSE); } - panic("vm_page_verify_free_list(color=%u)\n", color); } if (!expect_page && found_page) { @@ -3497,24 +4073,37 @@ vm_page_verify_free_list( return npages; } -static boolean_t vm_page_verify_free_lists_enabled = FALSE; +static boolean_t vm_page_verify_all_free_lists_enabled = FALSE; static void vm_page_verify_free_lists( void ) { unsigned int color, npages, nlopages; + boolean_t toggle = TRUE; - if (! vm_page_verify_free_lists_enabled) + if (! 
vm_page_verify_all_free_lists_enabled) return; npages = 0; lck_mtx_lock(&vm_page_queue_free_lock); + + if (vm_page_verify_this_free_list_enabled == TRUE) { + /* + * This variable has been set globally for extra checking of + * each free list Q. Since we didn't set it, we don't own it + * and we shouldn't toggle it. + */ + toggle = FALSE; + } + + if (toggle == TRUE) { + vm_page_verify_this_free_list_enabled = TRUE; + } for( color = 0; color < vm_colors; color++ ) { npages += vm_page_verify_free_list(&vm_page_queue_free[color], - color, VM_PAGE_NULL, FALSE); + color, VM_PAGE_NULL, FALSE); } - nlopages = vm_page_verify_free_list(&vm_lopage_queue_free, (unsigned int) -1, VM_PAGE_NULL, FALSE); @@ -3522,6 +4111,11 @@ vm_page_verify_free_lists( void ) panic("vm_page_verify_free_lists: " "npages %u free_count %d nlopages %u lo_free_count %u", npages, vm_page_free_count, nlopages, vm_lopage_free_count); + + if (toggle == TRUE) { + vm_page_verify_this_free_list_enabled = FALSE; + } + lck_mtx_unlock(&vm_page_queue_free_lock); } @@ -3530,6 +4124,9 @@ vm_page_queues_assert( vm_page_t mem, int val) { +#if DEBUG + lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); +#endif if (mem->free + mem->active + mem->inactive + mem->speculative + mem->throttled + mem->pageout_queue > (val)) { _vm_page_print(mem); @@ -3540,11 +4137,17 @@ vm_page_queues_assert( assert(!mem->inactive); assert(!mem->speculative); assert(!mem->throttled); + assert(!mem->pageout_queue); } } #endif /* MACH_ASSERT */ + + + +extern boolean_t (* volatile consider_buffer_cache_collect)(int); + /* * CONTIGUOUS PAGE ALLOCATION * @@ -3627,27 +4230,34 @@ vm_page_find_contiguous( unsigned int idx_last_contig_page_found = 0; int free_considered, free_available; int substitute_needed; - boolean_t wrapped; + boolean_t wrapped, zone_gc_called = FALSE; #if DEBUG clock_sec_t tv_start_sec, tv_end_sec; clock_usec_t tv_start_usec, tv_end_usec; #endif -#if MACH_ASSERT + int yielded = 0; int dumped_run = 0; int stolen_pages = 0; -#endif + int compressed_pages = 0; + if (contig_pages == 0) return VM_PAGE_NULL; +full_scan_again: + #if MACH_ASSERT vm_page_verify_free_lists(); #endif #if DEBUG clock_get_system_microtime(&tv_start_sec, &tv_start_usec); #endif + PAGE_REPLACEMENT_ALLOWED(TRUE); + vm_page_lock_queues(); + + lck_mtx_lock(&vm_page_queue_free_lock); RESET_STATE_OF_RUN(); @@ -3690,27 +4300,16 @@ retry: /* no more low pages... 
*/ break; } - if ( !(flags & KMA_LOMEM) && m->phys_page <= vm_lopage_poolend && - m->phys_page >= vm_lopage_poolstart) { - /* - * don't want to take pages from our - * reserved pool of low memory - * so don't consider it which - * means starting a new run - */ - RESET_STATE_OF_RUN(); - - } else if (!npages && ((m->phys_page & pnum_mask) != 0)) { + if (!npages & ((m->phys_page & pnum_mask) != 0)) { /* * not aligned */ RESET_STATE_OF_RUN(); } else if (VM_PAGE_WIRED(m) || m->gobbled || - m->encrypted || m->encrypted_cleaning || m->cs_validated || m->cs_tainted || - m->error || m->absent || m->pageout_queue || m->laundry || m->wanted || m->precious || - m->cleaning || m->overwriting || m->restart || m->unusual || m->list_req_pending || - m->pageout) { + m->encrypted_cleaning || + m->pageout_queue || m->laundry || m->wanted || + m->cleaning || m->overwriting || m->pageout) { /* * page is in a transient state * or a state we don't want to deal @@ -3719,9 +4318,10 @@ retry: */ RESET_STATE_OF_RUN(); - } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled) { + } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled && !m->compressor) { /* * page needs to be on one of our queues + * or it needs to belong to the compressor pool * in order for it to be stable behind the * locks we hold at this point... * if not, don't consider it which @@ -3772,7 +4372,7 @@ retry: * into a substitute page. */ #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL - if (m->pmapped || m->dirty) { + if (m->pmapped || m->dirty || m->precious) { substitute_needed++; } #else @@ -3807,12 +4407,16 @@ retry: } did_consider: if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) { - + + PAGE_REPLACEMENT_ALLOWED(FALSE); + lck_mtx_unlock(&vm_page_queue_free_lock); vm_page_unlock_queues(); mutex_pause(0); + PAGE_REPLACEMENT_ALLOWED(TRUE); + vm_page_lock_queues(); lck_mtx_lock(&vm_page_queue_free_lock); @@ -3823,9 +4427,9 @@ did_consider: */ free_available = vm_page_free_count - vm_page_free_reserved; considered = 0; -#if MACH_ASSERT + yielded++; -#endif + goto retry; } considered++; @@ -3880,46 +4484,21 @@ did_consider: #endif if (m1->free) { - if ( m1->phys_page <= vm_lopage_poolend && - m1->phys_page >= vm_lopage_poolstart) { - - assert( flags & KMA_LOMEM ); -#if MACH_ASSERT - vm_page_verify_free_list(&vm_lopage_queue_free, - (unsigned int) -1, m1, TRUE); -#endif - queue_remove(&vm_lopage_queue_free, - m1, - vm_page_t, - pageq); - vm_lopage_free_count--; - -#if MACH_ASSERT - vm_page_verify_free_list(&vm_lopage_queue_free, - (unsigned int) -1, VM_PAGE_NULL, FALSE); -#endif - } else { - - unsigned int color; + unsigned int color; - color = m1->phys_page & vm_color_mask; -#if MACH_ASSERT - vm_page_verify_free_list(&vm_page_queue_free[color], - color, m1, TRUE); -#endif - queue_remove(&vm_page_queue_free[color], - m1, - vm_page_t, - pageq); - vm_page_free_count--; + color = m1->phys_page & vm_color_mask; #if MACH_ASSERT - vm_page_verify_free_list(&vm_page_queue_free[color], - color, VM_PAGE_NULL, FALSE); + vm_page_verify_free_list(&vm_page_queue_free[color], color, m1, TRUE); #endif - } - + queue_remove(&vm_page_queue_free[color], + m1, + vm_page_t, + pageq); m1->pageq.next = NULL; m1->pageq.prev = NULL; +#if MACH_ASSERT + vm_page_verify_free_list(&vm_page_queue_free[color], color, VM_PAGE_NULL, FALSE); +#endif /* * Clear the "free" bit so that this page * does not get considered for another @@ -3927,14 +4506,10 @@ did_consider: */ m1->free = FALSE; assert(m1->busy); + + 
vm_page_free_count--; } } - /* - * adjust global freelist counts - */ - if (vm_page_free_count < vm_page_free_count_minimum) - vm_page_free_count_minimum = vm_page_free_count; - if( flags & KMA_LOMEM) vm_page_lomem_find_contiguous_last_idx = page_idx; else @@ -3960,6 +4535,7 @@ did_consider: m1 = &vm_pages[cur_idx--]; assert(!m1->free); + if (m1->object == VM_OBJECT_NULL) { /* * page has already been removed from @@ -3971,6 +4547,8 @@ did_consider: assert(!m1->laundry); } else { vm_object_t object; + int refmod; + boolean_t disconnected, reusable; if (abort_run == TRUE) continue; @@ -3987,9 +4565,9 @@ did_consider: } if (locked_object == VM_OBJECT_NULL || (VM_PAGE_WIRED(m1) || m1->gobbled || - m1->encrypted || m1->encrypted_cleaning || m1->cs_validated || m1->cs_tainted || - m1->error || m1->absent || m1->pageout_queue || m1->laundry || m1->wanted || m1->precious || - m1->cleaning || m1->overwriting || m1->restart || m1->unusual || m1->list_req_pending || m1->busy)) { + m1->encrypted_cleaning || + m1->pageout_queue || m1->laundry || m1->wanted || + m1->cleaning || m1->overwriting || m1->pageout || m1->busy)) { if (locked_object) { vm_object_unlock(locked_object); @@ -3999,8 +4577,31 @@ did_consider: abort_run = TRUE; continue; } - if (m1->pmapped || m1->dirty) { - int refmod; + + disconnected = FALSE; + reusable = FALSE; + + if ((m1->reusable || + m1->object->all_reusable) && + m1->inactive && + !m1->dirty && + !m1->reference) { + /* reusable page... */ + refmod = pmap_disconnect(m1->phys_page); + disconnected = TRUE; + if (refmod == 0) { + /* + * ... not reused: can steal + * without relocating contents. + */ + reusable = TRUE; + } + } + + if ((m1->pmapped && + ! reusable) || + m1->dirty || + m1->precious) { vm_object_offset_t offset; m2 = vm_page_grab(); @@ -4014,19 +4615,81 @@ did_consider: abort_run = TRUE; continue; } - if (m1->pmapped) - refmod = pmap_disconnect(m1->phys_page); - else - refmod = 0; - vm_page_copy(m1, m2); - - m2->reference = m1->reference; - m2->dirty = m1->dirty; + if (! disconnected) { + if (m1->pmapped) + refmod = pmap_disconnect(m1->phys_page); + else + refmod = 0; + } + + /* copy the page's contents */ + pmap_copy_page(m1->phys_page, m2->phys_page); + /* copy the page's state */ + assert(!VM_PAGE_WIRED(m1)); + assert(!m1->free); + assert(!m1->pageout_queue); + assert(!m1->laundry); + m2->reference = m1->reference; + assert(!m1->gobbled); + assert(!m1->private); + m2->no_cache = m1->no_cache; + m2->xpmapped = 0; + assert(!m1->busy); + assert(!m1->wanted); + assert(!m1->fictitious); + m2->pmapped = m1->pmapped; /* should flush cache ? */ + m2->wpmapped = m1->wpmapped; + assert(!m1->pageout); + m2->absent = m1->absent; + m2->error = m1->error; + m2->dirty = m1->dirty; + assert(!m1->cleaning); + m2->precious = m1->precious; + m2->clustered = m1->clustered; + assert(!m1->overwriting); + m2->restart = m1->restart; + m2->unusual = m1->unusual; + m2->encrypted = m1->encrypted; + assert(!m1->encrypted_cleaning); + m2->cs_validated = m1->cs_validated; + m2->cs_tainted = m1->cs_tainted; + m2->cs_nx = m1->cs_nx; + + /* + * If m1 had really been reusable, + * we would have just stolen it, so + * let's not propagate it's "reusable" + * bit and assert that m2 is not + * marked as "reusable". 
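
When vm_page_find_contiguous() above runs into in-use pages inside a candidate run, it either steals them outright (clean, unreferenced, reusable pages whose pmap ref/mod state comes back clear), relocates their contents into a freshly grabbed substitute page (mapped, dirty or precious pages, copied with pmap_copy_page() and with most state bits carried over), or simply frees them. A condensed sketch of that classification, with simplified stand-in fields:

	struct cpage { int reusable, inactive, dirty, reference, pmapped, precious; };

	enum steal_action { STEAL_IN_PLACE, RELOCATE_CONTENTS, FREE_DIRECTLY };

	static enum steal_action
	classify_for_contig_run(const struct cpage *p, int ref_mod_from_pmap)
	{
		/* a clean, unreferenced, reusable page can be taken without copying */
		if (p->reusable && p->inactive && !p->dirty && !p->reference &&
		    ref_mod_from_pmap == 0)
			return STEAL_IN_PLACE;

		/* mapped, dirty or precious contents must move to a substitute page */
		if (p->pmapped || p->dirty || p->precious)
			return RELOCATE_CONTENTS;

		/* otherwise the page can simply be freed and reused for the run */
		return FREE_DIRECTLY;
	}
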
+ */ + // m2->reusable = m1->reusable; + assert(!m2->reusable); + + assert(!m1->lopage); + m2->slid = m1->slid; + m2->compressor = m1->compressor; + + /* + * page may need to be flushed if + * it is marshalled into a UPL + * that is going to be used by a device + * that doesn't support coherency + */ + m2->written_by_kernel = TRUE; + + /* + * make sure we clear the ref/mod state + * from the pmap layer... else we risk + * inheriting state from the last time + * this page was used... + */ + pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED); if (refmod & VM_MEM_REFERENCED) m2->reference = TRUE; - if (refmod & VM_MEM_MODIFIED) - m2->dirty = TRUE; + if (refmod & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(m2, TRUE); + } offset = m1->offset; /* @@ -4039,25 +4702,31 @@ did_consider: vm_page_free_prepare(m1); /* - * make sure we clear the ref/mod state - * from the pmap layer... else we risk - * inheriting state from the last time - * this page was used... - */ - pmap_clear_refmod(m2->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED); - /* - * now put the substitute page on the object + * now put the substitute page + * on the object */ - vm_page_insert_internal(m2, locked_object, offset, TRUE, TRUE); + vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL); + + if (m2->compressor) { + m2->pmapped = TRUE; + m2->wpmapped = TRUE; + + PMAP_ENTER(kernel_pmap, m2->offset, m2, + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE); - if (m2->reference) - vm_page_activate(m2); - else - vm_page_deactivate(m2); + compressed_pages++; + } else { + if (m2->reference) + vm_page_activate(m2); + else + vm_page_deactivate(m2); + } PAGE_WAKEUP_DONE(m2); } else { + assert(!m1->compressor); + /* * completely cleans up the state * of the page so that it is ready @@ -4067,9 +4736,9 @@ did_consider: */ vm_page_free_prepare(m1); } -#if MACH_ASSERT + stolen_pages++; -#endif + } m1->pageq.next = (queue_entry_t) m; m1->pageq.prev = NULL; @@ -4084,9 +4753,9 @@ did_consider: if (m != VM_PAGE_NULL) { vm_page_free_list(m, FALSE); } -#if MACH_ASSERT + dumped_run++; -#endif + /* * want the index of the last * page in this run that was @@ -4144,6 +4813,8 @@ did_consider: assert(vm_page_verify_contiguous(m, npages)); } done_scanning: + PAGE_REPLACEMENT_ALLOWED(FALSE); + vm_page_unlock_queues(); #if DEBUG @@ -4160,16 +4831,33 @@ done_scanning: tv_end_sec -= 1000000; } if (vm_page_find_contig_debug) { - printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages\n", - __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, - (long)tv_end_sec, tv_end_usec, orig_last_idx, - scanned, yielded, dumped_run, stolen_pages); + printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n", + __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, + (long)tv_end_sec, tv_end_usec, orig_last_idx, + scanned, yielded, dumped_run, stolen_pages, compressed_pages); } #endif #if MACH_ASSERT vm_page_verify_free_lists(); #endif + if (m == NULL && zone_gc_called == FALSE) { + printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... 
wired count is %d\n", + __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, + scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count); + + if (consider_buffer_cache_collect != NULL) { + (void)(*consider_buffer_cache_collect)(1); + } + + consider_zone_gc(TRUE); + + zone_gc_called = TRUE; + + printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count); + goto full_scan_again; + } + return m; } @@ -4188,7 +4876,7 @@ cpm_allocate( vm_page_t pages; unsigned int npages; - if (size % page_size != 0) + if (size % PAGE_SIZE != 0) return KERN_INVALID_ARGUMENT; npages = (unsigned int) (size / PAGE_SIZE); @@ -4210,28 +4898,12 @@ cpm_allocate( * determine need for wakeups */ if ((vm_page_free_count < vm_page_free_min) || - ((vm_page_free_count < vm_page_free_target) && - ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min))) - thread_wakeup((event_t) &vm_page_free_wanted); + ((vm_page_free_count < vm_page_free_target) && + ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min))) + thread_wakeup((event_t) &vm_page_free_wanted); -#if CONFIG_EMBEDDED - { - int percent_avail; - - /* - * Decide if we need to poke the memorystatus notification thread. - */ - percent_avail = - (vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count + vm_page_free_count + - (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 / - atop_64(max_mem); - if (percent_avail <= (kern_memorystatus_level - 5)) { - kern_memorystatus_level = percent_avail; - thread_wakeup((event_t)&kern_memorystatus_wakeup); - } - } -#endif + VM_CHECK_MEMORYSTATUS; + /* * The CPM pages should now be available and * ordered by ascending physical address. @@ -4241,64 +4913,703 @@ cpm_allocate( *list = pages; return KERN_SUCCESS; } - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#if HIBERNATION -static vm_page_t hibernate_gobble_queue; +unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT; -static void -hibernate_page_list_zero(hibernate_page_list_t *list) +/* + * when working on a 'run' of pages, it is necessary to hold + * the vm_page_queue_lock (a hot global lock) for certain operations + * on the page... however, the majority of the work can be done + * while merely holding the object lock... in fact there are certain + * collections of pages that don't require any work brokered by the + * vm_page_queue_lock... to mitigate the time spent behind the global + * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT + * while doing all of the work that doesn't require the vm_page_queue_lock... + * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the + * necessary work for each page... we will grab the busy bit on the page + * if it's not already held so that vm_page_do_delayed_work can drop the object lock + * if it can't immediately take the vm_page_queue_lock in order to compete + * for the locks in the same order that vm_pageout_scan takes them. 
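
The comment above describes the two-pass scheme behind vm_page_do_delayed_work(): callers batch up per-page work items tagged with a bitmask of deferred operations while holding only the object lock, and the queue lock is then taken once for the whole batch. A stripped-down sketch of the work-item shape and dispatch loop; the mask names and helper functions here are illustrative stand-ins, not the kernel's:

	#define DW_WIRE		0x01
	#define DW_UNWIRE	0x02
	#define DW_FREE		0x04
	#define DW_ACTIVATE	0x08
	#define DW_CLEAR_BUSY	0x10

	struct spage;
	struct delayed_work { struct spage *page; unsigned int mask; };

	extern void lock_page_queues(void);
	extern void unlock_page_queues(void);
	extern void wire_page(struct spage *);
	extern void unwire_page(struct spage *);
	extern void activate_page(struct spage *);
	extern void move_to_local_free_list(struct spage *);
	extern void clear_busy(struct spage *);

	static void
	do_delayed_work(struct delayed_work *dwp, int count)
	{
		int i;

		lock_page_queues();			/* the hot lock is taken once per batch */
		for (i = 0; i < count; i++, dwp++) {
			struct spage *p = dwp->page;

			if (dwp->mask & DW_WIRE)
				wire_page(p);
			else if (dwp->mask & DW_UNWIRE)
				unwire_page(p);

			if (dwp->mask & DW_FREE)
				move_to_local_free_list(p);	/* actual freeing happens after the loop */
			else if (dwp->mask & DW_ACTIVATE)
				activate_page(p);

			if (dwp->mask & DW_CLEAR_BUSY)
				clear_busy(p);
		}
		unlock_page_queues();
	}
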
+ * the operation names are modeled after the names of the routines that + * need to be called in order to make the changes very obvious in the + * original loop + */ + +void +vm_page_do_delayed_work( + vm_object_t object, + vm_tag_t tag, + struct vm_page_delayed_work *dwp, + int dw_count) { - uint32_t bank; - hibernate_bitmap_t * bitmap; + int j; + vm_page_t m; + vm_page_t local_free_q = VM_PAGE_NULL; - bitmap = &list->bank_bitmap[0]; - for (bank = 0; bank < list->bank_count; bank++) - { - uint32_t last_bit; + /* + * pageout_scan takes the vm_page_lock_queues first + * then tries for the object lock... to avoid what + * is effectively a lock inversion, we'll go to the + * trouble of taking them in that same order... otherwise + * if this object contains the majority of the pages resident + * in the UBC (or a small set of large objects actively being + * worked on contain the majority of the pages), we could + * cause the pageout_scan thread to 'starve' in its attempt + * to find pages to move to the free queue, since it has to + * successfully acquire the object lock of any candidate page + * before it can steal/clean it. + */ + if (!vm_page_trylockspin_queues()) { + vm_object_unlock(object); - bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); - // set out-of-bound bits at end of bitmap. - last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31); - if (last_bit) - bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit); + vm_page_lockspin_queues(); - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; - } -} + for (j = 0; ; j++) { + if (!vm_object_lock_avoid(object) && + _vm_object_lock_try(object)) + break; + vm_page_unlock_queues(); + mutex_pause(j); + vm_page_lockspin_queues(); + } + } + for (j = 0; j < dw_count; j++, dwp++) { -void -hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time) -{ - uint32_t i; - vm_page_t m; - uint64_t start, end, timeout, nsec; - clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout); - clock_get_uptime(&start); + m = dwp->dw_m; - for (i = 0; i < gobble_count; i++) - { - while (VM_PAGE_NULL == (m = vm_page_grab())) - { - clock_get_uptime(&end); - if (end >= timeout) - break; - VM_PAGE_WAIT(); + if (dwp->dw_mask & DW_vm_pageout_throttle_up) + vm_pageout_throttle_up(m); +#if CONFIG_PHANTOM_CACHE + if (dwp->dw_mask & DW_vm_phantom_cache_update) + vm_phantom_cache_update(m); +#endif + if (dwp->dw_mask & DW_vm_page_wire) + vm_page_wire(m, tag, FALSE); + else if (dwp->dw_mask & DW_vm_page_unwire) { + boolean_t queueit; + + queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE; + + vm_page_unwire(m, queueit); + } + if (dwp->dw_mask & DW_vm_page_free) { + vm_page_free_prepare_queues(m); + + assert(m->pageq.next == NULL && m->pageq.prev == NULL); + /* + * Add this page to our list of reclaimed pages, + * to be freed later. 
+ */ + m->pageq.next = (queue_entry_t) local_free_q; + local_free_q = m; + } else { + if (dwp->dw_mask & DW_vm_page_deactivate_internal) + vm_page_deactivate_internal(m, FALSE); + else if (dwp->dw_mask & DW_vm_page_activate) { + if (m->active == FALSE) { + vm_page_activate(m); + } + } + else if (dwp->dw_mask & DW_vm_page_speculate) + vm_page_speculate(m, TRUE); + else if (dwp->dw_mask & DW_enqueue_cleaned) { + /* + * if we didn't hold the object lock and did this, + * we might disconnect the page, then someone might + * soft fault it back in, then we would put it on the + * cleaned queue, and so we would have a referenced (maybe even dirty) + * page on that queue, which we don't want + */ + int refmod_state = pmap_disconnect(m->phys_page); + + if ((refmod_state & VM_MEM_REFERENCED)) { + /* + * this page has been touched since it got cleaned; let's activate it + * if it hasn't already been + */ + vm_pageout_enqueued_cleaned++; + vm_pageout_cleaned_reactivated++; + vm_pageout_cleaned_commit_reactivated++; + + if (m->active == FALSE) + vm_page_activate(m); + } else { + m->reference = FALSE; + vm_page_enqueue_cleaned(m); + } + } + else if (dwp->dw_mask & DW_vm_page_lru) + vm_page_lru(m); + else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) { + if ( !m->pageout_queue) + vm_page_queues_remove(m); + } + if (dwp->dw_mask & DW_set_reference) + m->reference = TRUE; + else if (dwp->dw_mask & DW_clear_reference) + m->reference = FALSE; + + if (dwp->dw_mask & DW_move_page) { + if ( !m->pageout_queue) { + vm_page_queues_remove(m); + + assert(m->object != kernel_object); + + vm_page_enqueue_inactive(m, FALSE); + } + } + if (dwp->dw_mask & DW_clear_busy) + m->busy = FALSE; + + if (dwp->dw_mask & DW_PAGE_WAKEUP) + PAGE_WAKEUP(m); + } } - if (!m) - break; - m->busy = FALSE; - vm_page_gobble(m); + vm_page_unlock_queues(); - m->pageq.next = (queue_entry_t) hibernate_gobble_queue; - hibernate_gobble_queue = m; - } + if (local_free_q) + vm_page_free_list(local_free_q, TRUE); + + VM_CHECK_MEMORYSTATUS; - clock_get_uptime(&end); - absolutetime_to_nanoseconds(end - start, &nsec); - HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL); +} + +kern_return_t +vm_page_alloc_list( + int page_count, + int flags, + vm_page_t *list) +{ + vm_page_t lo_page_list = VM_PAGE_NULL; + vm_page_t mem; + int i; + + if ( !(flags & KMA_LOMEM)) + panic("vm_page_alloc_list: called w/o KMA_LOMEM"); + + for (i = 0; i < page_count; i++) { + + mem = vm_page_grablo(); + + if (mem == VM_PAGE_NULL) { + if (lo_page_list) + vm_page_free_list(lo_page_list, FALSE); + + *list = VM_PAGE_NULL; + + return (KERN_RESOURCE_SHORTAGE); + } + mem->pageq.next = (queue_entry_t) lo_page_list; + lo_page_list = mem; + } + *list = lo_page_list; + + return (KERN_SUCCESS); +} + +void +vm_page_set_offset(vm_page_t page, vm_object_offset_t offset) +{ + page->offset = offset; +} + +vm_page_t +vm_page_get_next(vm_page_t page) +{ + return ((vm_page_t) page->pageq.next); +} + +vm_object_offset_t +vm_page_get_offset(vm_page_t page) +{ + return (page->offset); +} + +ppnum_t +vm_page_get_phys_page(vm_page_t page) +{ + return (page->phys_page); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if HIBERNATION + +static vm_page_t hibernate_gobble_queue; + +static int hibernate_drain_pageout_queue(struct vm_pageout_queue *); +static int hibernate_flush_dirty_pages(int); +static int hibernate_flush_queue(queue_head_t *, int); + +void hibernate_flush_wait(void); +void hibernate_mark_in_progress(void); +void 
hibernate_clear_in_progress(void); + +void hibernate_free_range(int, int); +void hibernate_hash_insert_page(vm_page_t); +uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *); +void hibernate_rebuild_vm_structs(void); +uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *); +ppnum_t hibernate_lookup_paddr(unsigned int); + +struct hibernate_statistics { + int hibernate_considered; + int hibernate_reentered_on_q; + int hibernate_found_dirty; + int hibernate_skipped_cleaning; + int hibernate_skipped_transient; + int hibernate_skipped_precious; + int hibernate_skipped_external; + int hibernate_queue_nolock; + int hibernate_queue_paused; + int hibernate_throttled; + int hibernate_throttle_timeout; + int hibernate_drained; + int hibernate_drain_timeout; + int cd_lock_failed; + int cd_found_precious; + int cd_found_wired; + int cd_found_busy; + int cd_found_unusual; + int cd_found_cleaning; + int cd_found_laundry; + int cd_found_dirty; + int cd_found_xpmapped; + int cd_skipped_xpmapped; + int cd_local_free; + int cd_total_free; + int cd_vm_page_wire_count; + int cd_vm_struct_pages_unneeded; + int cd_pages; + int cd_discarded; + int cd_count_wire; +} hibernate_stats; + + +/* + * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image + * so that we don't overrun the estimated image size, which would + * result in a hibernation failure. + */ +#define HIBERNATE_XPMAPPED_LIMIT 40000 + + +static int +hibernate_drain_pageout_queue(struct vm_pageout_queue *q) +{ + wait_result_t wait_result; + + vm_page_lock_queues(); + + while ( !queue_empty(&q->pgo_pending) ) { + + q->pgo_draining = TRUE; + + assert_wait_timeout((event_t) (&q->pgo_laundry+1), THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC); + + vm_page_unlock_queues(); + + wait_result = thread_block(THREAD_CONTINUE_NULL); + + if (wait_result == THREAD_TIMED_OUT && !queue_empty(&q->pgo_pending)) { + hibernate_stats.hibernate_drain_timeout++; + + if (q == &vm_pageout_queue_external) + return (0); + + return (1); + } + vm_page_lock_queues(); + + hibernate_stats.hibernate_drained++; + } + vm_page_unlock_queues(); + + return (0); +} + + +boolean_t hibernate_skip_external = FALSE; + +static int +hibernate_flush_queue(queue_head_t *q, int qcount) +{ + vm_page_t m; + vm_object_t l_object = NULL; + vm_object_t m_object = NULL; + int refmod_state = 0; + int try_failed_count = 0; + int retval = 0; + int current_run = 0; + struct vm_pageout_queue *iq; + struct vm_pageout_queue *eq; + struct vm_pageout_queue *tq; + + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0); + + iq = &vm_pageout_queue_internal; + eq = &vm_pageout_queue_external; + + vm_page_lock_queues(); + + while (qcount && !queue_empty(q)) { + + if (current_run++ == 1000) { + if (hibernate_should_abort()) { + retval = 1; + break; + } + current_run = 0; + } + + m = (vm_page_t) queue_first(q); + m_object = m->object; + + /* + * check to see if we currently are working + * with the same object... if so, we've + * already got the lock + */ + if (m_object != l_object) { + /* + * the object associated with candidate page is + * different from the one we were just working + * with... dump the lock if we still own it + */ + if (l_object != NULL) { + vm_object_unlock(l_object); + l_object = NULL; + } + /* + * Try to lock object; since we've alread got the + * page queues lock, we can only 'try' for this one. 
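
hibernate_flush_queue() above (like vm_page_do_delayed_work() earlier) preserves the pageout-scan lock order, page queues lock before object lock, by only try-locking the object while the queues lock is held and backing off with mutex_pause() when the try fails. The retry shape, sketched with POSIX primitives standing in for the kernel locks:

	#include <pthread.h>
	#include <sched.h>

	extern pthread_mutex_t page_queues_lock;	/* stands in for vm_page_queue_lock */

	/* take an object lock without ever blocking while the queues lock is held */
	static void
	lock_object_in_pageout_order(pthread_mutex_t *object_lock)
	{
		pthread_mutex_lock(&page_queues_lock);

		while (pthread_mutex_trylock(object_lock) != 0) {
			/* back off: drop the hot lock and let the object's owner run */
			pthread_mutex_unlock(&page_queues_lock);
			sched_yield();			/* plays the role of mutex_pause() */
			pthread_mutex_lock(&page_queues_lock);
		}
		/* both locks are now held in the same order vm_pageout_scan uses */
	}
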
+ * if the 'try' fails, we need to do a mutex_pause + * to allow the owner of the object lock a chance to + * run... + */ + if ( !vm_object_lock_try_scan(m_object)) { + + if (try_failed_count > 20) { + hibernate_stats.hibernate_queue_nolock++; + + goto reenter_pg_on_q; + } + + vm_page_unlock_queues(); + mutex_pause(try_failed_count++); + vm_page_lock_queues(); + + hibernate_stats.hibernate_queue_paused++; + continue; + } else { + l_object = m_object; + } + } + if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) { + /* + * page is not to be cleaned + * put it back on the head of its queue + */ + if (m->cleaning) + hibernate_stats.hibernate_skipped_cleaning++; + else + hibernate_stats.hibernate_skipped_transient++; + + goto reenter_pg_on_q; + } + if (m_object->copy == VM_OBJECT_NULL) { + if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) { + /* + * let the normal hibernate image path + * deal with these + */ + goto reenter_pg_on_q; + } + } + if ( !m->dirty && m->pmapped) { + refmod_state = pmap_get_refmod(m->phys_page); + + if ((refmod_state & VM_MEM_MODIFIED)) { + SET_PAGE_DIRTY(m, FALSE); + } + } else + refmod_state = 0; + + if ( !m->dirty) { + /* + * page is not to be cleaned + * put it back on the head of its queue + */ + if (m->precious) + hibernate_stats.hibernate_skipped_precious++; + + goto reenter_pg_on_q; + } + + if (hibernate_skip_external == TRUE && !m_object->internal) { + + hibernate_stats.hibernate_skipped_external++; + + goto reenter_pg_on_q; + } + tq = NULL; + + if (m_object->internal) { + if (VM_PAGE_Q_THROTTLED(iq)) + tq = iq; + } else if (VM_PAGE_Q_THROTTLED(eq)) + tq = eq; + + if (tq != NULL) { + wait_result_t wait_result; + int wait_count = 5; + + if (l_object != NULL) { + vm_object_unlock(l_object); + l_object = NULL; + } + + while (retval == 0) { + + tq->pgo_throttled = TRUE; + + assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC); + + vm_page_unlock_queues(); + + wait_result = thread_block(THREAD_CONTINUE_NULL); + + vm_page_lock_queues(); + + if (wait_result != THREAD_TIMED_OUT) + break; + if (!VM_PAGE_Q_THROTTLED(tq)) + break; + + if (hibernate_should_abort()) + retval = 1; + + if (--wait_count == 0) { + + hibernate_stats.hibernate_throttle_timeout++; + + if (tq == eq) { + hibernate_skip_external = TRUE; + break; + } + retval = 1; + } + } + if (retval) + break; + + hibernate_stats.hibernate_throttled++; + + continue; + } + /* + * we've already factored out pages in the laundry which + * means this page can't be on the pageout queue so it's + * safe to do the vm_page_queues_remove + */ + assert(!m->pageout_queue); + + vm_page_queues_remove(m); + + if (COMPRESSED_PAGER_IS_ACTIVE && m_object->internal == TRUE) + pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL); + + (void)vm_pageout_cluster(m, FALSE, FALSE, FALSE); + + hibernate_stats.hibernate_found_dirty++; + + goto next_pg; + +reenter_pg_on_q: + queue_remove(q, m, vm_page_t, pageq); + queue_enter(q, m, vm_page_t, pageq); + + hibernate_stats.hibernate_reentered_on_q++; +next_pg: + hibernate_stats.hibernate_considered++; + + qcount--; + try_failed_count = 0; + } + if (l_object != NULL) { + vm_object_unlock(l_object); + l_object = NULL; + } + + vm_page_unlock_queues(); + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0); + + return (retval); +} + + +static int 
+hibernate_flush_dirty_pages(int pass) +{ + struct vm_speculative_age_q *aq; + uint32_t i; + + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) + vm_page_reactivate_local(i, TRUE, FALSE); + } + + for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) { + int qcount; + vm_page_t m; + + aq = &vm_page_queue_speculative[i]; + + if (queue_empty(&aq->age_q)) + continue; + qcount = 0; + + vm_page_lockspin_queues(); + + queue_iterate(&aq->age_q, + m, + vm_page_t, + pageq) + { + qcount++; + } + vm_page_unlock_queues(); + + if (qcount) { + if (hibernate_flush_queue(&aq->age_q, qcount)) + return (1); + } + } + if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) + return (1); + if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) + return (1); + if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) + return (1); + if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) + return (1); + + if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1) + vm_compressor_record_warmup_start(); + + if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) { + if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1) + vm_compressor_record_warmup_end(); + return (1); + } + if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) { + if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1) + vm_compressor_record_warmup_end(); + return (1); + } + if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1) + vm_compressor_record_warmup_end(); + + if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) + return (1); + + return (0); +} + + +void +hibernate_reset_stats() +{ + bzero(&hibernate_stats, sizeof(struct hibernate_statistics)); +} + + +int +hibernate_flush_memory() +{ + int retval; + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0); + + hibernate_cleaning_in_progress = TRUE; + hibernate_skip_external = FALSE; + + if ((retval = hibernate_flush_dirty_pages(1)) == 0) { + + if (COMPRESSED_PAGER_IS_ACTIVE) { + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0); + + vm_compressor_flush(); + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0); + } + if (consider_buffer_cache_collect != NULL) { + unsigned int orig_wire_count; + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0); + orig_wire_count = vm_page_wire_count; + + (void)(*consider_buffer_cache_collect)(1); + consider_zone_gc(TRUE); + + HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count); + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0); + } + } + hibernate_cleaning_in_progress = FALSE; + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0); + + if (retval && COMPRESSED_PAGER_IS_ACTIVE) + HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT); + + + HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n", + hibernate_stats.hibernate_considered, + hibernate_stats.hibernate_reentered_on_q, + hibernate_stats.hibernate_found_dirty); + HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) 
skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n", + hibernate_stats.hibernate_skipped_cleaning, + hibernate_stats.hibernate_skipped_transient, + hibernate_stats.hibernate_skipped_precious, + hibernate_stats.hibernate_skipped_external, + hibernate_stats.hibernate_queue_nolock); + HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n", + hibernate_stats.hibernate_queue_paused, + hibernate_stats.hibernate_throttled, + hibernate_stats.hibernate_throttle_timeout, + hibernate_stats.hibernate_drained, + hibernate_stats.hibernate_drain_timeout); + + return (retval); +} + + +static void +hibernate_page_list_zero(hibernate_page_list_t *list) +{ + uint32_t bank; + hibernate_bitmap_t * bitmap; + + bitmap = &list->bank_bitmap[0]; + for (bank = 0; bank < list->bank_count; bank++) + { + uint32_t last_bit; + + bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); + // set out-of-bound bits at end of bitmap. + last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31); + if (last_bit) + bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit); + + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; + } } void @@ -4322,7 +5633,7 @@ hibernate_free_gobble_pages(void) } static boolean_t -hibernate_consider_discard(vm_page_t m) +hibernate_consider_discard(vm_page_t m, boolean_t preflight) { vm_object_t object = NULL; int refmod_state; @@ -4330,45 +5641,54 @@ hibernate_consider_discard(vm_page_t m) do { - if(m->private) + if (m->private) panic("hibernate_consider_discard: private"); - if (!vm_object_lock_try(m->object)) + if (!vm_object_lock_try(m->object)) { + if (!preflight) hibernate_stats.cd_lock_failed++; break; - + } object = m->object; - if (VM_PAGE_WIRED(m)) + if (VM_PAGE_WIRED(m)) { + if (!preflight) hibernate_stats.cd_found_wired++; break; - if (m->precious) + } + if (m->precious) { + if (!preflight) hibernate_stats.cd_found_precious++; break; - - if (m->busy || !object->alive) + } + if (m->busy || !object->alive) { /* * Somebody is playing with this page. 
*/ + if (!preflight) hibernate_stats.cd_found_busy++; break; - - if (m->absent || m->unusual || m->error) + } + if (m->absent || m->unusual || m->error) { /* * If it's unusual in anyway, ignore it */ + if (!preflight) hibernate_stats.cd_found_unusual++; break; - - if (m->cleaning) + } + if (m->cleaning) { + if (!preflight) hibernate_stats.cd_found_cleaning++; break; - - if (m->laundry || m->list_req_pending) + } + if (m->laundry) { + if (!preflight) hibernate_stats.cd_found_laundry++; break; - + } if (!m->dirty) { refmod_state = pmap_get_refmod(m->phys_page); if (refmod_state & VM_MEM_REFERENCED) m->reference = TRUE; - if (refmod_state & VM_MEM_MODIFIED) - m->dirty = TRUE; + if (refmod_state & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(m, FALSE); + } } /* @@ -4376,7 +5696,22 @@ hibernate_consider_discard(vm_page_t m) */ discard = (!m->dirty) || (VM_PURGABLE_VOLATILE == object->purgable) - || (VM_PURGABLE_EMPTY == m->object->purgable); + || (VM_PURGABLE_EMPTY == object->purgable); + + + if (discard == FALSE) { + if (!preflight) + hibernate_stats.cd_found_dirty++; + } else if (m->xpmapped && m->reference && !object->internal) { + if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) { + if (!preflight) + hibernate_stats.cd_found_xpmapped++; + discard = FALSE; + } else { + if (!preflight) + hibernate_stats.cd_skipped_xpmapped++; + } + } } while (FALSE); @@ -4396,6 +5731,15 @@ hibernate_discard_page(vm_page_t m) */ return; +#if MACH_ASSERT || DEBUG + vm_object_t object = m->object; + if (!vm_object_lock_try(m->object)) + panic("hibernate_discard_page(%p) !vm_object_lock_try", m); +#else + /* No need to lock page queue for token delete, hibernate_vm_unlock() + makes sure these locks are uncontended before sleep */ +#endif /* MACH_ASSERT || DEBUG */ + if (m->pmapped == TRUE) { __unused int refmod_state = pmap_disconnect(m->phys_page); @@ -4414,13 +5758,66 @@ hibernate_discard_page(vm_page_t m) assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL)); purgeable_q_t old_queue = vm_purgeable_object_remove(m->object); assert(old_queue); - /* No need to lock page queue for token delete, hibernate_vm_unlock() - makes sure these locks are uncontended before sleep */ - vm_purgeable_token_delete_first(old_queue); + if (m->object->purgeable_when_ripe) { + vm_purgeable_token_delete_first(old_queue); + } m->object->purgable = VM_PURGABLE_EMPTY; + + /* + * Purgeable ledgers: pages of VOLATILE and EMPTY objects are + * accounted in the "volatile" ledger, so no change here. + * We have to update vm_page_purgeable_count, though, since we're + * effectively purging this object. 
+ */ + unsigned int delta; + assert(m->object->resident_page_count >= m->object->wired_page_count); + delta = (m->object->resident_page_count - m->object->wired_page_count); + assert(vm_page_purgeable_count >= delta); + assert(delta > 0); + OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count); } vm_page_free(m); + +#if MACH_ASSERT || DEBUG + vm_object_unlock(object); +#endif /* MACH_ASSERT || DEBUG */ +} + +/* + Grab locks for hibernate_page_list_setall() +*/ +void +hibernate_vm_lock_queues(void) +{ + vm_object_lock(compressor_object); + vm_page_lock_queues(); + lck_mtx_lock(&vm_page_queue_free_lock); + + if (vm_page_local_q) { + uint32_t i; + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + } + } +} + +void +hibernate_vm_unlock_queues(void) +{ + if (vm_page_local_q) { + uint32_t i; + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_UNLOCK(&lq->vpl_lock); + } + } + lck_mtx_unlock(&vm_page_queue_free_lock); + vm_page_unlock_queues(); + vm_object_unlock(compressor_object); } /* @@ -4432,46 +5829,110 @@ hibernate_discard_page(vm_page_t m) void hibernate_page_list_setall(hibernate_page_list_t * page_list, hibernate_page_list_t * page_list_wired, + hibernate_page_list_t * page_list_pal, + boolean_t preflight, + boolean_t will_discard, uint32_t * pagesOut) { uint64_t start, end, nsec; vm_page_t m; + vm_page_t next; uint32_t pages = page_list->page_count; - uint32_t count_zf = 0, count_throttled = 0; - uint32_t count_inactive = 0, count_active = 0, count_speculative = 0; + uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0; + uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0; uint32_t count_wire = pages; uint32_t count_discard_active = 0; uint32_t count_discard_inactive = 0; + uint32_t count_discard_cleaned = 0; uint32_t count_discard_purgeable = 0; uint32_t count_discard_speculative = 0; + uint32_t count_discard_vm_struct_pages = 0; uint32_t i; uint32_t bank; hibernate_bitmap_t * bitmap; hibernate_bitmap_t * bitmap_wired; + boolean_t discard_all; + boolean_t discard; + + HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight); + + if (preflight) { + page_list = NULL; + page_list_wired = NULL; + page_list_pal = NULL; + discard_all = FALSE; + } else { + discard_all = will_discard; + } + +#if MACH_ASSERT || DEBUG + if (!preflight) + { + vm_page_lock_queues(); + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + } + } + } +#endif /* MACH_ASSERT || DEBUG */ - HIBLOG("hibernate_page_list_setall start\n"); + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0); clock_get_uptime(&start); - hibernate_page_list_zero(page_list); - hibernate_page_list_zero(page_list_wired); + if (!preflight) { + hibernate_page_list_zero(page_list); + hibernate_page_list_zero(page_list_wired); + hibernate_page_list_zero(page_list_pal); + + hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count; + hibernate_stats.cd_pages = pages; + } if (vm_page_local_q) { for (i = 0; i < vm_page_local_q_count; i++) - vm_page_reactivate_local(i, TRUE, TRUE); + vm_page_reactivate_local(i, TRUE, !preflight); + } + + if (preflight) { + vm_object_lock(compressor_object); + vm_page_lock_queues(); + lck_mtx_lock(&vm_page_queue_free_lock); } m = (vm_page_t) hibernate_gobble_queue; - 
while(m) + while (m) { pages--; count_wire--; - hibernate_page_bitset(page_list, TRUE, m->phys_page); - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, m->phys_page); + hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + } m = (vm_page_t) m->pageq.next; } + if (!preflight) for( i = 0; i < real_ncpus; i++ ) + { + if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor) + { + for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = (vm_page_t)m->pageq.next) + { + pages--; + count_wire--; + hibernate_page_bitset(page_list, TRUE, m->phys_page); + hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + + hibernate_stats.cd_local_free++; + hibernate_stats.cd_total_free++; + } + } + } + for( i = 0; i < vm_colors; i++ ) { queue_iterate(&vm_page_queue_free[i], @@ -4481,8 +5942,12 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, { pages--; count_wire--; - hibernate_page_bitset(page_list, TRUE, m->phys_page); - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, m->phys_page); + hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + + hibernate_stats.cd_total_free++; + } } } @@ -4493,131 +5958,231 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, { pages--; count_wire--; - hibernate_page_bitset(page_list, TRUE, m->phys_page); - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, m->phys_page); + hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + + hibernate_stats.cd_total_free++; + } } - queue_iterate( &vm_page_queue_throttled, - m, - vm_page_t, - pageq ) + m = (vm_page_t) queue_first(&vm_page_queue_throttled); + while (m && !queue_end(&vm_page_queue_throttled, (queue_entry_t)m)) { + next = (vm_page_t) m->pageq.next; + discard = FALSE; if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m)) + && hibernate_consider_discard(m, preflight)) { - hibernate_page_bitset(page_list, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); count_discard_inactive++; + discard = discard_all; } else count_throttled++; count_wire--; - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + + if (discard) hibernate_discard_page(m); + m = next; } - queue_iterate( &vm_page_queue_zf, - m, - vm_page_t, - pageq ) + m = (vm_page_t) queue_first(&vm_page_queue_anonymous); + while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m)) { + next = (vm_page_t) m->pageq.next; + discard = FALSE; if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m)) + && hibernate_consider_discard(m, preflight)) { - hibernate_page_bitset(page_list, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); if (m->dirty) count_discard_purgeable++; else count_discard_inactive++; + discard = discard_all; } else - count_zf++; + count_anonymous++; + count_wire--; + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (discard) hibernate_discard_page(m); + m = next; + } + + m = (vm_page_t) queue_first(&vm_page_queue_cleaned); + while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & 
gIOHibernateMode) + && hibernate_consider_discard(m, preflight)) + { + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); + if (m->dirty) + count_discard_purgeable++; + else + count_discard_cleaned++; + discard = discard_all; + } + else + count_cleaned++; + count_wire--; + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (discard) hibernate_discard_page(m); + m = next; + } + + m = (vm_page_t) queue_first(&vm_page_queue_active); + while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + discard = FALSE; + if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) + && hibernate_consider_discard(m, preflight)) + { + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); + if (m->dirty) + count_discard_purgeable++; + else + count_discard_active++; + discard = discard_all; + } + else + count_active++; count_wire--; - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (discard) hibernate_discard_page(m); + m = next; } - queue_iterate( &vm_page_queue_inactive, - m, - vm_page_t, - pageq ) + m = (vm_page_t) queue_first(&vm_page_queue_inactive); + while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m)) { + next = (vm_page_t) m->pageq.next; + discard = FALSE; if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m)) + && hibernate_consider_discard(m, preflight)) { - hibernate_page_bitset(page_list, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); if (m->dirty) count_discard_purgeable++; else count_discard_inactive++; + discard = discard_all; } else count_inactive++; count_wire--; - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (discard) hibernate_discard_page(m); + m = next; } for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) { - queue_iterate(&vm_page_queue_speculative[i].age_q, - m, - vm_page_t, - pageq) - { - if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m)) - { - hibernate_page_bitset(page_list, TRUE, m->phys_page); - count_discard_speculative++; - } - else - count_speculative++; - count_wire--; - hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); - } + m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q); + while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) + && hibernate_consider_discard(m, preflight)) + { + if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page); + count_discard_speculative++; + discard = discard_all; + } + else + count_speculative++; + count_wire--; + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (discard) hibernate_discard_page(m); + m = next; + } } - queue_iterate( &vm_page_queue_active, - m, - vm_page_t, - pageq ) + queue_iterate(&compressor_object->memq, m, vm_page_t, listq) { - if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) - && hibernate_consider_discard(m)) - { - hibernate_page_bitset(page_list, TRUE, m->phys_page); - if (m->dirty) - count_discard_purgeable++; - else - count_discard_active++; - } - else - count_active++; + count_compressor++; count_wire--; - 
hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); + if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); } - // pull wired from hibernate_bitmap + if (preflight == FALSE && discard_all == TRUE) { + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START, 0, 0, 0, 0, 0); - bitmap = &page_list->bank_bitmap[0]; - bitmap_wired = &page_list_wired->bank_bitmap[0]; - for (bank = 0; bank < page_list->bank_count; bank++) - { - for (i = 0; i < bitmap->bitmapwords; i++) - bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i]; - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords]; - bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords]; + HIBLOG("hibernate_teardown started\n"); + count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired); + HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages); + + pages -= count_discard_vm_struct_pages; + count_wire -= count_discard_vm_struct_pages; + + hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages; + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0); + } + + if (!preflight) { + // pull wired from hibernate_bitmap + bitmap = &page_list->bank_bitmap[0]; + bitmap_wired = &page_list_wired->bank_bitmap[0]; + for (bank = 0; bank < page_list->bank_count; bank++) + { + for (i = 0; i < bitmap->bitmapwords; i++) + bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i]; + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords]; + bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords]; + } } // machine dependent adjustments - hibernate_page_list_setall_machine(page_list, page_list_wired, &pages); + hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages); + + if (!preflight) { + hibernate_stats.cd_count_wire = count_wire; + hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable + + count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages; + } clock_get_uptime(&end); absolutetime_to_nanoseconds(end - start, &nsec); HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL); - HIBLOG("pages %d, wire %d, act %d, inact %d, spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d\n", - pages, count_wire, count_active, count_inactive, count_speculative, count_zf, count_throttled, - count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative); + HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", + pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped, + discard_all ? 
"did" : "could", + count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); + + if (hibernate_stats.cd_skipped_xpmapped) + HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped); - *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative; + *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned; + + if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active; + +#if MACH_ASSERT || DEBUG + if (!preflight) + { + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_UNLOCK(&lq->vpl_lock); + } + } + vm_page_unlock_queues(); + } +#endif /* MACH_ASSERT || DEBUG */ + + if (preflight) { + lck_mtx_unlock(&vm_page_queue_free_lock); + vm_page_unlock_queues(); + vm_object_unlock(compressor_object); + } + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0); } void @@ -4630,12 +6195,55 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) uint32_t count_discard_active = 0; uint32_t count_discard_inactive = 0; uint32_t count_discard_purgeable = 0; + uint32_t count_discard_cleaned = 0; uint32_t count_discard_speculative = 0; + +#if MACH_ASSERT || DEBUG + vm_page_lock_queues(); + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + } + } +#endif /* MACH_ASSERT || DEBUG */ + clock_get_uptime(&start); - m = (vm_page_t) queue_first(&vm_page_queue_zf); - while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m)) + m = (vm_page_t) queue_first(&vm_page_queue_anonymous); + while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + if (hibernate_page_bittst(page_list, m->phys_page)) + { + if (m->dirty) + count_discard_purgeable++; + else + count_discard_inactive++; + hibernate_discard_page(m); + } + m = next; + } + + for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) + { + m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q); + while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + if (hibernate_page_bittst(page_list, m->phys_page)) + { + count_discard_speculative++; + hibernate_discard_page(m); + } + m = next; + } + } + + m = (vm_page_t) queue_first(&vm_page_queue_inactive); + while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m)) { next = (vm_page_t) m->pageq.next; if (hibernate_page_bittst(page_list, m->phys_page)) @@ -4649,58 +6257,403 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) m = next; } - for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) - { - m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q); - while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m)) - { - next = (vm_page_t) m->pageq.next; - if (hibernate_page_bittst(page_list, m->phys_page)) - { - count_discard_speculative++; - hibernate_discard_page(m); - } - m = next; - } - } + m = (vm_page_t) queue_first(&vm_page_queue_active); + while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + 
if (hibernate_page_bittst(page_list, m->phys_page)) + { + if (m->dirty) + count_discard_purgeable++; + else + count_discard_active++; + hibernate_discard_page(m); + } + m = next; + } + + m = (vm_page_t) queue_first(&vm_page_queue_cleaned); + while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m)) + { + next = (vm_page_t) m->pageq.next; + if (hibernate_page_bittst(page_list, m->phys_page)) + { + if (m->dirty) + count_discard_purgeable++; + else + count_discard_cleaned++; + hibernate_discard_page(m); + } + m = next; + } + +#if MACH_ASSERT || DEBUG + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_UNLOCK(&lq->vpl_lock); + } + } + vm_page_unlock_queues(); +#endif /* MACH_ASSERT || DEBUG */ + + clock_get_uptime(&end); + absolutetime_to_nanoseconds(end - start, &nsec); + HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n", + nsec / 1000000ULL, + count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); +} + +boolean_t hibernate_paddr_map_inited = FALSE; +boolean_t hibernate_rebuild_needed = FALSE; +unsigned int hibernate_teardown_last_valid_compact_indx = -1; +vm_page_t hibernate_rebuild_hash_list = NULL; + +unsigned int hibernate_teardown_found_tabled_pages = 0; +unsigned int hibernate_teardown_found_created_pages = 0; +unsigned int hibernate_teardown_found_free_pages = 0; +unsigned int hibernate_teardown_vm_page_free_count; + + +struct ppnum_mapping { + struct ppnum_mapping *ppnm_next; + ppnum_t ppnm_base_paddr; + unsigned int ppnm_sindx; + unsigned int ppnm_eindx; +}; + +struct ppnum_mapping *ppnm_head; +struct ppnum_mapping *ppnm_last_found = NULL; + + +void +hibernate_create_paddr_map() +{ + unsigned int i; + ppnum_t next_ppnum_in_run = 0; + struct ppnum_mapping *ppnm = NULL; + + if (hibernate_paddr_map_inited == FALSE) { + + for (i = 0; i < vm_pages_count; i++) { + + if (ppnm) + ppnm->ppnm_eindx = i; + + if (ppnm == NULL || vm_pages[i].phys_page != next_ppnum_in_run) { + + ppnm = kalloc(sizeof(struct ppnum_mapping)); + + ppnm->ppnm_next = ppnm_head; + ppnm_head = ppnm; + + ppnm->ppnm_sindx = i; + ppnm->ppnm_base_paddr = vm_pages[i].phys_page; + } + next_ppnum_in_run = vm_pages[i].phys_page + 1; + } + ppnm->ppnm_eindx++; + + hibernate_paddr_map_inited = TRUE; + } +} + +ppnum_t +hibernate_lookup_paddr(unsigned int indx) +{ + struct ppnum_mapping *ppnm = NULL; + + ppnm = ppnm_last_found; + + if (ppnm) { + if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) + goto done; + } + for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) { + + if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) { + ppnm_last_found = ppnm; + break; + } + } + if (ppnm == NULL) + panic("hibernate_lookup_paddr of %d failed\n", indx); +done: + return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx)); +} + + +uint32_t +hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired) +{ + addr64_t saddr_aligned; + addr64_t eaddr_aligned; + addr64_t addr; + ppnum_t paddr; + unsigned int mark_as_unneeded_pages = 0; + + saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64; + eaddr_aligned = eaddr & ~PAGE_MASK_64; + + for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) { + + paddr = pmap_find_phys(kernel_pmap, addr); + + assert(paddr); + + hibernate_page_bitset(page_list, TRUE, paddr); + 
hibernate_page_bitset(page_list_wired, TRUE, paddr); + + mark_as_unneeded_pages++; + } + return (mark_as_unneeded_pages); +} + + +void +hibernate_hash_insert_page(vm_page_t mem) +{ + vm_page_bucket_t *bucket; + int hash_id; + + assert(mem->hashed); + assert(mem->object); + assert(mem->offset != (vm_object_offset_t) -1); + + /* + * Insert it into the object_object/offset hash table + */ + hash_id = vm_page_hash(mem->object, mem->offset); + bucket = &vm_page_buckets[hash_id]; + + mem->next_m = bucket->page_list; + bucket->page_list = VM_PAGE_PACK_PTR(mem); +} + + +void +hibernate_free_range(int sindx, int eindx) +{ + vm_page_t mem; + unsigned int color; + + while (sindx < eindx) { + mem = &vm_pages[sindx]; + + vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE); + + mem->lopage = FALSE; + mem->free = TRUE; + + color = mem->phys_page & vm_color_mask; + queue_enter_first(&vm_page_queue_free[color], + mem, + vm_page_t, + pageq); + vm_page_free_count++; + + sindx++; + } +} + + +extern void hibernate_rebuild_pmap_structs(void); + +void +hibernate_rebuild_vm_structs(void) +{ + int cindx, sindx, eindx; + vm_page_t mem, tmem, mem_next; + AbsoluteTime startTime, endTime; + uint64_t nsec; + + if (hibernate_rebuild_needed == FALSE) + return; + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0); + HIBLOG("hibernate_rebuild started\n"); + + clock_get_uptime(&startTime); + + hibernate_rebuild_pmap_structs(); + + bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t)); + eindx = vm_pages_count; + + for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) { + + mem = &vm_pages[cindx]; + /* + * hibernate_teardown_vm_structs leaves the location where + * this vm_page_t must be located in "next". + */ + tmem = VM_PAGE_UNPACK_PTR(mem->next_m); + mem->next_m = VM_PAGE_PACK_PTR(NULL); + + sindx = (int)(tmem - &vm_pages[0]); + + if (mem != tmem) { + /* + * this vm_page_t was moved by hibernate_teardown_vm_structs, + * so move it back to its real location + */ + *tmem = *mem; + mem = tmem; + } + if (mem->hashed) + hibernate_hash_insert_page(mem); + /* + * the 'hole' between this vm_page_t and the previous + * vm_page_t we moved needs to be initialized as + * a range of free vm_page_t's + */ + hibernate_free_range(sindx + 1, eindx); + + eindx = sindx; + } + if (sindx) + hibernate_free_range(0, sindx); + + assert(vm_page_free_count == hibernate_teardown_vm_page_free_count); + + /* + * process the list of vm_page_t's that were entered in the hash, + * but were not located in the vm_pages arrary... these are + * vm_page_t's that were created on the fly (i.e. 
fictitious) + */ + for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) { + mem_next = VM_PAGE_UNPACK_PTR(mem->next_m); + + mem->next_m = VM_PAGE_PACK_PTR(NULL); + hibernate_hash_insert_page(mem); + } + hibernate_rebuild_hash_list = NULL; + + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nsec); + + HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL); + + hibernate_rebuild_needed = FALSE; + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0); +} + + +extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *); + +uint32_t +hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired) +{ + unsigned int i; + unsigned int compact_target_indx; + vm_page_t mem, mem_next; + vm_page_bucket_t *bucket; + unsigned int mark_as_unneeded_pages = 0; + unsigned int unneeded_vm_page_bucket_pages = 0; + unsigned int unneeded_vm_pages_pages = 0; + unsigned int unneeded_pmap_pages = 0; + addr64_t start_of_unneeded = 0; + addr64_t end_of_unneeded = 0; + + + if (hibernate_should_abort()) + return (0); + + HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n", + vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, + vm_page_cleaned_count, compressor_object->resident_page_count); + + for (i = 0; i < vm_page_bucket_count; i++) { + + bucket = &vm_page_buckets[i]; + + for (mem = VM_PAGE_UNPACK_PTR(bucket->page_list); mem != VM_PAGE_NULL; mem = mem_next) { + assert(mem->hashed); + + mem_next = VM_PAGE_UNPACK_PTR(mem->next_m); + + if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) { + mem->next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list); + hibernate_rebuild_hash_list = mem; + } + } + } + unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired); + mark_as_unneeded_pages += unneeded_vm_page_bucket_pages; + + hibernate_teardown_vm_page_free_count = vm_page_free_count; + + compact_target_indx = 0; + + for (i = 0; i < vm_pages_count; i++) { + + mem = &vm_pages[i]; + + if (mem->free) { + unsigned int color; + + assert(mem->busy); + assert(!mem->lopage); + + color = mem->phys_page & vm_color_mask; + + queue_remove(&vm_page_queue_free[color], + mem, + vm_page_t, + pageq); + mem->pageq.next = NULL; + mem->pageq.prev = NULL; + + vm_page_free_count--; + + hibernate_teardown_found_free_pages++; + + if ( !vm_pages[compact_target_indx].free) + compact_target_indx = i; + } else { + /* + * record this vm_page_t's original location + * we need this even if it doesn't get moved + * as an indicator to the rebuild function that + * we don't have to move it + */ + mem->next_m = VM_PAGE_PACK_PTR(mem); + + if (vm_pages[compact_target_indx].free) { + /* + * we've got a hole to fill, so + * move this vm_page_t to it's new home + */ + vm_pages[compact_target_indx] = *mem; + mem->free = TRUE; - m = (vm_page_t) queue_first(&vm_page_queue_inactive); - while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m)) - { - next = (vm_page_t) m->pageq.next; - if (hibernate_page_bittst(page_list, m->phys_page)) - { - if (m->dirty) - count_discard_purgeable++; - else - count_discard_inactive++; - hibernate_discard_page(m); - } - m = next; - } + hibernate_teardown_last_valid_compact_indx 
= compact_target_indx; + compact_target_indx++; + } else + hibernate_teardown_last_valid_compact_indx = i; + } + } + unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1], + (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired); + mark_as_unneeded_pages += unneeded_vm_pages_pages; - m = (vm_page_t) queue_first(&vm_page_queue_active); - while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m)) - { - next = (vm_page_t) m->pageq.next; - if (hibernate_page_bittst(page_list, m->phys_page)) - { - if (m->dirty) - count_discard_purgeable++; - else - count_discard_active++; - hibernate_discard_page(m); - } - m = next; - } + hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded); - clock_get_uptime(&end); - absolutetime_to_nanoseconds(end - start, &nsec); - HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d\n", - nsec / 1000000ULL, - count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative); + if (start_of_unneeded) { + unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired); + mark_as_unneeded_pages += unneeded_pmap_pages; + } + HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages); + + hibernate_rebuild_needed = TRUE; + + return (mark_as_unneeded_pages); } + #endif /* HIBERNATION */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -4741,7 +6694,7 @@ vm_page_info( bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK]; lck_spin_lock(bucket_lock); - for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next) + for (m = VM_PAGE_UNPACK_PTR(bucket->page_list); m != VM_PAGE_NULL; m = VM_PAGE_UNPACK_PTR(m->next_m)) bucket_count++; lck_spin_unlock(bucket_lock); @@ -4754,63 +6707,653 @@ vm_page_info( } #endif /* MACH_VM_DEBUG */ -#include -#if MACH_KDB +#if VM_PAGE_BUCKETS_CHECK +void +vm_page_buckets_check(void) +{ + unsigned int i; + vm_page_t p; + unsigned int p_hash; + vm_page_bucket_t *bucket; + lck_spin_t *bucket_lock; + + if (!vm_page_buckets_check_ready) { + return; + } + +#if HIBERNATION + if (hibernate_rebuild_needed || + hibernate_rebuild_hash_list) { + panic("BUCKET_CHECK: hibernation in progress: " + "rebuild_needed=%d rebuild_hash_list=%p\n", + hibernate_rebuild_needed, + hibernate_rebuild_hash_list); + } +#endif /* HIBERNATION */ + +#if VM_PAGE_FAKE_BUCKETS + char *cp; + for (cp = (char *) vm_page_fake_buckets_start; + cp < (char *) vm_page_fake_buckets_end; + cp++) { + if (*cp != 0x5a) { + panic("BUCKET_CHECK: corruption at %p in fake buckets " + "[0x%llx:0x%llx]\n", + cp, + (uint64_t) vm_page_fake_buckets_start, + (uint64_t) vm_page_fake_buckets_end); + } + } +#endif /* VM_PAGE_FAKE_BUCKETS */ + + for (i = 0; i < vm_page_bucket_count; i++) { + bucket = &vm_page_buckets[i]; + if (!bucket->page_list) { + continue; + } + + bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK]; + lck_spin_lock(bucket_lock); + p = VM_PAGE_UNPACK_PTR(bucket->page_list); + while (p != VM_PAGE_NULL) { + if (!p->hashed) { + panic("BUCKET_CHECK: page %p (%p,0x%llx) " + "hash %d in bucket %d at %p " + "is not hashed\n", + p, p->object, p->offset, + p_hash, i, bucket); + } + p_hash = vm_page_hash(p->object, p->offset); + if (p_hash != i) { + panic("BUCKET_CHECK: corruption in bucket %d " + "at %p: page %p object %p offset 0x%llx " + "hash %d\n", + 
i, bucket, p, p->object, p->offset, + p_hash); + } + p = VM_PAGE_UNPACK_PTR(p->next_m); + } + lck_spin_unlock(bucket_lock); + } -#include -#include -#define printf kdbprintf +// printf("BUCKET_CHECK: checked buckets\n"); +} +#endif /* VM_PAGE_BUCKETS_CHECK */ /* - * Routine: vm_page_print [exported] + * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the + * local queues if they exist... its the only spot in the system where we add pages + * to those queues... once on those queues, those pages can only move to one of the + * global page queues or the free queues... they NEVER move from local q to local q. + * the 'local' state is stable when vm_page_queues_remove is called since we're behind + * the global vm_page_queue_lock at this point... we still need to take the local lock + * in case this operation is being run on a different CPU then the local queue's identity, + * but we don't have to worry about the page moving to a global queue or becoming wired + * while we're grabbing the local lock since those operations would require the global + * vm_page_queue_lock to be held, and we already own it. + * + * this is why its safe to utilze the wire_count field in the vm_page_t as the local_id... + * 'wired' and local are ALWAYS mutually exclusive conditions. */ void -vm_page_print( - db_addr_t db_addr) +vm_page_queues_remove(vm_page_t mem) { - vm_page_t p; + boolean_t was_pageable; + + VM_PAGE_QUEUES_ASSERT(mem, 1); + assert(!mem->pageout_queue); + /* + * if (mem->pageout_queue) + * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue... + * the caller is responsible for determing if the page is on that queue, and if so, must + * either first remove it (it needs both the page queues lock and the object lock to do + * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove + */ + if (mem->local) { + struct vpl *lq; + assert(mem->object != kernel_object); + assert(mem->object != compressor_object); + assert(!mem->inactive && !mem->speculative); + assert(!mem->active && !mem->throttled); + assert(!mem->clean_queue); + assert(!mem->fictitious); + lq = &vm_page_local_q[mem->local_id].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + queue_remove(&lq->vpl_queue, + mem, vm_page_t, pageq); + mem->local = FALSE; + mem->local_id = 0; + lq->vpl_count--; + if (mem->object->internal) { + lq->vpl_internal_count--; + } else { + lq->vpl_external_count--; + } + VPL_UNLOCK(&lq->vpl_lock); + was_pageable = FALSE; + } + + else if (mem->active) { + assert(mem->object != kernel_object); + assert(mem->object != compressor_object); + assert(!mem->inactive && !mem->speculative); + assert(!mem->clean_queue); + assert(!mem->throttled); + assert(!mem->fictitious); + queue_remove(&vm_page_queue_active, + mem, vm_page_t, pageq); + mem->active = FALSE; + vm_page_active_count--; + was_pageable = TRUE; + } - p = (vm_page_t) (long) db_addr; + else if (mem->inactive) { + assert(mem->object != kernel_object); + assert(mem->object != compressor_object); + assert(!mem->active && !mem->speculative); + assert(!mem->throttled); + assert(!mem->fictitious); + vm_page_inactive_count--; + if (mem->clean_queue) { + queue_remove(&vm_page_queue_cleaned, + mem, vm_page_t, pageq); + mem->clean_queue = FALSE; + vm_page_cleaned_count--; + } else { + if (mem->object->internal) { + queue_remove(&vm_page_queue_anonymous, + mem, vm_page_t, pageq); + vm_page_anonymous_count--; + } else { + queue_remove(&vm_page_queue_inactive, + mem, vm_page_t, pageq); + } + 
vm_purgeable_q_advance_all(); + } + mem->inactive = FALSE; + was_pageable = TRUE; + } - iprintf("page 0x%x\n", p); + else if (mem->throttled) { + assert(mem->object != compressor_object); + assert(!mem->active && !mem->inactive); + assert(!mem->speculative); + assert(!mem->fictitious); + queue_remove(&vm_page_queue_throttled, + mem, vm_page_t, pageq); + mem->throttled = FALSE; + vm_page_throttled_count--; + was_pageable = FALSE; + } - db_indent += 2; + else if (mem->speculative) { + assert(mem->object != compressor_object); + assert(!mem->active && !mem->inactive); + assert(!mem->throttled); + assert(!mem->fictitious); + remque(&mem->pageq); + mem->speculative = FALSE; + vm_page_speculative_count--; + was_pageable = TRUE; + } - iprintf("object=0x%x", p->object); - printf(", offset=0x%x", p->offset); - printf(", wire_count=%d", p->wire_count); + else if (mem->pageq.next || mem->pageq.prev) { + was_pageable = FALSE; + panic("vm_page_queues_remove: unmarked page on Q"); + } else { + was_pageable = FALSE; + } - iprintf("%slocal, %sinactive, %sactive, %sthrottled, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n", - (p->local ? "" : "!"), - (p->inactive ? "" : "!"), - (p->active ? "" : "!"), - (p->throttled ? "" : "!"), - (p->gobbled ? "" : "!"), - (p->laundry ? "" : "!"), - (p->free ? "" : "!"), - (p->reference ? "" : "!"), - (p->encrypted ? "" : "!")); - iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n", - (p->busy ? "" : "!"), - (p->wanted ? "" : "!"), - (p->tabled ? "" : "!"), - (p->fictitious ? "" : "!"), - (p->private ? "" : "!"), - (p->precious ? "" : "!")); - iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n", - (p->absent ? "" : "!"), - (p->error ? "" : "!"), - (p->dirty ? "" : "!"), - (p->cleaning ? "" : "!"), - (p->pageout ? "" : "!"), - (p->clustered ? "" : "!")); - iprintf("%soverwriting, %srestart, %sunusual\n", - (p->overwriting ? "" : "!"), - (p->restart ? "" : "!"), - (p->unusual ? 
"" : "!")); - - iprintf("phys_page=0x%x", p->phys_page); - - db_indent -= 2; + mem->pageq.next = NULL; + mem->pageq.prev = NULL; + VM_PAGE_QUEUES_ASSERT(mem, 0); + if (was_pageable) { + if (mem->object->internal) { + vm_page_pageable_internal_count--; + } else { + vm_page_pageable_external_count--; + } + } +} + +void +vm_page_remove_internal(vm_page_t page) +{ + vm_object_t __object = page->object; + if (page == __object->memq_hint) { + vm_page_t __new_hint; + queue_entry_t __qe; + __qe = queue_next(&page->listq); + if (queue_end(&__object->memq, __qe)) { + __qe = queue_prev(&page->listq); + if (queue_end(&__object->memq, __qe)) { + __qe = NULL; + } + } + __new_hint = (vm_page_t) __qe; + __object->memq_hint = __new_hint; + } + queue_remove(&__object->memq, page, vm_page_t, listq); +} + +void +vm_page_enqueue_inactive(vm_page_t mem, boolean_t first) +{ + VM_PAGE_QUEUES_ASSERT(mem, 0); + assert(!mem->fictitious); + assert(!mem->laundry); + assert(!mem->pageout_queue); + vm_page_check_pageable_safe(mem); + if (mem->object->internal) { + if (first == TRUE) + queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq); + else + queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq); + vm_page_anonymous_count++; + vm_page_pageable_internal_count++; + } else { + if (first == TRUE) + queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq); + else + queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq); + vm_page_pageable_external_count++; + } + mem->inactive = TRUE; + vm_page_inactive_count++; + token_new_pagecount++; +} + +/* + * Pages from special kernel objects shouldn't + * be placed on pageable queues. + */ +void +vm_page_check_pageable_safe(vm_page_t page) +{ + if (page->object == kernel_object) { + panic("vm_page_check_pageable_safe: trying to add page" \ + "from kernel object (%p) to pageable queue", kernel_object); + } + + if (page->object == compressor_object) { + panic("vm_page_check_pageable_safe: trying to add page" \ + "from compressor object (%p) to pageable queue", compressor_object); + } + + if (page->object == vm_submap_object) { + panic("vm_page_check_pageable_safe: trying to add page" \ + "from submap object (%p) to pageable queue", vm_submap_object); + } +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * wired page diagnose + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include + +vm_allocation_site_t * +vm_allocation_sites[VM_KERN_MEMORY_COUNT]; + +vm_tag_t +vm_tag_bt(void) +{ + uintptr_t* frameptr; + uintptr_t* frameptr_next; + uintptr_t retaddr; + uintptr_t kstackb, kstackt; + const vm_allocation_site_t * site; + thread_t cthread; + + cthread = current_thread(); + if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK; + + kstackb = cthread->kernel_stack; + kstackt = kstackb + kernel_stack_size; + + /* Load stack frame pointer (EBP on x86) into frameptr */ + frameptr = __builtin_frame_address(0); + site = NULL; + while (frameptr != NULL) + { + /* Verify thread stack bounds */ + if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break; + + /* Next frame pointer is pointed to by the previous one */ + frameptr_next = (uintptr_t*) *frameptr; + + /* Pull return address from one spot above the frame pointer */ + retaddr = *(frameptr + 1); + + if ((retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) + { + site = OSKextGetAllocationSiteForCaller(retaddr); + break; + } + + frameptr = frameptr_next; + } + return (site ? 
site->tag : VM_KERN_MEMORY_NONE); +} + +static uint64_t free_tag_bits[256/64]; + +void +vm_tag_alloc_locked(vm_allocation_site_t * site) +{ + vm_tag_t tag; + uint64_t avail; + uint64_t idx; + + if (site->tag) return; + + idx = 0; + while (TRUE) + { + avail = free_tag_bits[idx]; + if (avail) + { + tag = __builtin_clzll(avail); + avail &= ~(1ULL << (63 - tag)); + free_tag_bits[idx] = avail; + tag += (idx << 6); + break; + } + idx++; + if (idx >= (sizeof(free_tag_bits) / sizeof(free_tag_bits[0]))) + { + tag = VM_KERN_MEMORY_ANY; + break; + } + } + site->tag = tag; + if (VM_KERN_MEMORY_ANY != tag) + { + assert(!vm_allocation_sites[tag]); + vm_allocation_sites[tag] = site; + } +} + +static void +vm_tag_free_locked(vm_tag_t tag) +{ + uint64_t avail; + uint32_t idx; + uint64_t bit; + + if (VM_KERN_MEMORY_ANY == tag) return; + + idx = (tag >> 6); + avail = free_tag_bits[idx]; + tag &= 63; + bit = (1ULL << (63 - tag)); + assert(!(avail & bit)); + free_tag_bits[idx] = (avail | bit); +} + +static void +vm_tag_init(void) +{ + vm_tag_t tag; + for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) + { + vm_tag_free_locked(tag); + } +} + +vm_tag_t +vm_tag_alloc(vm_allocation_site_t * site) +{ + vm_tag_t tag; + + if (VM_TAG_BT & site->flags) + { + tag = vm_tag_bt(); + if (VM_KERN_MEMORY_NONE != tag) return (tag); + } + + if (!site->tag) + { + lck_spin_lock(&vm_allocation_sites_lock); + vm_tag_alloc_locked(site); + lck_spin_unlock(&vm_allocation_sites_lock); + } + + return (site->tag); +} + +static void +vm_page_count_object(mach_memory_info_t * sites, unsigned int __unused num_sites, vm_object_t object) +{ + if (!object->wired_page_count) return; + if (object != kernel_object) + { + assert(object->wire_tag < num_sites); + sites[object->wire_tag].size += ptoa_64(object->wired_page_count); + } +} + +typedef void (*vm_page_iterate_proc)(mach_memory_info_t * sites, + unsigned int num_sites, vm_object_t object); + +static void +vm_page_iterate_purgeable_objects(mach_memory_info_t * sites, unsigned int num_sites, + vm_page_iterate_proc proc, purgeable_q_t queue, + int group) +{ + vm_object_t object; + + for (object = (vm_object_t) queue_first(&queue->objq[group]); + !queue_end(&queue->objq[group], (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) + { + proc(sites, num_sites, object); + } +} + +static void +vm_page_iterate_objects(mach_memory_info_t * sites, unsigned int num_sites, + vm_page_iterate_proc proc) +{ + purgeable_q_t volatile_q; + queue_head_t * nonvolatile_q; + vm_object_t object; + int group; + + lck_spin_lock(&vm_objects_wired_lock); + queue_iterate(&vm_objects_wired, + object, + vm_object_t, + objq) + { + proc(sites, num_sites, object); + } + lck_spin_unlock(&vm_objects_wired_lock); + + lck_mtx_lock(&vm_purgeable_queue_lock); + nonvolatile_q = &purgeable_nonvolatile_queue; + for (object = (vm_object_t) queue_first(nonvolatile_q); + !queue_end(nonvolatile_q, (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) + { + proc(sites, num_sites, object); + } + + volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; + vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, 0); + + volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + { + vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group); + } + + volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + { + 
vm_page_iterate_purgeable_objects(sites, num_sites, proc, volatile_q, group); + } + lck_mtx_unlock(&vm_purgeable_queue_lock); +} + +static uint64_t +process_account(mach_memory_info_t * sites, unsigned int __unused num_sites) +{ + uint64_t found; + unsigned int idx; + vm_allocation_site_t * site; + + assert(num_sites >= VM_KERN_MEMORY_COUNT); + found = 0; + for (idx = 0; idx < VM_KERN_MEMORY_COUNT; idx++) + { + found += sites[idx].size; + if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) + { + sites[idx].site = idx; + sites[idx].flags |= VM_KERN_SITE_TAG; + if (VM_KERN_MEMORY_ZONE == idx) sites[idx].flags |= VM_KERN_SITE_HIDE; + else sites[idx].flags |= VM_KERN_SITE_WIRED; + continue; + } + lck_spin_lock(&vm_allocation_sites_lock); + if ((site = vm_allocation_sites[idx])) + { + if (sites[idx].size) + { + sites[idx].flags |= VM_KERN_SITE_WIRED; + if (VM_TAG_KMOD == (VM_KERN_SITE_TYPE & site->flags)) + { + sites[idx].site = OSKextGetKmodIDForSite(site); + sites[idx].flags |= VM_KERN_SITE_KMOD; + } + else + { + sites[idx].site = VM_KERNEL_UNSLIDE(site); + sites[idx].flags |= VM_KERN_SITE_KERNEL; + } + site = NULL; + } + else + { + vm_tag_free_locked(site->tag); + site->tag = VM_KERN_MEMORY_NONE; + vm_allocation_sites[idx] = NULL; + if (!(VM_TAG_UNLOAD & site->flags)) site = NULL; + } + } + lck_spin_unlock(&vm_allocation_sites_lock); + if (site) OSKextFreeSite(site); + } + return (found); +} + +kern_return_t +vm_page_diagnose(mach_memory_info_t * sites, unsigned int num_sites) +{ + enum { kMaxKernelDepth = 1 }; + vm_map_t maps [kMaxKernelDepth]; + vm_map_entry_t entries[kMaxKernelDepth]; + vm_map_t map; + vm_map_entry_t entry; + vm_object_offset_t offset; + vm_page_t page; + int stackIdx, count; + uint64_t wired_size; + uint64_t wired_managed_size; + uint64_t wired_reserved_size; + mach_memory_info_t * counts; + + bzero(sites, num_sites * sizeof(mach_memory_info_t)); + + vm_page_iterate_objects(sites, num_sites, &vm_page_count_object); + + wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count); + wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count); + wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial); + + assert(num_sites >= (VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT)); + counts = &sites[VM_KERN_MEMORY_COUNT]; + +#define SET_COUNT(xcount, xsize, xflags) \ + counts[xcount].site = (xcount); \ + counts[xcount].size = (xsize); \ + counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags; + + SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0); + SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0); + SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0); + SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED); + +#define SET_MAP(xcount, xsize, xfree, xlargest) \ + counts[xcount].site = (xcount); \ + counts[xcount].size = (xsize); \ + counts[xcount].free = (xfree); \ + counts[xcount].largest = (xlargest); \ + counts[xcount].flags = VM_KERN_SITE_COUNTER; + + vm_map_size_t map_size, map_free, map_largest; + + vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest); + + vm_map_sizes(zone_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest); + + vm_map_sizes(kalloc_map, 
&map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest); + + map = kernel_map; + stackIdx = 0; + while (map) + { + vm_map_lock(map); + for (entry = map->hdr.links.next; map; entry = entry->links.next) + { + if (entry->is_sub_map) + { + assert(stackIdx < kMaxKernelDepth); + maps[stackIdx] = map; + entries[stackIdx] = entry; + stackIdx++; + map = VME_SUBMAP(entry); + entry = NULL; + break; + } + if (VME_OBJECT(entry) == kernel_object) + { + count = 0; + vm_object_lock(VME_OBJECT(entry)); + for (offset = entry->links.start; offset < entry->links.end; offset += page_size) + { + page = vm_page_lookup(VME_OBJECT(entry), offset); + if (page && VM_PAGE_WIRED(page)) count++; + } + vm_object_unlock(VME_OBJECT(entry)); + + if (count) + { + assert(VME_ALIAS(entry) < num_sites); + sites[VME_ALIAS(entry)].size += ptoa_64(count); + } + } + if (entry == vm_map_last_entry(map)) + { + vm_map_unlock(map); + if (!stackIdx) map = NULL; + else + { + --stackIdx; + map = maps[stackIdx]; + entry = entries[stackIdx]; + } + } + } + } + + process_account(sites, num_sites); + + return (KERN_SUCCESS); } -#endif /* MACH_KDB */
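
/*
 * Editor's note (illustration, not part of the kernel sources): the diff above
 * introduces vm_tag_init()/vm_tag_alloc_locked()/vm_tag_free_locked(), which hand
 * out allocation-site tags from a 256-bit free bitmap stored MSB-first so that
 * __builtin_clzll() on a word yields the lowest free tag in that word. The
 * fragment below is a minimal userspace sketch of that bitmap scheme only; the
 * names tag_acquire(), tag_release() and free_bits are illustrative and are not
 * kernel APIs.
 */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_MAX 256
static uint64_t free_bits[TAG_MAX / 64];   /* bit set => tag is free */

static void
tag_release(unsigned tag)
{
	assert(tag < TAG_MAX);
	/* MSB-first: tag 0 of a word lives in bit 63, tag 63 in bit 0 */
	free_bits[tag >> 6] |= 1ULL << (63 - (tag & 63));
}

static int
tag_acquire(void)                          /* returns -1 when exhausted */
{
	unsigned idx;

	for (idx = 0; idx < TAG_MAX / 64; idx++) {
		uint64_t avail = free_bits[idx];

		if (avail) {
			/* leading-zero count == lowest free tag within this word */
			unsigned bit = (unsigned) __builtin_clzll(avail);

			free_bits[idx] = avail & ~(1ULL << (63 - bit));
			return (int) ((idx << 6) + bit);
		}
	}
	return (-1);
}

int
main(void)
{
	unsigned t;

	/* mark a small dynamic range free, as vm_tag_init() does for
	 * [VM_KERN_MEMORY_FIRST_DYNAMIC, VM_KERN_MEMORY_ANY) */
	for (t = 10; t <= 20; t++)
		tag_release(t);

	printf("%d %d\n", tag_acquire(), tag_acquire());   /* prints "10 11" */
	tag_release(10);
	printf("%d\n", tag_acquire());                     /* reuses 10 */
	return (0);
}

/*
 * The MSB-first layout is the reason vm_tag_alloc_locked() can use a single
 * count-leading-zeros instruction per word and still return the numerically
 * smallest available tag, keeping the dynamic tag space densely packed.
 */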