X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..3e170ce000f1506b7b5d2c5c7faec85ceabb573d:/osfmk/vm/vm_object.c diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c index 1c0138d82..a2d77426b 100644 --- a/osfmk/vm/vm_object.c +++ b/osfmk/vm/vm_object.c @@ -72,12 +72,13 @@ #include #include +#include + #include #include #include #include -#include #include #include #include @@ -88,6 +89,7 @@ #include #include +#include #include #include #include @@ -96,10 +98,53 @@ #include #include -#if CONFIG_EMBEDDED -#include +#include + +#if CONFIG_PHANTOM_CACHE +#include #endif +boolean_t vm_object_collapse_compressor_allowed = TRUE; + +struct vm_counters vm_counters; + +#if VM_OBJECT_TRACKING +boolean_t vm_object_tracking_inited = FALSE; +decl_simple_lock_data(static,vm_object_tracking_lock_data); +btlog_t *vm_object_tracking_btlog; +static void +vm_object_tracking_lock(void *context) +{ + simple_lock((simple_lock_t)context); +} +static void +vm_object_tracking_unlock(void *context) +{ + simple_unlock((simple_lock_t)context); +} +void +vm_object_tracking_init(void) +{ + int vm_object_tracking; + + vm_object_tracking = 1; + PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking, + sizeof (vm_object_tracking)); + + if (vm_object_tracking) { + simple_lock_init(&vm_object_tracking_lock_data, 0); + vm_object_tracking_btlog = btlog_create( + 50000, + VM_OBJECT_TRACKING_BTDEPTH, + vm_object_tracking_lock, + vm_object_tracking_unlock, + &vm_object_tracking_lock_data); + assert(vm_object_tracking_btlog); + vm_object_tracking_inited = TRUE; + } +} +#endif /* VM_OBJECT_TRACKING */ + /* * Virtual memory objects maintain the actual data * associated with allocated virtual memory. A given @@ -207,6 +252,8 @@ static zone_t vm_object_zone; /* vm backing store zone */ static struct vm_object kernel_object_store; vm_object_t kernel_object; +static struct vm_object compressor_object_store; +vm_object_t compressor_object = &compressor_object_store; /* * The submap object is used as a placeholder for vm_map_submap @@ -346,6 +393,35 @@ unsigned int vm_object_reap_count_async = 0; #define vm_object_reaper_unlock() \ lck_mtx_unlock(&vm_object_reaper_lock_data) +#if CONFIG_IOSCHED +/* I/O Re-prioritization request list */ +queue_head_t io_reprioritize_list; +lck_spin_t io_reprioritize_list_lock; + +#define IO_REPRIORITIZE_LIST_LOCK() \ + lck_spin_lock(&io_reprioritize_list_lock) +#define IO_REPRIORITIZE_LIST_UNLOCK() \ + lck_spin_unlock(&io_reprioritize_list_lock) + +#define MAX_IO_REPRIORITIZE_REQS 8192 +zone_t io_reprioritize_req_zone; + +/* I/O Re-prioritization thread */ +int io_reprioritize_wakeup = 0; +static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused); + +#define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup) +#define IO_REPRIO_THREAD_CONTINUATION() \ +{ \ + assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \ + thread_block(io_reprioritize_thread); \ +} + +void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int); +void vm_page_handle_prio_inversion(vm_object_t, vm_page_t); +void vm_decmp_upl_reprioritize(upl_t, int); +#endif + #if 0 #undef KERNEL_DEBUG #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT @@ -412,12 +488,26 @@ vm_object_hash_insert( { queue_t bucket; + assert(vm_object_hash_lookup(entry->pager, FALSE) == NULL); + bucket = &vm_object_hashtable[vm_object_hash(entry->pager)]; queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link); + if (object->hashed) { + /* + * 
"hashed" was pre-set on this (new) object to avoid + * locking issues in vm_object_enter() (can't attempt to + * grab the object lock while holding the hash lock as + * a spinlock), so no need to set it here (and no need to + * hold the object's lock). + */ + } else { + vm_object_lock_assert_exclusive(object); + object->hashed = TRUE; + } + entry->object = object; - object->hashed = TRUE; } static vm_object_hash_entry_t @@ -459,11 +549,25 @@ _vm_object_allocate( *object = vm_object_template; queue_init(&object->memq); queue_init(&object->msr_q); -#if UPL_DEBUG +#if UPL_DEBUG || CONFIG_IOSCHED queue_init(&object->uplq); -#endif /* UPL_DEBUG */ +#endif vm_object_lock_init(object); object->vo_size = size; + +#if VM_OBJECT_TRACKING_OP_CREATED + if (vm_object_tracking_inited) { + void *bt[VM_OBJECT_TRACKING_BTDEPTH]; + int numsaved = 0; + + numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); + btlog_add_entry(vm_object_tracking_btlog, + object, + VM_OBJECT_TRACKING_OP_CREATED, + bt, + numsaved); + } +#endif /* VM_OBJECT_TRACKING_OP_CREATED */ } __private_extern__ vm_object_t @@ -488,6 +592,7 @@ lck_grp_t vm_object_cache_lck_grp; lck_grp_attr_t vm_object_lck_grp_attr; lck_attr_t vm_object_lck_attr; lck_attr_t kernel_object_lck_attr; +lck_attr_t compressor_object_lck_attr; /* * vm_object_bootstrap: @@ -574,6 +679,9 @@ vm_object_bootstrap(void) vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL; vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC; vm_object_template.paging_in_progress = 0; +#if __LP64__ + vm_object_template.__object1_unused_bits = 0; +#endif /* __LP64__ */ vm_object_template.activity_in_progress = 0; /* Begin bitfields */ @@ -589,8 +697,8 @@ vm_object_bootstrap(void) vm_object_template.pageout = FALSE; vm_object_template.alive = TRUE; vm_object_template.purgable = VM_PURGABLE_DENY; + vm_object_template.purgeable_when_ripe = FALSE; vm_object_template.shadowed = FALSE; - vm_object_template.silent_overwrite = FALSE; vm_object_template.advisory_pageout = FALSE; vm_object_template.true_share = FALSE; vm_object_template.terminating = FALSE; @@ -610,7 +718,9 @@ vm_object_bootstrap(void) vm_object_template.pages_created = 0; vm_object_template.pages_used = 0; vm_object_template.scan_collisions = 0; - +#if CONFIG_PHANTOM_CACHE + vm_object_template.phantom_object_id = 0; +#endif #if MACH_PAGEMAP vm_object_template.existence_map = VM_EXTERNAL_NULL; #endif /* MACH_PAGEMAP */ @@ -622,16 +732,18 @@ vm_object_bootstrap(void) /* cache bitfields */ vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT; vm_object_template.set_cache_attr = FALSE; + vm_object_template.object_slid = FALSE; vm_object_template.code_signed = FALSE; vm_object_template.hashed = FALSE; vm_object_template.transposed = FALSE; vm_object_template.mapping_in_progress = FALSE; + vm_object_template.phantom_isssd = FALSE; vm_object_template.volatile_empty = FALSE; vm_object_template.volatile_fault = FALSE; vm_object_template.all_reusable = FALSE; vm_object_template.blocked_access = FALSE; vm_object_template.__object2_unused_bits = 0; -#if UPL_DEBUG +#if CONFIG_IOSCHED || UPL_DEBUG vm_object_template.uplq.prev = NULL; vm_object_template.uplq.next = NULL; #endif /* UPL_DEBUG */ @@ -640,11 +752,24 @@ vm_object_bootstrap(void) sizeof (vm_object_template.pip_holders)); #endif /* VM_PIP_DEBUG */ - vm_object_template.objq.next=NULL; - vm_object_template.objq.prev=NULL; + vm_object_template.objq.next = NULL; + vm_object_template.objq.prev = NULL; + + vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; + 
vm_object_template.purgeable_queue_group = 0; vm_object_template.vo_cache_ts = 0; + + vm_object_template.wire_tag = VM_KERN_MEMORY_NONE; +#if DEBUG + bzero(&vm_object_template.purgeable_owner_bt[0], + sizeof (vm_object_template.purgeable_owner_bt)); + vm_object_template.vo_purgeable_volatilizer = NULL; + bzero(&vm_object_template.purgeable_volatilizer_bt[0], + sizeof (vm_object_template.purgeable_volatilizer_bt)); +#endif /* DEBUG */ + /* * Initialize the "kernel object" */ @@ -656,14 +781,13 @@ vm_object_bootstrap(void) * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size. */ -#ifdef ppc - _vm_object_allocate(vm_last_addr + 1, - kernel_object); -#else _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object); -#endif + + _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, + compressor_object); kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; /* * Initialize the "submap object". Make it as large as the @@ -671,13 +795,8 @@ vm_object_bootstrap(void) */ vm_submap_object = &vm_submap_object_store; -#ifdef ppc - _vm_object_allocate(vm_last_addr + 1, - vm_submap_object); -#else _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, vm_submap_object); -#endif vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; /* @@ -692,6 +811,30 @@ vm_object_bootstrap(void) #endif /* MACH_PAGEMAP */ } +#if CONFIG_IOSCHED +void +vm_io_reprioritize_init(void) +{ + kern_return_t result; + thread_t thread = THREAD_NULL; + + /* Initialze the I/O reprioritization subsystem */ + lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr); + queue_init(&io_reprioritize_list); + + io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req), + MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req), + 4096, "io_reprioritize_req"); + + result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread); + if (result == KERN_SUCCESS) { + thread_deallocate(thread); + } else { + panic("Could not create io_reprioritize_thread"); + } +} +#endif + void vm_object_reaper_init(void) { @@ -730,6 +873,8 @@ vm_object_init_lck_grp(void) lck_attr_setdefault(&vm_object_lck_attr); lck_attr_setdefault(&kernel_object_lck_attr); lck_attr_cleardebug(&kernel_object_lck_attr); + lck_attr_setdefault(&compressor_object_lck_attr); + lck_attr_cleardebug(&compressor_object_lck_attr); } #if VM_OBJECT_CACHE @@ -753,6 +898,7 @@ static int cache_shadows = TRUE; unsigned long vm_object_deallocate_shared_successes = 0; unsigned long vm_object_deallocate_shared_failures = 0; unsigned long vm_object_deallocate_shared_swap_failures = 0; + __private_extern__ void vm_object_deallocate( register vm_object_t object) @@ -769,20 +915,38 @@ vm_object_deallocate( if (object == VM_OBJECT_NULL) return; - if (object == kernel_object) { + if (object == kernel_object || object == compressor_object) { vm_object_lock_shared(object); OSAddAtomic(-1, &object->ref_count); if (object->ref_count == 0) { - panic("vm_object_deallocate: losing kernel_object\n"); + if (object == kernel_object) + panic("vm_object_deallocate: losing kernel_object\n"); + else + panic("vm_object_deallocate: losing compressor_object\n"); } vm_object_unlock(object); return; } - if (object->ref_count > 2 || - (!object->named && object->ref_count > 1)) { + if (object->ref_count == 2 && + object->named) { + /* + * This "named" object's reference count is about to + * drop from 2 to 1: + * we'll need to call memory_object_last_unmap(). 
+ */ + } else if (object->ref_count == 2 && + object->internal && + object->shadow != VM_OBJECT_NULL) { + /* + * This internal object's reference count is about to + * drop from 2 to 1 and it has a shadow object: + * we'll want to try and collapse this object with its + * shadow. + */ + } else if (object->ref_count >= 2) { UInt32 original_ref_count; volatile UInt32 *ref_count_p; Boolean atomic_swap; @@ -803,19 +967,30 @@ vm_object_deallocate( * Test again as "ref_count" could have changed. * "named" shouldn't change. */ - if (original_ref_count > 2 || - (!object->named && original_ref_count > 1)) { + if (original_ref_count == 2 && + object->named) { + /* need to take slow path for m_o_last_unmap() */ + atomic_swap = FALSE; + } else if (original_ref_count == 2 && + object->internal && + object->shadow != VM_OBJECT_NULL) { + /* need to take slow path for vm_object_collapse() */ + atomic_swap = FALSE; + } else if (original_ref_count < 2) { + /* need to take slow path for vm_object_terminate() */ + atomic_swap = FALSE; + } else { + /* try an atomic update with the shared lock */ atomic_swap = OSCompareAndSwap( original_ref_count, original_ref_count - 1, (UInt32 *) &object->ref_count); if (atomic_swap == FALSE) { vm_object_deallocate_shared_swap_failures++; + /* fall back to the slow path... */ } - - } else { - atomic_swap = FALSE; } + vm_object_unlock(object); if (atomic_swap) { @@ -1104,7 +1279,7 @@ vm_object_page_grab( p = next_p; next_p = (vm_page_t)queue_next(&next_p->listq); - if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->fictitious) + if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry || p->fictitious) goto move_page_in_obj; if (p->pmapped || p->dirty || p->precious) { @@ -1121,8 +1296,9 @@ vm_object_page_grab( if (refmod_state & VM_MEM_REFERENCED) p->reference = TRUE; - if (refmod_state & VM_MEM_MODIFIED) - p->dirty = TRUE; + if (refmod_state & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(p, FALSE); + } } if (p->dirty == FALSE && p->precious == FALSE) { @@ -1130,8 +1306,9 @@ vm_object_page_grab( if (refmod_state & VM_MEM_REFERENCED) p->reference = TRUE; - if (refmod_state & VM_MEM_MODIFIED) - p->dirty = TRUE; + if (refmod_state & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(p, FALSE); + } if (p->dirty == FALSE) goto take_page; @@ -1346,7 +1523,7 @@ vm_object_cache_evict( object->vo_cache_pages_to_scan--; - if (VM_PAGE_WIRED(p) || p->busy || p->cleaning) { + if (VM_PAGE_WIRED(p) || p->busy || p->cleaning || p->laundry) { queue_remove(&object->memq, p, vm_page_t, listq); queue_enter(&object->memq, p, vm_page_t, listq); @@ -1373,11 +1550,20 @@ vm_object_cache_evict( p->reference = FALSE; p->no_cache = FALSE; - VM_PAGE_QUEUES_REMOVE(p); - VM_PAGE_ENQUEUE_INACTIVE(p, TRUE); + /* + * we've already filtered out pages that are in the laundry + * so if we get here, this page can't be on the pageout queue + */ + assert(!p->pageout_queue); + + vm_page_queues_remove(p); + vm_page_enqueue_inactive(p, TRUE); ep_moved++; } else { +#if CONFIG_PHANTOM_CACHE + vm_phantom_cache_add_ghost(p); +#endif vm_page_free_prepare_queues(p); assert(p->pageq.next == NULL && p->pageq.prev == NULL); @@ -1685,6 +1871,7 @@ vm_object_terminate( * The VM object must be locked by caller. * The lock will be released on return and the VM object is no longer valid. */ + void vm_object_reap( vm_object_t object) @@ -1697,6 +1884,18 @@ vm_object_reap( vm_object_reap_count++; + /* + * Disown this purgeable object to cleanup its owner's purgeable + * ledgers. 
We need to do this before disconnecting the object + * from its pager, to properly account for compressed pages. + */ + if (object->internal && + object->purgable != VM_PURGABLE_DENY) { + vm_purgeable_accounting(object, + object->purgable, + TRUE); /* disown */ + } + pager = object->pager; object->pager = MEMORY_OBJECT_NULL; @@ -1713,16 +1912,70 @@ vm_object_reap( /* * remove from purgeable queue if it's on */ - if (object->internal && (object->objq.next || object->objq.prev)) { - purgeable_q_t queue = vm_purgeable_object_remove(object); - assert(queue); + if (object->internal) { + task_t owner; + + owner = object->vo_purgeable_owner; + + VM_OBJECT_UNWIRED(object); + + if (object->purgable == VM_PURGABLE_DENY) { + /* not purgeable: nothing to do */ + } else if (object->purgable == VM_PURGABLE_VOLATILE) { + purgeable_q_t queue; + + assert(object->vo_purgeable_owner == NULL); + + queue = vm_purgeable_object_remove(object); + assert(queue); - /* Must take page lock for this - using it to protect token queue */ - vm_page_lock_queues(); - vm_purgeable_token_delete_first(queue); + if (object->purgeable_when_ripe) { + /* + * Must take page lock for this - + * using it to protect token queue + */ + vm_page_lock_queues(); + vm_purgeable_token_delete_first(queue); - assert(queue->debug_count_objects>=0); - vm_page_unlock_queues(); + assert(queue->debug_count_objects>=0); + vm_page_unlock_queues(); + } + + /* + * Update "vm_page_purgeable_count" in bulk and mark + * object as VM_PURGABLE_EMPTY to avoid updating + * "vm_page_purgeable_count" again in vm_page_remove() + * when reaping the pages. + */ + unsigned int delta; + assert(object->resident_page_count >= + object->wired_page_count); + delta = (object->resident_page_count - + object->wired_page_count); + if (delta != 0) { + assert(vm_page_purgeable_count >= delta); + OSAddAtomic(-delta, + (SInt32 *)&vm_page_purgeable_count); + } + if (object->wired_page_count != 0) { + assert(vm_page_purgeable_wired_count >= + object->wired_page_count); + OSAddAtomic(-object->wired_page_count, + (SInt32 *)&vm_page_purgeable_wired_count); + } + object->purgable = VM_PURGABLE_EMPTY; + } + else if (object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_EMPTY) { + /* remove from nonvolatile queue */ + assert(object->vo_purgeable_owner == TASK_NULL); + vm_purgeable_nonvolatile_dequeue(object); + } else { + panic("object %p in unexpected purgeable state 0x%x\n", + object, object->purgable); + } + assert(object->objq.next == NULL); + assert(object->objq.prev == NULL); } /* @@ -1767,6 +2020,13 @@ vm_object_reap( object->shadow = VM_OBJECT_NULL; +#if VM_OBJECT_TRACKING + if (vm_object_tracking_inited) { + btlog_remove_entries_for_element(vm_object_tracking_btlog, + object); + } +#endif /* VM_OBJECT_TRACKING */ + vm_object_lock_destroy(object); /* * Free the space for the object. 
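
The purgeable accounting added to vm_object_reap above deliberately avoids touching the global vm_page_purgeable_count once per freed page: it subtracts the whole (resident_page_count - wired_page_count) delta with a single OSAddAtomic and marks the object VM_PURGABLE_EMPTY before the pages are reaped, so vm_page_remove() has nothing left to adjust. A minimal user-space sketch of that bulk-adjustment idea follows, using C11 atomics instead of XNU's OSAddAtomic; the counter and function names here are illustrative only, not kernel symbols.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Global counter, analogous in spirit to vm_page_purgeable_count. */
static atomic_uint purgeable_page_count = 100;

/* Per-page teardown no longer touches the global counter. */
static void reap_one_page(void)
{
        /* free the page, update per-object state only */
}

static void reap_object(unsigned resident, unsigned wired)
{
        assert(resident >= wired);
        unsigned delta = resident - wired;

        /* One atomic adjustment for the whole object... */
        if (delta != 0)
                atomic_fetch_sub(&purgeable_page_count, delta);

        /* ...then reap the pages without per-page global updates. */
        for (unsigned i = 0; i < resident; i++)
                reap_one_page();
}

int main(void)
{
        reap_object(30, 5);     /* 25 unwired purgeable pages go away */
        printf("purgeable pages now: %u\n",
               atomic_load(&purgeable_page_count));     /* prints 75 */
        return 0;
}
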
@@ -1812,6 +2072,7 @@ vm_object_reap_pages( vm_page_t local_free_q = VM_PAGE_NULL; int loop_count; boolean_t disconnect_on_release; + pmap_flush_context pmap_flush_context_storage; if (reap_type == REAP_DATA_FLUSH) { /* @@ -1833,7 +2094,10 @@ vm_object_reap_pages( restart_after_sleep: if (queue_empty(&object->memq)) return; - loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH) + 1; + loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); + + if (reap_type == REAP_PURGEABLE) + pmap_flush_context_init(&pmap_flush_context_storage); vm_page_lockspin_queues(); @@ -1849,6 +2113,11 @@ restart_after_sleep: vm_page_unlock_queues(); if (local_free_q) { + + if (reap_type == REAP_PURGEABLE) { + pmap_flush(&pmap_flush_context_storage); + pmap_flush_context_init(&pmap_flush_context_storage); + } /* * Free the pages we reclaimed so far * and take a little break to avoid @@ -1859,38 +2128,13 @@ restart_after_sleep: } else mutex_pause(0); - loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH) + 1; + loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); vm_page_lockspin_queues(); } if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) { - if (reap_type == REAP_DATA_FLUSH && - ((p->pageout == TRUE || p->cleaning == TRUE) && p->list_req_pending == TRUE)) { - p->list_req_pending = FALSE; - p->cleaning = FALSE; - /* - * need to drop the laundry count... - * we may also need to remove it - * from the I/O paging queue... - * vm_pageout_throttle_up handles both cases - * - * the laundry and pageout_queue flags are cleared... - */ - vm_pageout_throttle_up(p); - - if (p->pageout == TRUE) { - /* - * toss the wire count we picked up - * when we initially set this page up - * to be cleaned and stolen... - */ - vm_page_unwire(p, TRUE); - p->pageout = FALSE; - } - PAGE_WAKEUP(p); - - } else if (p->busy || p->cleaning) { + if (p->busy || p->cleaning) { vm_page_unlock_queues(); /* @@ -1903,6 +2147,11 @@ restart_after_sleep: goto restart_after_sleep; } + if (p->laundry) { + p->pageout = FALSE; + + vm_pageout_steal_laundry(p, TRUE); + } } switch (reap_type) { @@ -1920,15 +2169,29 @@ restart_after_sleep: case REAP_PURGEABLE: if (VM_PAGE_WIRED(p)) { - /* can't purge a wired page */ + /* + * can't purge a wired page + */ vm_page_purged_wired++; continue; } + if (p->laundry && !p->busy && !p->cleaning) { + p->pageout = FALSE; + vm_pageout_steal_laundry(p, TRUE); + } + if (p->cleaning || p->laundry || p->absent) { + /* + * page is being acted upon, + * so don't mess with it + */ + vm_page_purged_others++; + continue; + } if (p->busy) { /* * We can't reclaim a busy page but we can - * make it pageable (it's not wired) to make + * make it more likely to be paged (it's not wired) to make * sure that it gets considered by * vm_pageout_scan() later. */ @@ -1937,35 +2200,18 @@ restart_after_sleep: continue; } - if (p->cleaning || p->laundry || p->list_req_pending) { - /* - * page is being acted upon, - * so don't mess with it - */ - vm_page_purged_others++; - continue; - } assert(p->object != kernel_object); /* * we can discard this page... */ if (p->pmapped == TRUE) { - int refmod_state; /* * unmap the page */ - refmod_state = pmap_disconnect(p->phys_page); - if (refmod_state & VM_MEM_MODIFIED) { - p->dirty = TRUE; - } - } - if (p->dirty || p->precious) { - /* - * we saved the cost of cleaning this page ! 
- */ - vm_page_purged_count++; + pmap_disconnect_options(p->phys_page, PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage); } + vm_page_purged_count++; break; @@ -1989,15 +2235,16 @@ restart_after_sleep: if ((p->dirty || p->precious) && !p->error && object->alive) { - p->busy = TRUE; - - VM_PAGE_QUEUES_REMOVE(p); - /* - * flush page... page will be freed - * upon completion of I/O - */ - vm_pageout_cluster(p); - + assert(!object->internal); + + if (!p->laundry) { + vm_page_queues_remove(p); + /* + * flush page... page will be freed + * upon completion of I/O + */ + (void)vm_pageout_cluster(p, TRUE, FALSE, FALSE); + } vm_page_unlock_queues(); /* * free the pages reclaimed so far @@ -2028,6 +2275,9 @@ restart_after_sleep: /* * Free the remaining reclaimed pages */ + if (reap_type == REAP_PURGEABLE) + pmap_flush(&pmap_flush_context_storage); + VM_OBJ_REAP_FREELIST(local_free_q, disconnect_on_release); } @@ -2418,11 +2668,20 @@ page_is_paged_out( return TRUE; } } else -#endif - if (object->internal && - object->alive && - !object->terminating && - object->pager_ready) { +#endif /* MACH_PAGEMAP */ + if (object->internal && + object->alive && + !object->terminating && + object->pager_ready) { + + if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { + if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) + == VM_EXTERNAL_STATE_EXISTS) { + return TRUE; + } else { + return FALSE; + } + } /* * We're already holding a "paging in progress" reference @@ -2457,6 +2716,20 @@ page_is_paged_out( +/* + * madvise_free_debug + * + * To help debug madvise(MADV_FREE*) mis-usage, this triggers a + * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to + * simulate the loss of the page's contents as if the page had been + * reclaimed and then re-faulted. + */ +#if DEVELOPMENT || DEBUG +int madvise_free_debug = 1; +#else /* DEBUG */ +int madvise_free_debug = 0; +#endif /* DEBUG */ + /* * Deactivate the pages in the specified object and range. If kill_page is set, also discard any * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify @@ -2470,11 +2743,11 @@ deactivate_pages_in_object( vm_object_size_t size, boolean_t kill_page, boolean_t reusable_page, -#if !MACH_ASSERT - __unused -#endif boolean_t all_reusable, - chunk_state_t *chunk_state) + chunk_state_t *chunk_state, + pmap_flush_context *pfc, + struct pmap *pmap, + vm_map_offset_t pmap_offset) { vm_page_t m; int p; @@ -2484,7 +2757,6 @@ deactivate_pages_in_object( int dw_limit; unsigned int reusable = 0; - /* * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the * chunk. 
Since this routine is called once for each level in the shadow chain, the chunk_state may @@ -2496,7 +2768,7 @@ deactivate_pages_in_object( dw_count = 0; dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64) { + for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { /* * If this offset has already been found and handled in a higher level object, then don't @@ -2521,15 +2793,25 @@ deactivate_pages_in_object( MARK_PAGE_HANDLED(*chunk_state, p); - if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy)) { + if (( !VM_PAGE_WIRED(m)) && (!m->private) && (!m->gobbled) && (!m->busy) && (!m->laundry)) { int clear_refmod; + int pmap_options; - assert(!m->laundry); - + dwp->dw_mask = 0; + + pmap_options = 0; clear_refmod = VM_MEM_REFERENCED; - dwp->dw_mask = DW_clear_reference; + dwp->dw_mask |= DW_clear_reference; if ((kill_page) && (object->internal)) { + if (madvise_free_debug) { + /* + * zero-fill the page now + * to simulate it being + * reclaimed and re-faulted. + */ + pmap_zero_page(m->phys_page); + } m->precious = FALSE; m->dirty = FALSE; @@ -2547,6 +2829,8 @@ deactivate_pages_in_object( #if MACH_PAGEMAP vm_external_state_clr(object->existence_map, offset); #endif /* MACH_PAGEMAP */ + VM_COMPRESSOR_PAGER_STATE_CLR(object, + offset); if (reusable_page && !m->reusable) { assert(!all_reusable); @@ -2555,14 +2839,26 @@ deactivate_pages_in_object( object->reusable_page_count++; assert(object->resident_page_count >= object->reusable_page_count); reusable++; + /* + * Tell pmap this page is now + * "reusable" (to update pmap + * stats for all mappings). + */ + pmap_options |= PMAP_OPTIONS_SET_REUSABLE; } } - pmap_clear_refmod(m->phys_page, clear_refmod); + pmap_options |= PMAP_OPTIONS_NOFLUSH; + pmap_clear_refmod_options(m->phys_page, + clear_refmod, + pmap_options, + (void *)pfc); if (!m->throttled && !(reusable_page || all_reusable)) dwp->dw_mask |= DW_move_page; - VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count); + if (dwp->dw_mask) + VM_PAGE_ADD_DELAYED_WORK(dwp, m, + dw_count); if (dw_count >= dw_limit) { if (reusable) { @@ -2571,7 +2867,7 @@ deactivate_pages_in_object( vm_page_stats_reusable.reusable += reusable; reusable = 0; } - vm_page_do_delayed_work(object, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); dwp = &dw_array[0]; dw_count = 0; @@ -2598,6 +2894,25 @@ deactivate_pages_in_object( #if MACH_PAGEMAP vm_external_state_clr(object->existence_map, offset); #endif /* MACH_PAGEMAP */ + VM_COMPRESSOR_PAGER_STATE_CLR(object, + offset); + if (pmap != PMAP_NULL && + (COMPRESSED_PAGER_IS_ACTIVE || + DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)) { + /* + * Tell pmap that this page + * is no longer mapped, to + * adjust the footprint ledger + * because this page is no + * longer compressed. 
+ */ + pmap_remove_options( + pmap, + pmap_offset, + (pmap_offset + + PAGE_SIZE), + PMAP_OPTIONS_REMOVE); + } } } } @@ -2610,7 +2925,7 @@ deactivate_pages_in_object( } if (dw_count) - vm_page_do_delayed_work(object, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); } @@ -2630,7 +2945,10 @@ deactivate_a_chunk( vm_object_size_t size, boolean_t kill_page, boolean_t reusable_page, - boolean_t all_reusable) + boolean_t all_reusable, + pmap_flush_context *pfc, + struct pmap *pmap, + vm_map_offset_t pmap_offset) { vm_object_t object; vm_object_t tmp_object; @@ -2663,7 +2981,7 @@ deactivate_a_chunk( while (object && CHUNK_NOT_COMPLETE(chunk_state)) { vm_object_paging_begin(object); - deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state); + deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset); vm_object_paging_end(object); @@ -2709,10 +3027,13 @@ vm_object_deactivate_pages( vm_object_offset_t offset, vm_object_size_t size, boolean_t kill_page, - boolean_t reusable_page) + boolean_t reusable_page, + struct pmap *pmap, + vm_map_offset_t pmap_offset) { vm_object_size_t length; boolean_t all_reusable; + pmap_flush_context pmap_flush_context_storage; /* * We break the range up into chunks and do one chunk at a time. This is for @@ -2723,6 +3044,13 @@ vm_object_deactivate_pages( all_reusable = FALSE; +#if 11 + /* + * For the sake of accurate "reusable" pmap stats, we need + * to tell pmap about each page that is no longer "reusable", + * so we can't do the "all_reusable" optimization. + */ +#else if (reusable_page && object->internal && object->vo_size != 0 && @@ -2731,6 +3059,7 @@ vm_object_deactivate_pages( all_reusable = TRUE; reusable_page = FALSE; } +#endif if ((reusable_page || all_reusable) && object->all_reusable) { /* This means MADV_FREE_REUSABLE has been called twice, which @@ -2738,12 +3067,16 @@ vm_object_deactivate_pages( return; } + pmap_flush_context_init(&pmap_flush_context_storage); + while (size) { - length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable); + length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset); size -= length; offset += length; + pmap_offset += length; } + pmap_flush(&pmap_flush_context_storage); if (all_reusable) { if (!object->all_reusable) { @@ -2784,6 +3117,17 @@ vm_object_reuse_pages( (object)->reusable_page_count--; \ (m)->reusable = FALSE; \ (reused)++; \ + /* \ + * Tell pmap that this page is no longer \ + * "reusable", to update the "reusable" stats \ + * for all the pmaps that have mapped this \ + * page. 
\ + */ \ + pmap_clear_refmod_options((m)->phys_page, \ + 0, /* refmod */ \ + (PMAP_OPTIONS_CLEAR_REUSABLE \ + | PMAP_OPTIONS_NOFLUSH), \ + NULL); \ } \ MACRO_END @@ -2793,6 +3137,8 @@ vm_object_reuse_pages( vm_object_lock_assert_exclusive(object); if (object->all_reusable) { + panic("object %p all_reusable: can't update pmap stats\n", + object); assert(object->reusable_page_count == 0); object->all_reusable = FALSE; if (end_offset - start_offset == object->vo_size || @@ -2879,8 +3225,25 @@ vm_object_pmap_protect( vm_map_offset_t pmap_start, vm_prot_t prot) { + vm_object_pmap_protect_options(object, offset, size, + pmap, pmap_start, prot, 0); +} + +__private_extern__ void +vm_object_pmap_protect_options( + register vm_object_t object, + register vm_object_offset_t offset, + vm_object_size_t size, + pmap_t pmap, + vm_map_offset_t pmap_start, + vm_prot_t prot, + int options) +{ + pmap_flush_context pmap_flush_context_storage; + boolean_t delayed_pmap_flush = FALSE; + if (object == VM_OBJECT_NULL) - return; + return; size = vm_object_round_page(size); offset = vm_object_trunc_page(offset); @@ -2889,7 +3252,12 @@ vm_object_pmap_protect( if (object->phys_contiguous) { if (pmap != NULL) { vm_object_unlock(object); - pmap_protect(pmap, pmap_start, pmap_start + size, prot); + pmap_protect_options(pmap, + pmap_start, + pmap_start + size, + prot, + options & ~PMAP_OPTIONS_NOFLUSH, + NULL); } else { vm_object_offset_t phys_start, phys_end, phys_addr; @@ -2899,11 +3267,21 @@ vm_object_pmap_protect( assert(phys_end <= object->vo_shadow_offset + object->vo_size); vm_object_unlock(object); + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + for (phys_addr = phys_start; phys_addr < phys_end; phys_addr += PAGE_SIZE_64) { - pmap_page_protect((ppnum_t) (phys_addr >> PAGE_SHIFT), prot); + pmap_page_protect_options( + (ppnum_t) (phys_addr >> PAGE_SHIFT), + prot, + options | PMAP_OPTIONS_NOFLUSH, + (void *)&pmap_flush_context_storage); + delayed_pmap_flush = TRUE; } + if (delayed_pmap_flush == TRUE) + pmap_flush(&pmap_flush_context_storage); } return; } @@ -2913,38 +3291,49 @@ vm_object_pmap_protect( while (TRUE) { if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) { vm_object_unlock(object); - pmap_protect(pmap, pmap_start, pmap_start + size, prot); + pmap_protect_options(pmap, pmap_start, pmap_start + size, prot, + options & ~PMAP_OPTIONS_NOFLUSH, NULL); return; } - /* if we are doing large ranges with respect to resident */ - /* page count then we should interate over pages otherwise */ - /* inverse page look-up will be faster */ + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + + /* + * if we are doing large ranges with respect to resident + * page count then we should interate over pages otherwise + * inverse page look-up will be faster + */ if (ptoa_64(object->resident_page_count / 4) < size) { vm_page_t p; vm_object_offset_t end; end = offset + size; - if (pmap != PMAP_NULL) { - queue_iterate(&object->memq, p, vm_page_t, listq) { - if (!p->fictitious && - (offset <= p->offset) && (p->offset < end)) { - vm_map_offset_t start; + queue_iterate(&object->memq, p, vm_page_t, listq) { + if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) { + vm_map_offset_t start; - start = pmap_start + p->offset - offset; - pmap_protect(pmap, start, start + PAGE_SIZE_64, prot); - } - } - } else { - queue_iterate(&object->memq, p, vm_page_t, listq) { - if (!p->fictitious && - (offset <= p->offset) && (p->offset < end)) { 
+ start = pmap_start + p->offset - offset; - pmap_page_protect(p->phys_page, prot); - } - } + if (pmap != PMAP_NULL) + pmap_protect_options( + pmap, + start, + start + PAGE_SIZE_64, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + else + pmap_page_protect_options( + p->phys_page, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + delayed_pmap_flush = TRUE; + } } + } else { vm_page_t p; vm_object_offset_t end; @@ -2952,29 +3341,36 @@ vm_object_pmap_protect( end = offset + size; - if (pmap != PMAP_NULL) { - for(target_off = offset; - target_off < end; - target_off += PAGE_SIZE) { - p = vm_page_lookup(object, target_off); - if (p != VM_PAGE_NULL) { - vm_object_offset_t start; - start = pmap_start + - (p->offset - offset); - pmap_protect(pmap, start, - start + PAGE_SIZE, prot); - } - } - } else { - for(target_off = offset; - target_off < end; target_off += PAGE_SIZE) { - p = vm_page_lookup(object, target_off); - if (p != VM_PAGE_NULL) { - pmap_page_protect(p->phys_page, prot); - } + for (target_off = offset; + target_off < end; target_off += PAGE_SIZE) { + + p = vm_page_lookup(object, target_off); + + if (p != VM_PAGE_NULL) { + vm_object_offset_t start; + + start = pmap_start + (p->offset - offset); + + if (pmap != PMAP_NULL) + pmap_protect_options( + pmap, + start, + start + PAGE_SIZE_64, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + else + pmap_page_protect_options( + p->phys_page, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + delayed_pmap_flush = TRUE; } } - } + } + if (delayed_pmap_flush == TRUE) + pmap_flush(&pmap_flush_context_storage); if (prot == VM_PROT_NONE) { /* @@ -3084,7 +3480,8 @@ vm_object_copy_slowly( fault_info.interruptible = interruptible; fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; - fault_info.user_tag = 0; + fault_info.user_tag = 0; + fault_info.pmap_options = 0; fault_info.lo_offset = src_offset; fault_info.hi_offset = src_offset + size; fault_info.no_cache = FALSE; @@ -3092,6 +3489,7 @@ vm_object_copy_slowly( fault_info.io_sync = FALSE; fault_info.cs_bypass = FALSE; fault_info.mark_zf_absent = FALSE; + fault_info.batch_pmap_op = FALSE; for ( ; size != 0 ; @@ -3127,6 +3525,34 @@ vm_object_copy_slowly( kern_return_t error_code; vm_object_lock(src_object); + + if (src_object->internal && + src_object->shadow == VM_OBJECT_NULL && + (vm_page_lookup(src_object, + src_offset) == VM_PAGE_NULL) && + (src_object->pager == NULL || + (VM_COMPRESSOR_PAGER_STATE_GET(src_object, + src_offset) == + VM_EXTERNAL_STATE_ABSENT))) { + /* + * This page is neither resident nor compressed + * and there's no shadow object below + * "src_object", so this page is really missing. + * There's no need to zero-fill it just to copy + * it: let's leave it missing in "new_object" + * and get zero-filled on demand. + */ + vm_object_unlock(src_object); + /* free the unused "new_page"... 
*/ + vm_object_lock(new_object); + VM_PAGE_FREE(new_page); + new_page = VM_PAGE_NULL; + vm_object_unlock(new_object); + /* ...and go to next page in "src_object" */ + result = VM_FAULT_SUCCESS; + break; + } + vm_object_paging_begin(src_object); if (size > (vm_size_t) -1) { @@ -3138,8 +3564,10 @@ vm_object_copy_slowly( } XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0); + _result_page = VM_PAGE_NULL; result = vm_fault_page(src_object, src_offset, VM_PROT_READ, FALSE, + FALSE, /* page not looked up */ &prot, &_result_page, &top_page, (int *)0, &error_code, FALSE, FALSE, &fault_info); @@ -3149,11 +3577,6 @@ vm_object_copy_slowly( result_page = _result_page; /* - * We don't need to hold the object - * lock -- the busy page will be enough. - * [We don't care about picking up any - * new modifications.] - * * Copy the page to the new object. * * POLICY DECISION: @@ -3162,15 +3585,15 @@ vm_object_copy_slowly( * of copying. */ - vm_object_unlock(result_page->object); vm_page_copy(result_page, new_page); + vm_object_unlock(result_page->object); /* * Let go of both pages (make them * not busy, perform wakeup, activate). */ vm_object_lock(new_object); - new_page->dirty = TRUE; + SET_PAGE_DIRTY(new_page, FALSE); PAGE_WAKEUP_DONE(new_page); vm_object_unlock(new_object); @@ -3482,6 +3905,8 @@ vm_object_copy_delayed( vm_object_t old_copy; vm_page_t p; vm_object_size_t copy_size = src_offset + size; + pmap_flush_context pmap_flush_context_storage; + boolean_t delayed_pmap_flush = FALSE; int collisions = 0; @@ -3524,6 +3949,7 @@ vm_object_copy_delayed( * the original object must be done carefully, to avoid deadlock. */ + copy_size = vm_object_round_page(copy_size); Retry: /* @@ -3612,6 +4038,9 @@ vm_object_copy_delayed( */ copy_delayed_protect_iterate++; + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + queue_iterate(&src_object->memq, p, vm_page_t, listq) { if (!p->fictitious && p->offset >= old_copy->vo_size && @@ -3624,14 +4053,20 @@ vm_object_copy_delayed( vm_object_unlock(new_copy); vm_object_deallocate(new_copy); } + if (delayed_pmap_flush == TRUE) + pmap_flush(&pmap_flush_context_storage); return VM_OBJECT_NULL; } else { - pmap_page_protect(p->phys_page, - (VM_PROT_ALL & ~VM_PROT_WRITE)); + pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE), + PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); + delayed_pmap_flush = TRUE; } } } + if (delayed_pmap_flush == TRUE) + pmap_flush(&pmap_flush_context_storage); + old_copy->vo_size = copy_size; } if (src_object_shared == TRUE) @@ -3703,6 +4138,9 @@ vm_object_copy_delayed( */ copy_delayed_protect_iterate++; + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + queue_iterate(&src_object->memq, p, vm_page_t, listq) { if (!p->fictitious && p->offset < copy_size) { if (VM_PAGE_WIRED(p)) { @@ -3711,13 +4149,21 @@ vm_object_copy_delayed( vm_object_unlock(src_object); vm_object_unlock(new_copy); vm_object_deallocate(new_copy); + + if (delayed_pmap_flush == TRUE) + pmap_flush(&pmap_flush_context_storage); + return VM_OBJECT_NULL; } else { - pmap_page_protect(p->phys_page, - (VM_PROT_ALL & ~VM_PROT_WRITE)); + pmap_page_protect_options(p->phys_page, (VM_PROT_ALL & ~VM_PROT_WRITE), + PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); + delayed_pmap_flush = TRUE; } } } + if (delayed_pmap_flush == TRUE) + pmap_flush(&pmap_flush_context_storage); + if (old_copy != VM_OBJECT_NULL) { /* * Make the old copy-object shadow the new one. 
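
A recurring pattern in the new code above (vm_object_pmap_protect_options, deactivate_pages_in_object, vm_object_copy_delayed) is batched TLB maintenance: each per-page pmap call is issued with PMAP_OPTIONS_NOFLUSH and records its work in a pmap_flush_context, and a single pmap_flush() after the loop covers the whole batch. Below is a minimal sketch of that accumulate-then-flush shape with a toy context; the struct and function names are illustrative stand-ins, not the real pmap interfaces.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for pmap_flush_context: remembers whether any
 * deferred invalidation work has been queued. */
struct flush_ctx {
        bool   pending;
        size_t pages_queued;
};

static void flush_ctx_init(struct flush_ctx *ctx)
{
        ctx->pending = false;
        ctx->pages_queued = 0;
}

/* Per-page operation: change the mapping but defer the expensive
 * flush/shootdown, noting it in the context instead. */
static void protect_page_noflush(struct flush_ctx *ctx, size_t page)
{
        (void)page;             /* real code would update the mapping here */
        ctx->pending = true;
        ctx->pages_queued++;
}

/* One flush for the whole batch, and only if anything was queued. */
static void flush(struct flush_ctx *ctx)
{
        if (!ctx->pending)
                return;
        printf("flushing once for %zu pages\n", ctx->pages_queued);
        flush_ctx_init(ctx);
}

int main(void)
{
        struct flush_ctx ctx;
        flush_ctx_init(&ctx);

        for (size_t page = 0; page < 64; page++)
                protect_page_noflush(&ctx, page);       /* no per-page flush */

        flush(&ctx);                                    /* single batched flush */
        return 0;
}
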
@@ -3894,6 +4340,10 @@ vm_object_shadow( register vm_object_t result; source = *object; + assert(source != VM_OBJECT_NULL); + if (source == VM_OBJECT_NULL) + return FALSE; + #if 0 /* * XXX FBDP @@ -4090,12 +4540,31 @@ Retry: assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL); new_entry = vm_object_hash_entry_alloc(pager); new_object = vm_object_allocate(size); + /* + * Set new_object->hashed now, while noone + * knows about this object yet and we + * don't need to lock it. Once it's in + * the hash table, we would have to lock + * the object to set its "hashed" bit and + * we can't lock the object while holding + * the hash lock as a spinlock... + */ + new_object->hashed = TRUE; lck = vm_object_hash_lock_spin(pager); } else { /* * Lookup failed twice, and we have something * to insert; set the object. */ + /* + * We can't lock the object here since we're + * holding the hash lock as a spin lock. + * We've already pre-set "new_object->hashed" + * when we created "new_object" above, so we + * won't need to modify the object in + * vm_object_hash_insert(). + */ + assert(new_object->hashed); vm_object_hash_insert(new_entry, new_object); entry = new_entry; new_entry = VM_OBJECT_HASH_ENTRY_NULL; @@ -4182,8 +4651,16 @@ Retry: * throw away ours. */ - if (new_object != VM_OBJECT_NULL) + if (new_object != VM_OBJECT_NULL) { + /* + * Undo the pre-setting of "new_object->hashed" before + * deallocating "new_object", since we did not insert it + * into the hash table after all. + */ + assert(new_object->hashed); + new_object->hashed = FALSE; vm_object_deallocate(new_object); + } if (new_entry != VM_OBJECT_HASH_ENTRY_NULL) vm_object_hash_entry_free(new_entry); @@ -4330,11 +4807,13 @@ vm_object_pager_create( vm_object_unlock(object); #if MACH_PAGEMAP - map = vm_external_create(size); - vm_object_lock(object); - assert(object->vo_size == size); - object->existence_map = map; - vm_object_unlock(object); + if (DEFAULT_PAGER_IS_ACTIVE) { + map = vm_external_create(size); + vm_object_lock(object); + assert(object->vo_size == size); + object->existence_map = map; + vm_object_unlock(object); + } #endif /* MACH_PAGEMAP */ if ((uint32_t) object->vo_size != object->vo_size) { @@ -4367,9 +4846,11 @@ vm_object_pager_create( entry = vm_object_hash_entry_alloc(pager); + vm_object_lock(object); lck = vm_object_hash_lock_spin(pager); vm_object_hash_insert(entry, object); vm_object_hash_unlock(lck); + vm_object_unlock(object); /* * A reference was returned by @@ -4393,6 +4874,111 @@ vm_object_pager_create( vm_object_paging_end(object); } +void +vm_object_compressor_pager_create( + register vm_object_t object) +{ + memory_object_t pager; + vm_object_hash_entry_t entry; + lck_mtx_t *lck; + vm_object_t pager_object = VM_OBJECT_NULL; + + assert(object != kernel_object); + + /* + * Prevent collapse or termination by holding a paging reference + */ + + vm_object_paging_begin(object); + if (object->pager_created) { + /* + * Someone else got to it first... + * wait for them to finish initializing the ports + */ + while (!object->pager_initialized) { + vm_object_sleep(object, + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); + } + vm_object_paging_end(object); + return; + } + + /* + * Indicate that a memory object has been assigned + * before dropping the lock, to prevent a race. 
+ */ + + object->pager_created = TRUE; + object->paging_offset = 0; + + vm_object_unlock(object); + + if ((uint32_t) (object->vo_size/PAGE_SIZE) != + (object->vo_size/PAGE_SIZE)) { + panic("vm_object_compressor_pager_create(%p): " + "object size 0x%llx >= 0x%llx\n", + object, + (uint64_t) object->vo_size, + 0x0FFFFFFFFULL*PAGE_SIZE); + } + + /* + * Create the [internal] pager, and associate it with this object. + * + * We make the association here so that vm_object_enter() + * can look up the object to complete initializing it. No + * user will ever map this object. + */ + { + assert(object->temporary); + + /* create our new memory object */ + assert((uint32_t) (object->vo_size/PAGE_SIZE) == + (object->vo_size/PAGE_SIZE)); + (void) compressor_memory_object_create( + (memory_object_size_t) object->vo_size, + &pager); + if (pager == NULL) { + panic("vm_object_compressor_pager_create(): " + "no pager for object %p size 0x%llx\n", + object, (uint64_t) object->vo_size); + } + } + + entry = vm_object_hash_entry_alloc(pager); + + vm_object_lock(object); + lck = vm_object_hash_lock_spin(pager); + vm_object_hash_insert(entry, object); + vm_object_hash_unlock(lck); + vm_object_unlock(object); + + /* + * A reference was returned by + * memory_object_create(), and it is + * copied by vm_object_enter(). + */ + + pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE); + + if (pager_object != object) { + panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size); + } + + /* + * Drop the reference we were passed. + */ + memory_object_deallocate(pager); + + vm_object_lock(object); + + /* + * Release the paging reference + */ + vm_object_paging_end(object); +} + /* * Routine: vm_object_remove * Purpose: @@ -4436,6 +5022,83 @@ static int vm_external_collapsed; unsigned long vm_object_collapse_encrypted = 0; +void vm_object_do_collapse_compressor(vm_object_t object, + vm_object_t backing_object); +void +vm_object_do_collapse_compressor( + vm_object_t object, + vm_object_t backing_object) +{ + vm_object_offset_t new_offset, backing_offset; + vm_object_size_t size; + + vm_counters.do_collapse_compressor++; + + vm_object_lock_assert_exclusive(object); + vm_object_lock_assert_exclusive(backing_object); + + size = object->vo_size; + + /* + * Move all compressed pages from backing_object + * to the parent. + */ + + for (backing_offset = object->vo_shadow_offset; + backing_offset < object->vo_shadow_offset + object->vo_size; + backing_offset += PAGE_SIZE) { + memory_object_offset_t backing_pager_offset; + + /* find the next compressed page at or after this offset */ + backing_pager_offset = (backing_offset + + backing_object->paging_offset); + backing_pager_offset = vm_compressor_pager_next_compressed( + backing_object->pager, + backing_pager_offset); + if (backing_pager_offset == (memory_object_offset_t) -1) { + /* no more compressed pages */ + break; + } + backing_offset = (backing_pager_offset - + backing_object->paging_offset); + + new_offset = backing_offset - object->vo_shadow_offset; + + if (new_offset >= object->vo_size) { + /* we're out of the scope of "object": done */ + break; + } + + if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) || + (vm_compressor_pager_state_get(object->pager, + (new_offset + + object->paging_offset)) == + VM_EXTERNAL_STATE_EXISTS)) { + /* + * This page already exists in object, resident or + * compressed. 
+ * We don't need this compressed page in backing_object + * and it will be reclaimed when we release + * backing_object. + */ + continue; + } + + /* + * backing_object has this page in the VM compressor and + * we need to transfer it to object. + */ + vm_counters.do_collapse_compressor_pages++; + vm_compressor_pager_transfer( + /* destination: */ + object->pager, + (new_offset + object->paging_offset), + /* source: */ + backing_object->pager, + (backing_offset + backing_object->paging_offset)); + } +} + /* * Routine: vm_object_do_collapse * Purpose: @@ -4459,6 +5122,9 @@ vm_object_do_collapse( vm_object_lock_assert_exclusive(object); vm_object_lock_assert_exclusive(backing_object); + assert(object->purgable == VM_PURGABLE_DENY); + assert(backing_object->purgable == VM_PURGABLE_DENY); + backing_offset = object->vo_shadow_offset; size = object->vo_size; @@ -4503,12 +5169,26 @@ vm_object_do_collapse( pp = vm_page_lookup(object, new_offset); if (pp == VM_PAGE_NULL) { - /* - * Parent now has no page. - * Move the backing object's page up. - */ + if (VM_COMPRESSOR_PAGER_STATE_GET(object, + new_offset) + == VM_EXTERNAL_STATE_EXISTS) { + /* + * Parent object has this page + * in the VM compressor. + * Throw away the backing + * object's page. + */ + VM_PAGE_FREE(p); + } else { + /* + * Parent now has no page. + * Move the backing object's page + * up. + */ + vm_page_rename(p, object, new_offset, + TRUE); + } - vm_page_rename(p, object, new_offset, TRUE); #if MACH_PAGEMAP } else if (pp->absent) { @@ -4539,18 +5219,27 @@ vm_object_do_collapse( } } } - + + if (vm_object_collapse_compressor_allowed && + object->pager != MEMORY_OBJECT_NULL && + backing_object->pager != MEMORY_OBJECT_NULL) { + + /* move compressed pages from backing_object to object */ + vm_object_do_collapse_compressor(object, backing_object); + + } else if (backing_object->pager != MEMORY_OBJECT_NULL) { + vm_object_hash_entry_t entry; + #if !MACH_PAGEMAP - assert((!object->pager_created && (object->pager == MEMORY_OBJECT_NULL)) - || (!backing_object->pager_created - && (backing_object->pager == MEMORY_OBJECT_NULL))); + assert((!object->pager_created && + (object->pager == MEMORY_OBJECT_NULL)) || + (!backing_object->pager_created && + (backing_object->pager == MEMORY_OBJECT_NULL))); #else - assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL); + assert(!object->pager_created && + object->pager == MEMORY_OBJECT_NULL); #endif /* !MACH_PAGEMAP */ - if (backing_object->pager != MEMORY_OBJECT_NULL) { - vm_object_hash_entry_t entry; - /* * Move the pager from backing_object to object. 
* @@ -4561,6 +5250,8 @@ vm_object_do_collapse( assert(!object->paging_in_progress); assert(!object->activity_in_progress); + assert(!object->pager_created); + assert(object->pager == NULL); object->pager = backing_object->pager; if (backing_object->hashed) { @@ -4584,6 +5275,12 @@ vm_object_do_collapse( memory_object_control_collapse(object->pager_control, object); } + /* the backing_object has lost its pager: reset all fields */ + backing_object->pager_created = FALSE; + backing_object->pager_control = NULL; + backing_object->pager_ready = FALSE; + backing_object->paging_offset = 0; + backing_object->pager = NULL; } #if MACH_PAGEMAP @@ -4623,6 +5320,9 @@ vm_object_do_collapse( object->shadow = backing_object->shadow; if (object->shadow) { object->vo_shadow_offset += backing_object->vo_shadow_offset; + /* "backing_object" gave its shadow to "object" */ + backing_object->shadow = VM_OBJECT_NULL; + backing_object->vo_shadow_offset = 0; } else { /* no shadow, therefore no shadow offset... */ object->vo_shadow_offset = 0; @@ -4637,11 +5337,25 @@ vm_object_do_collapse( * pager left, and no object references within it, * all that is necessary is to dispose of it. */ + object_collapses++; - assert((backing_object->ref_count == 1) && - (backing_object->resident_page_count == 0) && - (backing_object->paging_in_progress == 0) && - (backing_object->activity_in_progress == 0)); + assert(backing_object->ref_count == 1); + assert(backing_object->resident_page_count == 0); + assert(backing_object->paging_in_progress == 0); + assert(backing_object->activity_in_progress == 0); + assert(backing_object->shadow == VM_OBJECT_NULL); + assert(backing_object->vo_shadow_offset == 0); + + if (backing_object->pager != MEMORY_OBJECT_NULL) { + /* ... unless it has a pager; need to terminate pager too */ + vm_counters.do_collapse_terminate++; + if (vm_object_terminate(backing_object) != KERN_SUCCESS) { + vm_counters.do_collapse_terminate_failure++; + } + return; + } + + assert(backing_object->pager == NULL); backing_object->alive = FALSE; vm_object_unlock(backing_object); @@ -4649,11 +5363,17 @@ vm_object_do_collapse( XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n", backing_object, 0,0,0,0); +#if VM_OBJECT_TRACKING + if (vm_object_tracking_inited) { + btlog_remove_entries_for_element(vm_object_tracking_btlog, + backing_object); + } +#endif /* VM_OBJECT_TRACKING */ + vm_object_lock_destroy(backing_object); zfree(vm_object_zone, backing_object); - object_collapses++; } static void @@ -4754,7 +5474,19 @@ vm_object_do_bypass( vm_object_res_reference(backing_object); } #endif /* TASK_SWAPPER */ + /* + * vm_object_collapse (the caller of this function) is + * now called from contexts that may not guarantee that a + * valid reference is held on the object... w/o a valid + * reference, it is unsafe and unwise (you will definitely + * regret it) to unlock the object and then retake the lock + * since the object may be terminated and recycled in between. + * The "activity_in_progress" reference will keep the object + * 'stable'. 
+ */ + vm_object_activity_begin(object); vm_object_unlock(object); + vm_object_unlock(backing_object); vm_object_deallocate(backing_object); @@ -4766,6 +5498,7 @@ vm_object_do_bypass( */ vm_object_lock(object); + vm_object_activity_end(object); } object_bypasses++; @@ -4786,7 +5519,7 @@ static unsigned long vm_object_collapse_calls = 0; static unsigned long vm_object_collapse_objects = 0; static unsigned long vm_object_collapse_do_collapse = 0; static unsigned long vm_object_collapse_do_bypass = 0; -static unsigned long vm_object_collapse_delays = 0; + __private_extern__ void vm_object_collapse( register vm_object_t object, @@ -4887,6 +5620,29 @@ retry: object_lock_type = backing_object_lock_type; continue; } + + /* + * Purgeable objects are not supposed to engage in + * copy-on-write activities, so should not have + * any shadow objects or be a shadow object to another + * object. + * Collapsing a purgeable object would require some + * updates to the purgeable compressed ledgers. + */ + if (object->purgable != VM_PURGABLE_DENY || + backing_object->purgable != VM_PURGABLE_DENY) { + panic("vm_object_collapse() attempting to collapse " + "purgeable object: %p(%d) %p(%d)\n", + object, object->purgable, + backing_object, backing_object->purgable); + /* try and collapse the rest of the shadow chain */ + if (object != original_object) { + vm_object_unlock(object); + } + object = backing_object; + object_lock_type = backing_object_lock_type; + continue; + } /* * The backing object can't be a copy-object: @@ -4930,9 +5686,10 @@ retry: * to the pager, we can collapse them. */ if (backing_object->ref_count == 1 && - (!object->pager_created + (vm_object_collapse_compressor_allowed || + !object->pager_created #if !MACH_PAGEMAP - || !backing_object->pager_created + || (!backing_object->pager_created) #endif /*!MACH_PAGEMAP */ ) && vm_object_collapse_allowed) { @@ -4999,13 +5756,13 @@ retry: * we have to make sure no pages in the backing object * "show through" before bypassing it. 
*/ - size = atop(object->vo_size); + size = (unsigned int)atop(object->vo_size); rcount = object->resident_page_count; + if (rcount != size) { vm_object_offset_t offset; vm_object_offset_t backing_offset; unsigned int backing_rcount; - unsigned int lookups = 0; /* * If the backing object has a pager but no pagemap, @@ -5045,6 +5802,24 @@ retry: continue; } + backing_offset = object->vo_shadow_offset; + backing_rcount = backing_object->resident_page_count; + + if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) { + /* + * we have enough pages in the backing object to guarantee that + * at least 1 of them must be 'uncovered' by a resident page + * in the object we're evaluating, so move on and + * try to collapse the rest of the shadow chain + */ + if (object != original_object) { + vm_object_unlock(object); + } + object = backing_object; + object_lock_type = backing_object_lock_type; + continue; + } + /* * If all of the pages in the backing object are * shadowed by the parent object, the parent @@ -5058,17 +5833,19 @@ retry: * */ - backing_offset = object->vo_shadow_offset; - backing_rcount = backing_object->resident_page_count; - #if MACH_PAGEMAP #define EXISTS_IN_OBJECT(obj, off, rc) \ - (vm_external_state_get((obj)->existence_map, \ - (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \ - ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) -#else -#define EXISTS_IN_OBJECT(obj, off, rc) \ - (((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) + ((vm_external_state_get((obj)->existence_map, \ + (vm_offset_t)(off)) \ + == VM_EXTERNAL_STATE_EXISTS) || \ + (VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ + == VM_EXTERNAL_STATE_EXISTS) || \ + ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) +#else /* MACH_PAGEMAP */ +#define EXISTS_IN_OBJECT(obj, off, rc) \ + ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ + == VM_EXTERNAL_STATE_EXISTS) || \ + ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) #endif /* MACH_PAGEMAP */ /* @@ -5101,36 +5878,23 @@ retry: * pages in the backing object, it makes sense to * walk the backing_object's resident pages first. * - * NOTE: Pages may be in both the existence map and - * resident. So, we can't permanently decrement - * the rcount here because the second loop may - * find the same pages in the backing object' - * existence map that we found here and we would - * double-decrement the rcount. We also may or - * may not have found the + * NOTE: Pages may be in both the existence map and/or + * resident, so if we don't find a dependency while + * walking the backing object's resident page list + * directly, and there is an existence map, we'll have + * to run the offset based 2nd pass. Because we may + * have to run both passes, we need to be careful + * not to decrement 'rcount' in the 1st pass */ - if (backing_rcount && -#if MACH_PAGEMAP - size > ((backing_object->existence_map) ? 
- backing_rcount : (backing_rcount >> 1)) -#else - size > (backing_rcount >> 1) -#endif /* MACH_PAGEMAP */ - ) { + if (backing_rcount && backing_rcount < (size / 8)) { unsigned int rc = rcount; vm_page_t p; backing_rcount = backing_object->resident_page_count; p = (vm_page_t)queue_first(&backing_object->memq); do { - /* Until we get more than one lookup lock */ - if (lookups > 256) { - vm_object_collapse_delays++; - lookups = 0; - mutex_pause(0); - } - offset = (p->offset - backing_offset); + if (offset < object->vo_size && offset != hint_offset && !EXISTS_IN_OBJECT(object, offset, rc)) { @@ -5168,13 +5932,6 @@ retry: (offset + PAGE_SIZE_64 < object->vo_size) ? (offset + PAGE_SIZE_64) : 0) != hint_offset) { - /* Until we get more than one lookup lock */ - if (lookups > 256) { - vm_object_collapse_delays++; - lookups = 0; - mutex_pause(0); - } - if (EXISTS_IN_OBJECT(backing_object, offset + backing_offset, backing_rcount) && !EXISTS_IN_OBJECT(object, offset, rcount)) { @@ -5226,9 +5983,12 @@ retry: continue; } + /* NOT REACHED */ + /* if (object != original_object) { vm_object_unlock(object); } + */ } /* @@ -5265,7 +6025,7 @@ vm_object_page_remove( for (; start < end; start += PAGE_SIZE_64) { p = vm_page_lookup(object, start); if (p != VM_PAGE_NULL) { - assert(!p->cleaning && !p->pageout); + assert(!p->cleaning && !p->pageout && !p->laundry); if (!p->fictitious && p->pmapped) pmap_disconnect(p->phys_page); VM_PAGE_FREE(p); @@ -5278,7 +6038,7 @@ vm_object_page_remove( while (!queue_end(&object->memq, (queue_entry_t) p)) { next = (vm_page_t) queue_next(&p->listq); if ((start <= p->offset) && (p->offset < end)) { - assert(!p->cleaning && !p->pageout); + assert(!p->cleaning && !p->pageout && !p->laundry); if (!p->fictitious && p->pmapped) pmap_disconnect(p->phys_page); VM_PAGE_FREE(p); @@ -5405,464 +6165,83 @@ vm_object_coalesce( return(TRUE); } -/* - * Attach a set of physical pages to an object, so that they can - * be mapped by mapping the object. Typically used to map IO memory. - * - * The mapping function and its private data are used to obtain the - * physical addresses for each page to be mapped. 
- */ -void -vm_object_page_map( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - vm_object_offset_t (*map_fn)(void *map_fn_data, - vm_object_offset_t offset), - void *map_fn_data) /* private to map_fn */ +kern_return_t +vm_object_populate_with_private( + vm_object_t object, + vm_object_offset_t offset, + ppnum_t phys_page, + vm_size_t size) { - int64_t num_pages; - int i; - vm_page_t m; - vm_page_t old_page; - vm_object_offset_t addr; - - num_pages = atop_64(size); + ppnum_t base_page; + vm_object_offset_t base_offset; - for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) { - addr = (*map_fn)(map_fn_data, offset); + if (!object->private) + return KERN_FAILURE; - while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) - vm_page_more_fictitious(); + base_page = phys_page; - vm_object_lock(object); - if ((old_page = vm_page_lookup(object, offset)) - != VM_PAGE_NULL) - { - VM_PAGE_FREE(old_page); - } + vm_object_lock(object); - assert((ppnum_t) addr == addr); - vm_page_init(m, (ppnum_t) addr, FALSE); - /* - * private normally requires lock_queues but since we - * are initializing the page, its not necessary here - */ - m->private = TRUE; /* don`t free page */ - m->wire_count = 1; - vm_page_insert(m, object, offset); + if (!object->phys_contiguous) { + vm_page_t m; - PAGE_WAKEUP_DONE(m); - vm_object_unlock(object); - } -} + if ((base_offset = trunc_page_64(offset)) != offset) { + vm_object_unlock(object); + return KERN_FAILURE; + } + base_offset += object->paging_offset; -#include + while (size) { + m = vm_page_lookup(object, base_offset); -#if MACH_KDB -#include -#include + if (m != VM_PAGE_NULL) { + if (m->fictitious) { + if (m->phys_page != vm_page_guard_addr) { -#define printf kdbprintf + vm_page_lockspin_queues(); + m->private = TRUE; + vm_page_unlock_queues(); -extern boolean_t vm_object_cached( - vm_object_t object); + m->fictitious = FALSE; + m->phys_page = base_page; + } + } else if (m->phys_page != base_page) { -extern void print_bitstring( - char byte); + if ( !m->private) { + /* + * we'd leak a real page... that can't be right + */ + panic("vm_object_populate_with_private - %p not private", m); + } + if (m->pmapped) { + /* + * pmap call to clear old mapping + */ + pmap_disconnect(m->phys_page); + } + m->phys_page = base_page; + } + if (m->encrypted) { + /* + * we should never see this on a ficticious or private page + */ + panic("vm_object_populate_with_private - %p encrypted", m); + } -boolean_t vm_object_print_pages = FALSE; + } else { + while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) + vm_page_more_fictitious(); -void -print_bitstring( - char byte) -{ - printf("%c%c%c%c%c%c%c%c", - ((byte & (1 << 0)) ? '1' : '0'), - ((byte & (1 << 1)) ? '1' : '0'), - ((byte & (1 << 2)) ? '1' : '0'), - ((byte & (1 << 3)) ? '1' : '0'), - ((byte & (1 << 4)) ? '1' : '0'), - ((byte & (1 << 5)) ? '1' : '0'), - ((byte & (1 << 6)) ? '1' : '0'), - ((byte & (1 << 7)) ? 
'1' : '0')); -} - -boolean_t -vm_object_cached( - __unused register vm_object_t object) -{ -#if VM_OBJECT_CACHE - register vm_object_t o; - - queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) { - if (object == o) { - return TRUE; - } - } -#endif - return FALSE; -} - -#if MACH_PAGEMAP -/* - * vm_external_print: [ debug ] - */ -void -vm_external_print( - vm_external_map_t emap, - vm_object_size_t size) -{ - if (emap == VM_EXTERNAL_NULL) { - printf("0 "); - } else { - vm_object_size_t existence_size = stob(size); - printf("{ size=%lld, map=[", (uint64_t) existence_size); - if (existence_size > 0) { - print_bitstring(emap[0]); - } - if (existence_size > 1) { - print_bitstring(emap[1]); - } - if (existence_size > 2) { - printf("..."); - print_bitstring(emap[existence_size-1]); - } - printf("] }\n"); - } - return; -} -#endif /* MACH_PAGEMAP */ - -int -vm_follow_object( - vm_object_t object) -{ - int count = 0; - int orig_db_indent = db_indent; - - while (TRUE) { - if (object == VM_OBJECT_NULL) { - db_indent = orig_db_indent; - return count; - } - - count += 1; - - iprintf("object 0x%x", object); - printf(", shadow=0x%x", object->shadow); - printf(", copy=0x%x", object->copy); - printf(", pager=0x%x", object->pager); - printf(", ref=%d\n", object->ref_count); - - db_indent += 2; - object = object->shadow; - } - -} - -/* - * vm_object_print: [ debug ] - */ -void -vm_object_print(db_expr_t db_addr, __unused boolean_t have_addr, - __unused db_expr_t arg_count, __unused char *modif) -{ - vm_object_t object; - register vm_page_t p; - const char *s; - - register int count; - - object = (vm_object_t) (long) db_addr; - if (object == VM_OBJECT_NULL) - return; - - iprintf("object 0x%x\n", object); - - db_indent += 2; - - iprintf("size=0x%x", object->vo_size); - printf(", memq_hint=%p", object->memq_hint); - printf(", ref_count=%d\n", object->ref_count); - iprintf(""); -#if TASK_SWAPPER - printf("res_count=%d, ", object->res_count); -#endif /* TASK_SWAPPER */ - printf("resident_page_count=%d\n", object->resident_page_count); - - iprintf("shadow=0x%x", object->shadow); - if (object->shadow) { - register int i = 0; - vm_object_t shadow = object; - while((shadow = shadow->shadow)) - i++; - printf(" (depth %d)", i); - } - printf(", copy=0x%x", object->copy); - printf(", shadow_offset=0x%x", object->vo_shadow_offset); - printf(", last_alloc=0x%x\n", object->last_alloc); - - iprintf("pager=0x%x", object->pager); - printf(", paging_offset=0x%x", object->paging_offset); - printf(", pager_control=0x%x\n", object->pager_control); - - iprintf("copy_strategy=%d[", object->copy_strategy); - switch (object->copy_strategy) { - case MEMORY_OBJECT_COPY_NONE: - printf("copy_none"); - break; - - case MEMORY_OBJECT_COPY_CALL: - printf("copy_call"); - break; - - case MEMORY_OBJECT_COPY_DELAY: - printf("copy_delay"); - break; - - case MEMORY_OBJECT_COPY_SYMMETRIC: - printf("copy_symmetric"); - break; - - case MEMORY_OBJECT_COPY_INVALID: - printf("copy_invalid"); - break; - - default: - printf("?"); - } - printf("]"); - - iprintf("all_wanted=0x%x<", object->all_wanted); - s = ""; - if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) { - printf("%sinit", s); - s = ","; - } - if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) { - printf("%sready", s); - s = ","; - } - if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) { - printf("%spaging", s); - s = ","; - } - if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) { - printf("%slock", s); - s = ","; - } - if 
(vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) { - printf("%suncaching", s); - s = ","; - } - if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) { - printf("%scopy_call", s); - s = ","; - } - if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) { - printf("%scaching", s); - s = ","; - } - printf(">"); - printf(", paging_in_progress=%d\n", object->paging_in_progress); - printf(", activity_in_progress=%d\n", object->activity_in_progress); - - iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n", - (object->pager_created ? "" : "!"), - (object->pager_initialized ? "" : "!"), - (object->pager_ready ? "" : "!"), - (object->can_persist ? "" : "!"), - (object->pager_trusted ? "" : "!"), - (object->pageout ? "" : "!"), - (object->internal ? "internal" : "external"), - (object->temporary ? "temporary" : "permanent")); - iprintf("%salive, %spurgeable, %spurgeable_volatile, %spurgeable_empty, %sshadowed, %scached, %sprivate\n", - (object->alive ? "" : "!"), - ((object->purgable != VM_PURGABLE_DENY) ? "" : "!"), - ((object->purgable == VM_PURGABLE_VOLATILE) ? "" : "!"), - ((object->purgable == VM_PURGABLE_EMPTY) ? "" : "!"), - (object->shadowed ? "" : "!"), - (vm_object_cached(object) ? "" : "!"), - (object->private ? "" : "!")); - iprintf("%sadvisory_pageout, %ssilent_overwrite\n", - (object->advisory_pageout ? "" : "!"), - (object->silent_overwrite ? "" : "!")); - -#if MACH_PAGEMAP - iprintf("existence_map="); - vm_external_print(object->existence_map, object->vo_size); -#endif /* MACH_PAGEMAP */ -#if MACH_ASSERT - iprintf("paging_object=0x%x\n", object->paging_object); -#endif /* MACH_ASSERT */ - - if (vm_object_print_pages) { - count = 0; - p = (vm_page_t) queue_first(&object->memq); - while (!queue_end(&object->memq, (queue_entry_t) p)) { - if (count == 0) { - iprintf("memory:="); - } else if (count == 2) { - printf("\n"); - iprintf(" ..."); - count = 0; - } else { - printf(","); - } - count++; - - printf("(off=0x%llX,page=%p)", p->offset, p); - p = (vm_page_t) queue_next(&p->listq); - } - if (count != 0) { - printf("\n"); - } - } - db_indent -= 2; -} - - -/* - * vm_object_find [ debug ] - * - * Find all tasks which reference the given vm_object. - */ - -boolean_t vm_object_find(vm_object_t object); -boolean_t vm_object_print_verbose = FALSE; - -boolean_t -vm_object_find( - vm_object_t object) -{ - task_t task; - vm_map_t map; - vm_map_entry_t entry; - boolean_t found = FALSE; - - queue_iterate(&tasks, task, task_t, tasks) { - map = task->map; - for (entry = vm_map_first_entry(map); - entry && entry != vm_map_to_entry(map); - entry = entry->vme_next) { - - vm_object_t obj; - - /* - * For the time being skip submaps, - * only the kernel can have submaps, - * and unless we are interested in - * kernel objects, we can simply skip - * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm - * for a full solution. 
- */ - if (entry->is_sub_map) - continue; - if (entry) - obj = entry->object.vm_object; - else - continue; - - while (obj != VM_OBJECT_NULL) { - if (obj == object) { - if (!found) { - printf("TASK\t\tMAP\t\tENTRY\n"); - found = TRUE; - } - printf("0x%x\t0x%x\t0x%x\n", - task, map, entry); - } - obj = obj->shadow; - } - } - } - - return(found); -} - -#endif /* MACH_KDB */ - -kern_return_t -vm_object_populate_with_private( - vm_object_t object, - vm_object_offset_t offset, - ppnum_t phys_page, - vm_size_t size) -{ - ppnum_t base_page; - vm_object_offset_t base_offset; - - - if(!object->private) - return KERN_FAILURE; - - base_page = phys_page; - - vm_object_lock(object); - if(!object->phys_contiguous) { - vm_page_t m; - if((base_offset = trunc_page_64(offset)) != offset) { - vm_object_unlock(object); - return KERN_FAILURE; - } - base_offset += object->paging_offset; - while(size) { - m = vm_page_lookup(object, base_offset); - if(m != VM_PAGE_NULL) { - if(m->fictitious) { - if (m->phys_page != vm_page_guard_addr) { - - vm_page_lockspin_queues(); - m->private = TRUE; - vm_page_unlock_queues(); - - m->fictitious = FALSE; - m->phys_page = base_page; - if(!m->busy) { - m->busy = TRUE; - } - if(!m->absent) { - m->absent = TRUE; - } - m->list_req_pending = TRUE; - } - } else if (m->phys_page != base_page) { - if (m->pmapped) { - /* - * pmap call to clear old mapping - */ - pmap_disconnect(m->phys_page); - } - m->phys_page = base_page; - } - - /* - * ENCRYPTED SWAP: - * We're not pointing to the same - * physical page any longer and the - * contents of the new one are not - * supposed to be encrypted. - * XXX What happens to the original - * physical page. Is it lost ? - */ - m->encrypted = FALSE; - - } else { - while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) - vm_page_more_fictitious(); - - /* - * private normally requires lock_queues but since we - * are initializing the page, its not necessary here - */ - m->private = TRUE; - m->fictitious = FALSE; - m->phys_page = base_page; - m->list_req_pending = TRUE; - m->absent = TRUE; - m->unusual = TRUE; + /* + * private normally requires lock_queues but since we + * are initializing the page, its not necessary here + */ + m->private = TRUE; + m->fictitious = FALSE; + m->phys_page = base_page; + m->unusual = TRUE; + m->busy = FALSE; vm_page_insert(m, object, base_offset); } @@ -5883,6 +6262,7 @@ vm_object_populate_with_private( object->vo_size = size; } vm_object_unlock(object); + return KERN_SUCCESS; } @@ -6250,7 +6630,7 @@ vm_object_lock_request( * purgeable with no delayed copies pending. */ void -vm_object_purge(vm_object_t object) +vm_object_purge(vm_object_t object, int flags) { vm_object_lock_assert_exclusive(object); @@ -6260,7 +6640,22 @@ vm_object_purge(vm_object_t object) assert(object->copy == VM_OBJECT_NULL); assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); - if(object->purgable == VM_PURGABLE_VOLATILE) { + /* + * We need to set the object's state to VM_PURGABLE_EMPTY *before* + * reaping its pages. We update vm_page_purgeable_count in bulk + * and we don't want vm_page_remove() to update it again for each + * page we reap later. + * + * For the purgeable ledgers, pages from VOLATILE and EMPTY objects + * are all accounted for in the "volatile" ledgers, so this does not + * make any difference. 
+ * If we transitioned directly from NONVOLATILE to EMPTY, + * vm_page_purgeable_count must have been updated when the object + * was dequeued from its volatile queue and the purgeable ledgers + * must have also been updated accordingly at that time (in + * vm_object_purgable_control()). + */ + if (object->purgable == VM_PURGABLE_VOLATILE) { unsigned int delta; assert(object->resident_page_count >= object->wired_page_count); @@ -6278,10 +6673,63 @@ vm_object_purge(vm_object_t object) OSAddAtomic(-object->wired_page_count, (SInt32 *)&vm_page_purgeable_wired_count); } + object->purgable = VM_PURGABLE_EMPTY; } - object->purgable = VM_PURGABLE_EMPTY; + assert(object->purgable == VM_PURGABLE_EMPTY); vm_object_reap_pages(object, REAP_PURGEABLE); + + if (object->pager != NULL && + COMPRESSED_PAGER_IS_ACTIVE) { + unsigned int pgcount; + + if (object->activity_in_progress == 0 && + object->paging_in_progress == 0) { + /* + * Also reap any memory coming from this object + * in the VM compressor. + * + * There are no operations in progress on the VM object + * and no operation can start while we're holding the + * VM object lock, so it's safe to reap the compressed + * pages and update the page counts. + */ + pgcount = vm_compressor_pager_get_count(object->pager); + if (pgcount) { + pgcount = vm_compressor_pager_reap_pages(object->pager, flags); + vm_compressor_pager_count(object->pager, + -pgcount, + FALSE, /* shared */ + object); + vm_purgeable_compressed_update(object, + -pgcount); + } + if ( !(flags & C_DONT_BLOCK)) { + assert(vm_compressor_pager_get_count(object->pager) + == 0); + } + } else { + /* + * There's some kind of paging activity in progress + * for this object, which could result in a page + * being compressed or decompressed, possibly while + * the VM object is not locked, so it could race + * with us. + * + * We can't really synchronize this without possibly + * causing a deadlock when the compressor needs to + * allocate or free memory while compressing or + * decompressing a page from a purgeable object + * mapped in the kernel_map... + * + * So let's not attempt to purge the compressor + * pager if there's any kind of operation in + * progress on the VM object. + */ + } + } + + vm_object_lock_assert_exclusive(object); } @@ -6376,6 +6824,8 @@ vm_object_purgable_control( return KERN_INVALID_ARGUMENT; } + vm_object_lock_assert_exclusive(object); + /* * Get current state of the purgeable object. */ @@ -6436,15 +6886,35 @@ vm_object_purgable_control( vm_page_lock_queues(); - assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ - purgeable_q_t queue = vm_purgeable_object_remove(object); + /* object should be on a queue */ + assert(object->objq.next != NULL && + object->objq.prev != NULL); + purgeable_q_t queue; + + /* + * Move object from its volatile queue to the + * non-volatile queue... + */ + queue = vm_purgeable_object_remove(object); assert(queue); - vm_purgeable_token_delete_first(queue); + if (object->purgeable_when_ripe) { + vm_purgeable_token_delete_last(queue); + } assert(queue->debug_count_objects>=0); vm_page_unlock_queues(); } + if (old_state == VM_PURGABLE_VOLATILE || + old_state == VM_PURGABLE_EMPTY) { + /* + * Transfer the object's pages from the volatile to + * non-volatile ledgers. 
+ */ + vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE, + FALSE); + } + break; case VM_PURGABLE_VOLATILE: @@ -6461,13 +6931,14 @@ vm_object_purgable_control( refmod = pmap_disconnect(p->phys_page); if ((refmod & VM_MEM_MODIFIED) && !p->dirty) { - p->dirty = TRUE; + SET_PAGE_DIRTY(p, FALSE); } } } if (old_state == VM_PURGABLE_EMPTY && - object->resident_page_count == 0) + object->resident_page_count == 0 && + object->pager == NULL) break; purgeable_q_t queue; @@ -6486,15 +6957,26 @@ vm_object_purgable_control( old_state == VM_PURGABLE_EMPTY) { unsigned int delta; - /* try to add token... this can fail */ - vm_page_lock_queues(); + if ((*state & VM_PURGABLE_NO_AGING_MASK) == + VM_PURGABLE_NO_AGING) { + object->purgeable_when_ripe = FALSE; + } else { + object->purgeable_when_ripe = TRUE; + } + + if (object->purgeable_when_ripe) { + kern_return_t result; + + /* try to add token... this can fail */ + vm_page_lock_queues(); - kern_return_t result = vm_purgeable_token_add(queue); - if (result != KERN_SUCCESS) { - vm_page_unlock_queues(); - return result; + result = vm_purgeable_token_add(queue); + if (result != KERN_SUCCESS) { + vm_page_unlock_queues(); + return result; + } + vm_page_unlock_queues(); } - vm_page_unlock_queues(); assert(object->resident_page_count >= object->wired_page_count); @@ -6512,10 +6994,14 @@ vm_object_purgable_control( object->purgable = new_state; - /* object should not be on a queue */ - assert(object->objq.next == NULL && object->objq.prev == NULL); + /* object should be on "non-volatile" queue */ + assert(object->objq.next != NULL); + assert(object->objq.prev != NULL); } else if (old_state == VM_PURGABLE_VOLATILE) { + purgeable_q_t old_queue; + boolean_t purgeable_when_ripe; + /* * if reassigning priorities / purgeable groups, we don't change the * token queue. So moving priorities will not make pages stay around longer. @@ -6526,22 +7012,40 @@ vm_object_purgable_control( */ assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ - purgeable_q_t old_queue=vm_purgeable_object_remove(object); + old_queue = vm_purgeable_object_remove(object); assert(old_queue); - if (old_queue != queue) { + if ((*state & VM_PURGABLE_NO_AGING_MASK) == + VM_PURGABLE_NO_AGING) { + purgeable_when_ripe = FALSE; + } else { + purgeable_when_ripe = TRUE; + } + + if (old_queue != queue || + (purgeable_when_ripe != + object->purgeable_when_ripe)) { kern_return_t result; /* Changing queue. Have to move token. 
*/ vm_page_lock_queues(); - vm_purgeable_token_delete_first(old_queue); - result = vm_purgeable_token_add(queue); + if (object->purgeable_when_ripe) { + vm_purgeable_token_delete_last(old_queue); + } + object->purgeable_when_ripe = purgeable_when_ripe; + if (object->purgeable_when_ripe) { + result = vm_purgeable_token_add(queue); + assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */ + } vm_page_unlock_queues(); - assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */ } }; vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT ); + if (old_state == VM_PURGABLE_NONVOLATILE) { + vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE, + FALSE); + } assert(queue->debug_count_objects>=0); @@ -6562,61 +7066,186 @@ vm_object_purgable_control( refmod = pmap_disconnect(p->phys_page); if ((refmod & VM_MEM_MODIFIED) && !p->dirty) { - p->dirty = TRUE; + SET_PAGE_DIRTY(p, FALSE); } } } - if (old_state != new_state) { - assert(old_state == VM_PURGABLE_NONVOLATILE || - old_state == VM_PURGABLE_VOLATILE); - if (old_state == VM_PURGABLE_VOLATILE) { - purgeable_q_t old_queue; + if (old_state == new_state) { + /* nothing changes */ + break; + } + + assert(old_state == VM_PURGABLE_NONVOLATILE || + old_state == VM_PURGABLE_VOLATILE); + if (old_state == VM_PURGABLE_VOLATILE) { + purgeable_q_t old_queue; + + /* object should be on a queue */ + assert(object->objq.next != NULL && + object->objq.prev != NULL); - /* object should be on a queue */ - assert(object->objq.next != NULL && - object->objq.prev != NULL); - old_queue = vm_purgeable_object_remove(object); - assert(old_queue); + old_queue = vm_purgeable_object_remove(object); + assert(old_queue); + if (object->purgeable_when_ripe) { vm_page_lock_queues(); vm_purgeable_token_delete_first(old_queue); vm_page_unlock_queues(); } - (void) vm_object_purge(object); } - break; + if (old_state == VM_PURGABLE_NONVOLATILE) { + /* + * This object's pages were previously accounted as + * "non-volatile" and now need to be accounted as + * "volatile". + */ + vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE, + FALSE); + /* + * Set to VM_PURGABLE_EMPTY because the pages are no + * longer accounted in the "non-volatile" ledger + * and are also not accounted for in + * "vm_page_purgeable_count". + */ + object->purgable = VM_PURGABLE_EMPTY; + } + + (void) vm_object_purge(object, 0); + assert(object->purgable == VM_PURGABLE_EMPTY); + + break; } + *state = old_state; + vm_object_lock_assert_exclusive(object); + return KERN_SUCCESS; } -#if TASK_SWAPPER -/* - * vm_object_res_deallocate - * - * (recursively) decrement residence counts on vm objects and their shadows. - * Called from vm_object_deallocate and when swapping out an object. - * - * The object is locked, and remains locked throughout the function, - * even as we iterate down the shadow chain. Locks on intermediate objects - * will be dropped, but not the original object. - * - * NOTE: this function used to use recursion, rather than iteration. - */ - -__private_extern__ void -vm_object_res_deallocate( - vm_object_t object) +kern_return_t +vm_object_get_page_counts( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + unsigned int *resident_page_count, + unsigned int *dirty_page_count) { - vm_object_t orig_object = object; - /* - * Object is locked so it can be called directly - * from vm_object_deallocate. Original object is never - * unlocked. 
- */ - assert(object->res_count > 0); + + kern_return_t kr = KERN_SUCCESS; + boolean_t count_dirty_pages = FALSE; + vm_page_t p = VM_PAGE_NULL; + unsigned int local_resident_count = 0; + unsigned int local_dirty_count = 0; + vm_object_offset_t cur_offset = 0; + vm_object_offset_t end_offset = 0; + + if (object == VM_OBJECT_NULL) + return KERN_INVALID_ARGUMENT; + + + cur_offset = offset; + + end_offset = offset + size; + + vm_object_lock_assert_exclusive(object); + + if (dirty_page_count != NULL) { + + count_dirty_pages = TRUE; + } + + if (resident_page_count != NULL && count_dirty_pages == FALSE) { + /* + * Fast path when: + * - we only want the resident page count, and, + * - the entire object is exactly covered by the request. + */ + if (offset == 0 && (object->vo_size == size)) { + + *resident_page_count = object->resident_page_count; + goto out; + } + } + + if (object->resident_page_count <= (size >> PAGE_SHIFT)) { + + queue_iterate(&object->memq, p, vm_page_t, listq) { + + if (p->offset >= cur_offset && p->offset < end_offset) { + + local_resident_count++; + + if (count_dirty_pages) { + + if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) { + + local_dirty_count++; + } + } + } + } + } else { + + for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) { + + p = vm_page_lookup(object, cur_offset); + + if (p != VM_PAGE_NULL) { + + local_resident_count++; + + if (count_dirty_pages) { + + if (p->dirty || (p->wpmapped && pmap_is_modified(p->phys_page))) { + + local_dirty_count++; + } + } + } + } + + } + + if (resident_page_count != NULL) { + *resident_page_count = local_resident_count; + } + + if (dirty_page_count != NULL) { + *dirty_page_count = local_dirty_count; + } + +out: + return kr; +} + + +#if TASK_SWAPPER +/* + * vm_object_res_deallocate + * + * (recursively) decrement residence counts on vm objects and their shadows. + * Called from vm_object_deallocate and when swapping out an object. + * + * The object is locked, and remains locked throughout the function, + * even as we iterate down the shadow chain. Locks on intermediate objects + * will be dropped, but not the original object. + * + * NOTE: this function used to use recursion, rather than iteration. + */ + +__private_extern__ void +vm_object_res_deallocate( + vm_object_t object) +{ + vm_object_t orig_object = object; + /* + * Object is locked so it can be called directly + * from vm_object_deallocate. Original object is never + * unlocked. 
+ */ + assert(object->res_count > 0); while (--object->res_count == 0) { assert(object->ref_count >= object->res_count); vm_object_deactivate_all_pages(object); @@ -6879,7 +7508,7 @@ vm_object_transpose( vm_page_rename(page, object1, page->offset, FALSE); } assert(queue_empty(&object2->memq)); - /* transfer tmp_object's pages to object1 */ + /* transfer tmp_object's pages to object2 */ while (!queue_empty(&tmp_object->memq)) { page = (vm_page_t) queue_first(&tmp_object->memq); queue_remove(&tmp_object->memq, page, @@ -6949,7 +7578,7 @@ MACRO_END assert(object1->purgable == VM_PURGABLE_DENY); assert(object2->purgable == VM_PURGABLE_DENY); /* "shadowed" refers to the the object not its contents */ - __TRANSPOSE_FIELD(silent_overwrite); + __TRANSPOSE_FIELD(purgeable_when_ripe); __TRANSPOSE_FIELD(advisory_pageout); __TRANSPOSE_FIELD(true_share); /* "terminating" should not be set */ @@ -7011,10 +7640,10 @@ MACRO_END #if UPL_DEBUG /* "uplq" refers to the object not its contents (see upl_transpose()) */ #endif - assert(object1->objq.next == NULL); - assert(object1->objq.prev == NULL); - assert(object2->objq.next == NULL); - assert(object2->objq.prev == NULL); + assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL)); + assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL)); + assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL)); + assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL)); #undef __TRANSPOSE_FIELD @@ -7066,16 +7695,8 @@ done: extern int speculative_reads_disabled; extern int ignore_is_ssd; -#if CONFIG_EMBEDDED -unsigned int preheat_pages_max = MAX_UPL_TRANSFER; -unsigned int preheat_pages_min = 8; -#else -unsigned int preheat_pages_max = MAX_UPL_TRANSFER; -unsigned int preheat_pages_min = 8; -#endif - -uint32_t pre_heat_scaling[MAX_UPL_TRANSFER + 1]; -uint32_t pre_heat_cluster[MAX_UPL_TRANSFER + 1]; +unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES; +unsigned int preheat_min_bytes = (1024 * 32); __private_extern__ void @@ -7098,9 +7719,8 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, uint32_t throttle_limit; int sequential_run; int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - unsigned int max_ph_size; - unsigned int min_ph_size; - unsigned int min_ph_size_in_bytes; + vm_size_t max_ph_size; + vm_size_t min_ph_size; assert( !(*length & PAGE_MASK)); assert( !(*start & PAGE_MASK_64)); @@ -7136,29 +7756,27 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, if (!ignore_is_ssd) vnode_pager_get_isSSD(object->pager, &isSSD); - min_ph_size = preheat_pages_min; - max_ph_size = preheat_pages_max; + min_ph_size = round_page(preheat_min_bytes); + max_ph_size = round_page(preheat_max_bytes); if (isSSD) { min_ph_size /= 2; max_ph_size /= 8; } - if (min_ph_size < 1) - min_ph_size = 1; + if (min_ph_size < PAGE_SIZE) + min_ph_size = PAGE_SIZE; - if (max_ph_size < 1) - max_ph_size = 1; - else if (max_ph_size > MAX_UPL_TRANSFER) - max_ph_size = MAX_UPL_TRANSFER; + if (max_ph_size < PAGE_SIZE) + max_ph_size = PAGE_SIZE; + else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) + max_ph_size = MAX_UPL_TRANSFER_BYTES; - if (max_length > (max_ph_size * PAGE_SIZE)) - max_length = max_ph_size * PAGE_SIZE; + if (max_length > max_ph_size) + max_length = max_ph_size; if (max_length <= PAGE_SIZE) goto out; - min_ph_size_in_bytes = min_ph_size * PAGE_SIZE; - if (object->internal) object_size = object->vo_size; else @@ -7210,11 +7828,11 @@ 
vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, *io_streaming = 1; } else { - if (object->pages_created < (20 * min_ph_size)) { + if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) { /* * prime the pump */ - pre_heat_size = min_ph_size_in_bytes; + pre_heat_size = min_ph_size; } else { /* * Linear growth in PH size: The maximum size is max_length... @@ -7222,10 +7840,10 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, * power of 2 nor a multiple of PAGE_SIZE... so round * it up to the nearest PAGE_SIZE boundary */ - pre_heat_size = (max_length * object->pages_used) / object->pages_created; - - if (pre_heat_size < min_ph_size_in_bytes) - pre_heat_size = min_ph_size_in_bytes; + pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created; + + if (pre_heat_size < min_ph_size) + pre_heat_size = min_ph_size; else pre_heat_size = round_page(pre_heat_size); } @@ -7257,21 +7875,25 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, throttle_limit = (uint32_t) max_length; assert(throttle_limit == max_length); - if (vnode_pager_check_hard_throttle(object->pager, &throttle_limit, *io_streaming) == KERN_SUCCESS) { + if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) { if (max_length > throttle_limit) max_length = throttle_limit; } if (pre_heat_size > max_length) pre_heat_size = max_length; - if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size_in_bytes)) { - if (vm_page_free_count < vm_page_throttle_limit) + if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) { + + unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count; + + if (consider_free < vm_page_throttle_limit) { pre_heat_size = trunc_page(pre_heat_size / 16); - else if (vm_page_free_count < vm_page_free_target) + } else if (consider_free < vm_page_free_target) { pre_heat_size = trunc_page(pre_heat_size / 4); - - if (pre_heat_size < min_ph_size_in_bytes) - pre_heat_size = min_ph_size_in_bytes; + } + + if (pre_heat_size < min_ph_size) + pre_heat_size = min_ph_size; } if (look_ahead == TRUE) { if (look_behind == TRUE) { @@ -7321,8 +7943,6 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, assert( !(target_start & PAGE_MASK_64)); assert( !(pre_heat_size & PAGE_MASK)); - pre_heat_scaling[pre_heat_size / PAGE_SIZE]++; - if (pre_heat_size <= PAGE_SIZE) goto out; @@ -7353,7 +7973,11 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, */ break; } -#endif +#endif /* MACH_PAGEMAP */ + if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) + == VM_EXTERNAL_STATE_ABSENT) { + break; + } if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { /* * don't bridge resident pages @@ -7385,7 +8009,10 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, */ break; } -#endif +#endif /* MACH_PAGEMAP */ + if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) { + break; + } if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { /* * don't bridge resident pages @@ -7399,9 +8026,9 @@ out: if (*length > max_length) *length = max_length; - pre_heat_cluster[*length / PAGE_SIZE]++; - vm_object_unlock(object); + + DTRACE_VM1(clustersize, vm_size_t, *length); } @@ -7488,7 +8115,9 @@ vm_object_page_op( /* if such violations occur we will assert sooner */ /* or later. 
*/ assert(dst_page->busy || (ops & UPL_POP_BUSY)); - if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE; + if (ops & UPL_POP_DIRTY) { + SET_PAGE_DIRTY(dst_page, FALSE); + } if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE; if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE; if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE; @@ -7607,12 +8236,7 @@ vm_object_range_op( dst_page = vm_page_lookup(object, offset); if (dst_page != VM_PAGE_NULL) { if (ops & UPL_ROP_DUMP) { - if (dst_page->list_req_pending) { - /* - * This page isn't on a UPL yet. - * So it's safe to steal it here and dump it. - */ - } else if (dst_page->busy || dst_page->cleaning) { + if (dst_page->busy || dst_page->cleaning) { /* * someone else is playing with the * page, we will have to wait @@ -7626,13 +8250,20 @@ vm_object_range_op( */ continue; } + if (dst_page->laundry) { + dst_page->pageout = FALSE; + + vm_pageout_steal_laundry(dst_page, FALSE); + } if (dst_page->pmapped == TRUE) pmap_disconnect(dst_page->phys_page); VM_PAGE_FREE(dst_page); - } else if ((ops & UPL_ROP_ABSENT) && !dst_page->absent) - break; + } else if ((ops & UPL_ROP_ABSENT) + && (!dst_page->absent || dst_page->busy)) { + break; + } } else if (ops & UPL_ROP_PRESENT) break; @@ -7653,6 +8284,46 @@ vm_object_range_op( return KERN_SUCCESS; } +/* + * Used to point a pager directly to a range of memory (when the pager may be associated + * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently + * expect that the virtual address will denote the start of a range that is physically contiguous. + */ +kern_return_t pager_map_to_phys_contiguous( + memory_object_control_t object, + memory_object_offset_t offset, + addr64_t base_vaddr, + vm_size_t size) +{ + ppnum_t page_num; + boolean_t clobbered_private; + kern_return_t retval; + vm_object_t pager_object; + + page_num = pmap_find_phys(kernel_pmap, base_vaddr); + + if (!page_num) { + retval = KERN_FAILURE; + goto out; + } + + pager_object = memory_object_control_to_vm_object(object); + + if (!pager_object) { + retval = KERN_FAILURE; + goto out; + } + + clobbered_private = pager_object->private; + pager_object->private = TRUE; + retval = vm_object_populate_with_private(pager_object, offset, page_num, size); + + if (retval != KERN_SUCCESS) + pager_object->private = clobbered_private; + +out: + return retval; +} uint32_t scan_object_collision = 0; @@ -7744,19 +8415,15 @@ vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) #if CONFIG_FREEZE -__private_extern__ void default_freezer_pack_page(vm_page_t , vm_object_t , vm_object_offset_t, void**); -__private_extern__ void default_freezer_unpack(vm_object_t , void**); - kern_return_t vm_object_pack( - unsigned int *purgeable_count, - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - boolean_t *shared, - vm_object_t src_object, - vm_object_t compact_object, - void **table, - vm_object_offset_t *offset) + unsigned int *purgeable_count, + unsigned int *wired_count, + unsigned int *clean_count, + unsigned int *dirty_count, + unsigned int dirty_budget, + boolean_t *shared, + vm_object_t src_object, + struct default_freezer_handle *df_handle) { kern_return_t kr = KERN_SUCCESS; @@ -7773,24 +8440,35 @@ kern_return_t vm_object_pack( if (src_object->purgable == VM_PURGABLE_VOLATILE) { *purgeable_count = src_object->resident_page_count; - /* If the destination object is null, we're just walking the pages to discover how many can be hibernated */ - if (VM_OBJECT_NULL != compact_object) { + /* If the 
default freezer handle is null, we're just walking the pages to discover how many can be hibernated */ + if (df_handle != NULL) { purgeable_q_t queue; /* object should be on a queue */ assert(src_object->objq.next != NULL && src_object->objq.prev != NULL); + queue = vm_purgeable_object_remove(src_object); assert(queue); - vm_page_lock_queues(); - vm_purgeable_token_delete_first(queue); - vm_page_unlock_queues(); - vm_object_purge(src_object); + if (src_object->purgeable_when_ripe) { + vm_page_lock_queues(); + vm_purgeable_token_delete_first(queue); + vm_page_unlock_queues(); + } + + vm_object_purge(src_object, 0); + assert(src_object->purgable == VM_PURGABLE_EMPTY); + + /* + * This object was "volatile" so its pages must have + * already been accounted as "volatile": no change + * in accounting now that it's "empty". + */ } goto done; } if (src_object->ref_count == 1) { - vm_object_pack_pages(wired_count, clean_count, dirty_count, src_object, compact_object, table, offset); + vm_object_pack_pages(wired_count, clean_count, dirty_count, dirty_budget, src_object, df_handle); } else { if (src_object->internal) { *shared = TRUE; @@ -7805,34 +8483,27 @@ done: void vm_object_pack_pages( - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - vm_object_t src_object, - vm_object_t compact_object, - void **table, - vm_object_offset_t *offset) + unsigned int *wired_count, + unsigned int *clean_count, + unsigned int *dirty_count, + unsigned int dirty_budget, + vm_object_t src_object, + struct default_freezer_handle *df_handle) { vm_page_t p, next; next = (vm_page_t)queue_first(&src_object->memq); - /* Since this function is dual purpose in order that we can count - * the freezable pages as well as prepare them, assert that our - * arguments are sane. Gnarly, but avoids code duplication. - */ - if (VM_OBJECT_NULL == compact_object){ - assert(!table); - assert(!offset); - } else { - assert(table); - assert(offset); - } - while (!queue_end(&src_object->memq, (queue_entry_t)next)) { p = next; next = (vm_page_t)queue_next(&next->listq); + /* Finish up if we've hit our pageout limit */ + if (dirty_budget && (dirty_budget == *dirty_count)) { + break; + } + assert(!p->laundry); + if (p->fictitious || p->busy ) continue; @@ -7844,7 +8515,7 @@ vm_object_pack_pages( continue; } - if (VM_OBJECT_NULL == compact_object) { + if (df_handle == NULL) { if (p->dirty || pmap_is_modified(p->phys_page)) { (*dirty_count)++; } else { @@ -7854,14 +8525,7 @@ vm_object_pack_pages( } if (p->cleaning) { - p->busy = TRUE; p->pageout = TRUE; - p->dump_cleaning = TRUE; - - vm_page_lockspin_queues(); - vm_page_wire(p); - vm_page_unlock_queues(); - continue; } @@ -7869,16 +8533,12 @@ vm_object_pack_pages( int refmod_state; refmod_state = pmap_disconnect(p->phys_page); if (refmod_state & VM_MEM_MODIFIED) { - p->dirty = TRUE; + SET_PAGE_DIRTY(p, FALSE); } } if (p->dirty) { - p->busy = TRUE; - - default_freezer_pack_page(p, compact_object, *offset, table); - *offset += PAGE_SIZE; - + default_freezer_pack_page(p, df_handle); (*dirty_count)++; } else { @@ -7888,32 +8548,248 @@ vm_object_pack_pages( } } + +/* + * This routine does the "relocation" of previously + * compressed pages belonging to this object that are + * residing in a number of compressed segments into + * a set of compressed segments dedicated to hold + * compressed pages belonging to this object. 
+ */ + +extern void *freezer_chead; +extern char *freezer_compressor_scratch_buf; +extern int c_freezer_compression_count; +extern AbsoluteTime c_freezer_last_yield_ts; + +#define MAX_FREE_BATCH 32 +#define FREEZER_DUTY_CYCLE_ON_MS 5 +#define FREEZER_DUTY_CYCLE_OFF_MS 5 + +static int c_freezer_should_yield(void); + + +static int +c_freezer_should_yield() +{ + AbsoluteTime cur_time; + uint64_t nsecs; + + assert(c_freezer_last_yield_ts); + clock_get_uptime(&cur_time); + + SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts); + absolutetime_to_nanoseconds(cur_time, &nsecs); + + if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) + return (1); + return (0); +} + + void -vm_object_pageout( +vm_object_compressed_freezer_done() +{ + vm_compressor_finished_filling(&freezer_chead); +} + + +void +vm_object_compressed_freezer_pageout( vm_object_t object) { - vm_page_t p, next; - - assert(object != VM_OBJECT_NULL ); - + vm_page_t p; + vm_page_t local_freeq = NULL; + int local_freed = 0; + kern_return_t retval = KERN_SUCCESS; + int obj_resident_page_count_snapshot = 0; + + assert(object != VM_OBJECT_NULL); + vm_object_lock(object); + + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { + + if (!object->pager_initialized) { + + vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); + + if (!object->pager_initialized) + vm_object_compressor_pager_create(object); + } + + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { + vm_object_unlock(object); + return; + } + } + + if (DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED) { + vm_object_offset_t curr_offset = 0; + + /* + * Go through the object and make sure that any + * previously compressed pages are relocated into + * a compressed segment associated with our "freezer_chead". + */ + while (curr_offset < object->vo_size) { + + curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset); - next = (vm_page_t)queue_first(&object->memq); + if (curr_offset == (vm_object_offset_t) -1) + break; + + retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead); + + if (retval != KERN_SUCCESS) + break; + + curr_offset += PAGE_SIZE_64; + } + } + + /* + * We can't hold the object lock while heading down into the compressed pager + * layer because we might need the kernel map lock down there to allocate new + * compressor data structures. And if this same object is mapped in the kernel + * and there's a fault on it, then that thread will want the object lock while + * holding the kernel map lock. + * + * Since we are going to drop/grab the object lock repeatedly, we must make sure + * we won't be stuck in an infinite loop if the same page(s) keep getting + * decompressed. So we grab a snapshot of the number of pages in the object and + * we won't process any more than that number of pages. 
+ */ + + obj_resident_page_count_snapshot = object->resident_page_count; + + vm_object_activity_begin(object); + + while ((obj_resident_page_count_snapshot--) && !queue_empty(&object->memq)) { + + p = (vm_page_t)queue_first(&object->memq); + + KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0); - while (!queue_end(&object->memq, (queue_entry_t)next)) { - p = next; - next = (vm_page_t)queue_next(&next->listq); - - /* Throw to the pageout queue */ vm_page_lockspin_queues(); - VM_PAGE_QUEUES_REMOVE(p); - vm_pageout_cluster(p); + if (p->cleaning || p->fictitious || p->busy || p->absent || p->unusual || p->error || VM_PAGE_WIRED(p)) { + if (p->cleaning) + p->pageout = TRUE; + + vm_page_unlock_queues(); + + KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0); + + queue_remove(&object->memq, p, vm_page_t, listq); + queue_enter(&object->memq, p, vm_page_t, listq); + + continue; + } + + if (p->pmapped == TRUE) { + int refmod_state, pmap_flags; + + if (p->dirty || p->precious) { + pmap_flags = PMAP_OPTIONS_COMPRESSOR; + } else { + pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; + } + + refmod_state = pmap_disconnect_options(p->phys_page, pmap_flags, NULL); + if (refmod_state & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(p, FALSE); + } + } + + if (p->dirty == FALSE && p->precious == FALSE) { + /* + * Clean and non-precious page. + */ + vm_page_unlock_queues(); + VM_PAGE_FREE(p); + + KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0); + continue; + } + + if (p->laundry) { + p->pageout = FALSE; + vm_pageout_steal_laundry(p, TRUE); + } + vm_page_queues_remove(p); vm_page_unlock_queues(); + + + /* + * In case the compressor fails to compress this page, we need it at + * the back of the object memq so that we don't keep trying to process it. + * Make the move here while we have the object lock held. + */ + + queue_remove(&object->memq, p, vm_page_t, listq); + queue_enter(&object->memq, p, vm_page_t, listq); + + /* + * Grab an activity_in_progress here for vm_pageout_compress_page() to consume. + * + * Mark the page busy so no one messes with it while we have the object lock dropped. + */ + + p->busy = TRUE; + + vm_object_activity_begin(object); + + vm_object_unlock(object); + + /* + * arg3 == FALSE tells vm_pageout_compress_page that we don't hold the object lock and the pager may not be initialized. 
+ */ + if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p, FALSE) == KERN_SUCCESS) { + /* + * page has already been un-tabled from the object via 'vm_page_remove' + */ + p->pageq.next = (queue_entry_t)local_freeq; + local_freeq = p; + local_freed++; + + if (local_freed >= MAX_FREE_BATCH) { + + vm_page_free_list(local_freeq, TRUE); + + local_freeq = NULL; + local_freed = 0; + } + c_freezer_compression_count++; + } + KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0); + + if (local_freed == 0 && c_freezer_should_yield()) { + + thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); + clock_get_uptime(&c_freezer_last_yield_ts); + } + + vm_object_lock(object); + } + + if (local_freeq) { + vm_page_free_list(local_freeq, TRUE); + + local_freeq = NULL; + local_freed = 0; } + + vm_object_activity_end(object); vm_object_unlock(object); + + if (c_freezer_should_yield()) { + + thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); + clock_get_uptime(&c_freezer_last_yield_ts); + } } kern_return_t @@ -7949,26 +8825,381 @@ vm_object_pagein( return kr; } +#endif /* CONFIG_FREEZE */ + + +void +vm_object_pageout( + vm_object_t object) +{ + vm_page_t p, next; + struct vm_pageout_queue *iq; + boolean_t need_unlock = TRUE; + + iq = &vm_pageout_queue_internal; + + assert(object != VM_OBJECT_NULL ); + assert(!DEFAULT_PAGER_IS_ACTIVE && !DEFAULT_FREEZER_IS_ACTIVE); + + vm_object_lock(object); + + if (!object->internal || + object->terminating || + !object->alive) { + vm_object_unlock(object); + return; + } + + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { + + if (!object->pager_initialized) { + + vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); + + if (!object->pager_initialized) + vm_object_compressor_pager_create(object); + } + + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { + vm_object_unlock(object); + return; + } + } + +ReScan: + next = (vm_page_t)queue_first(&object->memq); + + while (!queue_end(&object->memq, (queue_entry_t)next)) { + p = next; + next = (vm_page_t)queue_next(&next->listq); + + if (!(p->active || p->inactive || p->speculative) || + p->encrypted_cleaning || + p->cleaning || + p->laundry || + p->pageout || + p->busy || + p->absent || + p->error || + p->fictitious || + VM_PAGE_WIRED(p)) { + /* + * Page is already being cleaned or can't be cleaned. + */ + continue; + } + + /* Throw to the pageout queue */ + + vm_page_lockspin_queues(); + need_unlock = TRUE; + + if (vm_compressor_low_on_space()) { + vm_page_unlock_queues(); + break; + } + + if (VM_PAGE_Q_THROTTLED(iq)) { + + iq->pgo_draining = TRUE; + + assert_wait((event_t) (&iq->pgo_laundry + 1), + THREAD_INTERRUPTIBLE); + vm_page_unlock_queues(); + vm_object_unlock(object); + + thread_block(THREAD_CONTINUE_NULL); + + vm_object_lock(object); + goto ReScan; + } + + assert(!p->fictitious); + assert(!p->busy); + assert(!p->absent); + assert(!p->unusual); + assert(!p->error); + assert(!VM_PAGE_WIRED(p)); + assert(!p->cleaning); + + if (p->pmapped == TRUE) { + int refmod_state; + int pmap_options; + + pmap_options = 0; + if (COMPRESSED_PAGER_IS_ACTIVE || + DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { + /* + * Tell pmap the page should be accounted + * for as "compressed" if it's been modified. + */ + pmap_options = + PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; + if (p->dirty || p->precious) { + /* + * We already know it's been modified, + * so tell pmap to account for it + * as "compressed". 
+ */ + pmap_options = PMAP_OPTIONS_COMPRESSOR; + } + } + refmod_state = pmap_disconnect_options(p->phys_page, + pmap_options, + NULL); + if (refmod_state & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(p, FALSE); + } + } + + if (!p->dirty && !p->precious) { + vm_page_unlock_queues(); + VM_PAGE_FREE(p); + continue; + } + + vm_page_queues_remove(p); + if (vm_pageout_cluster(p, TRUE, FALSE, TRUE)) + need_unlock = FALSE; + + if (need_unlock == TRUE) + vm_page_unlock_queues(); + } + + vm_object_unlock(object); +} + +#if CONFIG_IOSCHED void -vm_object_unpack( - vm_object_t compact_object, - void **table) +vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio) { + io_reprioritize_req_t req; + struct vnode *devvp = NULL; + + if(vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) + return; + /* - * Future Work: - * Right now we treat the default freezer much like - * the default pager with respect to when it is - * created and terminated. - * But, in the future, we may want to terminate the - * default freezer at the very instant that an object - * has been completely re-filled with all it's previously - * paged-out pages. - * At that time we'll need to reset the object fields like - * "pager" and the associated "pager_{created,initialized,trusted}" - * fields right here. + * Create the request for I/O reprioritization. + * We use the noblock variant of zalloc because we're holding the object + * lock here and we could cause a deadlock in low memory conditions. */ - default_freezer_unpack(compact_object, table); + req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone); + if (req == NULL) + return; + req->blkno = blkno; + req->len = len; + req->priority = prio; + req->devvp = devvp; + + /* Insert request into the reprioritization list */ + IO_REPRIORITIZE_LIST_LOCK(); + queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); + IO_REPRIORITIZE_LIST_UNLOCK(); + + /* Wakeup reprioritize thread */ + IO_REPRIO_THREAD_WAKEUP(); + + return; +} + +void +vm_decmp_upl_reprioritize(upl_t upl, int prio) +{ + int offset; + vm_object_t object; + io_reprioritize_req_t req; + struct vnode *devvp = NULL; + uint64_t blkno; + uint32_t len; + upl_t io_upl; + uint64_t *io_upl_reprio_info; + int io_upl_size; + + if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) + return; + + /* + * We dont want to perform any allocations with the upl lock held since that might + * result in a deadlock. If the system is low on memory, the pageout thread would + * try to pageout stuff and might wait on this lock. If we are waiting for the memory to + * be freed up by the pageout thread, it would be a deadlock. + */ + + + /* First step is just to get the size of the upl to find out how big the reprio info is */ + if(!upl_try_lock(upl)) + return; + + if (upl->decmp_io_upl == NULL) { + /* The real I/O upl was destroyed by the time we came in here. Nothing to do. 
*/ + upl_unlock(upl); + return; + } + + io_upl = upl->decmp_io_upl; + assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0); + io_upl_size = io_upl->size; + upl_unlock(upl); + + /* Now perform the allocation */ + io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); + if (io_upl_reprio_info == NULL) + return; + + /* Now again take the lock, recheck the state and grab out the required info */ + if(!upl_try_lock(upl)) + goto out; + + if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) { + /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ + upl_unlock(upl); + goto out; + } + memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); + + /* Get the VM object for this UPL */ + if (io_upl->flags & UPL_SHADOWED) { + object = io_upl->map_object->shadow; + } else { + object = io_upl->map_object; + } + + /* Get the dev vnode ptr for this object */ + if(!object || !object->pager || + vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { + upl_unlock(upl); + goto out; + } + + upl_unlock(upl); + + /* Now we have all the information needed to do the expedite */ + + offset = 0; + while (offset < io_upl_size) { + blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK; + len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; + + /* + * This implementation may cause some spurious expedites due to the + * fact that we dont cleanup the blkno & len from the upl_reprio_info + * even after the I/O is complete. + */ + + if (blkno != 0 && len != 0) { + /* Create the request for I/O reprioritization */ + req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone); + assert(req != NULL); + req->blkno = blkno; + req->len = len; + req->priority = prio; + req->devvp = devvp; + + /* Insert request into the reprioritization list */ + IO_REPRIORITIZE_LIST_LOCK(); + queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); + IO_REPRIORITIZE_LIST_UNLOCK(); + + offset += len; + } else { + offset += PAGE_SIZE; + } + } + + /* Wakeup reprioritize thread */ + IO_REPRIO_THREAD_WAKEUP(); + +out: + kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); + return; } -#endif /* CONFIG_FREEZE */ +void +vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) +{ + upl_t upl; + upl_page_info_t *pl; + unsigned int i, num_pages; + int cur_tier; + + cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); + + /* + Scan through all UPLs associated with the object to find the + UPL containing the contended page. + */ + queue_iterate(&o->uplq, upl, upl_t, uplq) { + if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) + continue; + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + num_pages = (upl->size / PAGE_SIZE); + + /* + For each page in the UPL page list, see if it matches the contended + page and was issued as a low prio I/O. 
+ */ + for(i=0; i < num_pages; i++) { + if(UPL_PAGE_PRESENT(pl,i) && m->phys_page == pl[i].phys_addr) { + if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) { + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl, upl->upl_priority, 0); + vm_decmp_upl_reprioritize(upl, cur_tier); + break; + } + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl->upl_reprio_info[i], upl->upl_priority, 0); + if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) + vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier); + break; + } + } + /* Check if we found any hits */ + if (i != num_pages) + break; + } + + return; +} + +wait_result_t +vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) +{ + wait_result_t ret; + + KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); + + if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) { + /* + Indicates page is busy due to an I/O. Issue a reprioritize request if necessary. + */ + vm_page_handle_prio_inversion(o,m); + } + m->wanted = TRUE; + ret = thread_sleep_vm_object(o, m, interruptible); + KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0); + return ret; +} + +static void +io_reprioritize_thread(void *param __unused, wait_result_t wr __unused) +{ + io_reprioritize_req_t req = NULL; + + while(1) { + + IO_REPRIORITIZE_LIST_LOCK(); + if (queue_empty(&io_reprioritize_list)) { + IO_REPRIORITIZE_LIST_UNLOCK(); + break; + } + + queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); + IO_REPRIORITIZE_LIST_UNLOCK(); + + vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority); + zfree(io_reprioritize_req_zone, req); + } + + IO_REPRIO_THREAD_CONTINUATION(); +} +#endif