X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/vm/memory_object.c?ds=inline

diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c
index de7baff29..848b1eea8 100644
--- a/osfmk/vm/memory_object.c
+++ b/osfmk/vm/memory_object.c
@@ -62,8 +62,6 @@
  *	External memory management interface control functions.
  */
 
-#include 
-
 /*
  *	Interface dependencies:
  */
@@ -103,13 +101,10 @@
 #include 	/* Needed by some vm_page.h macros */
 #include 
 
-#if MACH_PAGEMAP
 #include 
-#endif /* MACH_PAGEMAP */
 
 #include 
 
-
 memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
 decl_lck_mtx_data(, memory_manager_default_lock)
 
@@ -176,57 +171,12 @@ memory_object_lock_page(
 		m, should_return, should_flush, prot, 0);
 
 
-	if (m->busy || m->cleaning) {
-		if (m->list_req_pending &&
-		    should_return == MEMORY_OBJECT_RETURN_NONE &&
-		    should_flush == TRUE) {
+	if (m->busy || m->cleaning)
+		return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
 
-			if (m->absent) {
-				/*
-				 * this is the list_req_pending | absent | busy case
-				 * which originates from vm_fault_page.
-				 * Combine that with should_flush == TRUE and we
-				 * have a case where we need to toss the page from
-				 * the object.
-				 */
-				if (!VM_PAGE_WIRED(m)) {
-					return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
-				} else {
-					return (MEMORY_OBJECT_LOCK_RESULT_DONE);
-				}
-			}
-			if (m->pageout || m->cleaning) {
-				/*
-				 * if pageout is set, page was earmarked by vm_pageout_scan
-				 * to be cleaned and stolen... if cleaning is set, we're
-				 * pre-cleaning pages for a hibernate...
-				 * in either case, we're going
-				 * to take it back since we are being asked to
-				 * flush the page w/o cleaning it (i.e. we don't
-				 * care that it's dirty, we want it gone from
-				 * the cache) and we don't want to stall
-				 * waiting for it to be cleaned for 2 reasons...
-				 * 1 - no use paging it out since we're probably
-				 *     shrinking the file at this point or we no
-				 *     longer care about the data in the page
-				 * 2 - if we stall, we may casue a deadlock in
-				 *     the FS trying to acquire its locks
-				 *     on the VNOP_PAGEOUT path presuming that
-				 *     those locks are already held on the truncate
-				 *     path before calling through to this function
-				 *
-				 * so undo all of the state that vm_pageout_scan
-				 * hung on this page
-				 */
+	if (m->laundry)
+		vm_pageout_steal_laundry(m, FALSE);
 
-				vm_pageout_queue_steal(m, FALSE);
-				PAGE_WAKEUP_DONE(m);
-			} else {
-				panic("list_req_pending on page %p without absent/pageout/cleaning set\n", m);
-			}
-		} else
-			return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
-	}
 	/*
 	 *	Don't worry about pages for which the kernel
 	 *	does not have any data.
@@ -262,8 +212,9 @@ memory_object_lock_page(
 		 * for the page to go from the clean to the dirty state
 		 * after we've made our decision
 		 */
-		if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
-			m->dirty = TRUE;
+		if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
+			SET_PAGE_DIRTY(m, FALSE);
+		}
 	} else {
 		/*
 		 * If we are decreasing permission, do it now;
@@ -498,9 +449,17 @@ vm_object_sync(
 	vm_object_lock(object);
 	vm_object_paging_begin(object);
 
-	if (should_flush)
+	if (should_flush) {
 		flags = MEMORY_OBJECT_DATA_FLUSH;
-	else
+		/*
+		 * This flush is from an msync(), not a truncate(), so the
+		 * contents of the file are not affected.
+		 * MEMORY_OBECT_DATA_NO_CHANGE lets vm_object_update() know
+		 * that the data is not changed and that there's no need to
+		 * push the old contents to a copy object.
+		 */
+		flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
+	} else
 		flags = 0;
 
 	if (should_iosync)
@@ -527,7 +486,7 @@ MACRO_BEGIN						\
 	int			upl_flags;			\
 	memory_object_t		pager;				\
 								\
-	if (object == slide_info.slide_object) {		\
+	if (object->object_slid) {				\
 		panic("Objects with slid pages not allowed\n");	\
 	}							\
 								\
@@ -578,10 +537,12 @@ vm_object_update_extent(
 	struct vm_page_delayed_work	*dwp;
 	int			dw_count;
 	int			dw_limit;
+	int			dirty_count;
 
 	dwp = &dw_array[0];
 	dw_count = 0;
 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+	dirty_count = 0;
 
 	for (;
 	     offset < offset_end && object->resident_page_count;
@@ -589,13 +550,13 @@ vm_object_update_extent(
 
 		/*
 		 * Limit the number of pages to be cleaned at once to a contiguous
-		 * run, or at most MAX_UPL_TRANSFER size
+		 * run, or at most MAX_UPL_TRANSFER_BYTES
 		 */
 		if (data_cnt) {
-			if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) {
+			if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
 
 				if (dw_count) {
-					vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 					dwp = &dw_array[0];
 					dw_count = 0;
 				}
@@ -615,7 +576,7 @@ vm_object_update_extent(
 				 * End of a run of dirty/precious pages.
 				 */
 				if (dw_count) {
-					vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 					dwp = &dw_array[0];
 					dw_count = 0;
 				}
@@ -636,6 +597,8 @@ vm_object_update_extent(
 			break;
 
 		case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+			if (m->dirty == TRUE)
+				dirty_count++;
 			dwp->dw_mask |= DW_vm_page_free;
 			break;
 
@@ -650,12 +613,6 @@ vm_object_update_extent(
 			data_cnt += PAGE_SIZE;
 			next_offset = offset + PAGE_SIZE_64;
 
-			/*
-			 * Clean
-			 */
-			m->list_req_pending = TRUE;
-			m->cleaning = TRUE;
-
 			/*
 			 * wired pages shouldn't be flushed and
 			 * since they aren't on any queue,
@@ -667,10 +624,7 @@ vm_object_update_extent(
 				/*
 				 * add additional state for the flush
 				 */
-				m->busy = TRUE;
 				m->pageout = TRUE;
-
-				dwp->dw_mask |= DW_vm_page_wire;
 			}
 			/*
 			 * we use to remove the page from the queues at this
@@ -688,7 +642,7 @@ vm_object_update_extent(
 			VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 
 			if (dw_count >= dw_limit) {
-				vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+				vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 				dwp = &dw_array[0];
 				dw_count = 0;
 			}
@@ -696,12 +650,16 @@ vm_object_update_extent(
 			break;
 		}
 	}
+
+	if (dirty_count) {
+		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED);
+	}
 	/*
 	 *	We have completed the scan for applicable pages.
 	 *	Clean any pages that have been saved.
 	 */
 	if (dw_count)
-		vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 
 	if (data_cnt) {
 		LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
@@ -851,6 +809,7 @@ vm_object_update(
 		fault_info.interruptible = THREAD_UNINT;
 		fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 		fault_info.user_tag = 0;
+		fault_info.pmap_options = 0;
 		fault_info.lo_offset = copy_offset;
 		fault_info.hi_offset = copy_size;
 		fault_info.no_cache = FALSE;
@@ -858,6 +817,7 @@ vm_object_update(
 		fault_info.io_sync = FALSE;
 		fault_info.cs_bypass = FALSE;
 		fault_info.mark_zf_absent = FALSE;
+		fault_info.batch_pmap_op = FALSE;
 
 		vm_object_paging_begin(copy_object);
 
@@ -867,9 +827,11 @@ vm_object_update(
 			assert(fault_info.cluster_size == copy_size - i);
 
 			prot = 	VM_PROT_WRITE|VM_PROT_READ;
+			page = VM_PAGE_NULL;
 			result = vm_fault_page(copy_object, i,
 					       VM_PROT_WRITE|VM_PROT_READ,
 					       FALSE,
+					       FALSE, /* page not looked up */
 					       &prot,
 					       &page,
 					       &top_page,
@@ -1148,7 +1110,7 @@ vm_object_set_attributes_common(
 	boolean_t	may_cache,
 	memory_object_copy_strategy_t copy_strategy,
 	boolean_t	temporary,
-	boolean_t	silent_overwrite,
+	__unused boolean_t	silent_overwrite,
 	boolean_t	advisory_pageout)
 {
 	boolean_t	object_became_ready;
@@ -1172,11 +1134,6 @@ vm_object_set_attributes_common(
 		return(KERN_INVALID_ARGUMENT);
 	}
 
-#if	!ADVISORY_PAGEOUT
-	if (silent_overwrite || advisory_pageout)
-		return(KERN_INVALID_ARGUMENT);
-
-#endif	/* !ADVISORY_PAGEOUT */
 	if (may_cache)
 		may_cache = TRUE;
 	if (temporary)
@@ -1192,7 +1149,7 @@ vm_object_set_attributes_common(
 	object->copy_strategy = copy_strategy;
 	object->can_persist = may_cache;
 	object->temporary = temporary;
-	object->silent_overwrite = silent_overwrite;
+//	object->silent_overwrite = silent_overwrite;
 	object->advisory_pageout = advisory_pageout;
 
 	/*
@@ -1243,7 +1200,8 @@ memory_object_change_attributes(
 	temporary = object->temporary;
 	may_cache = object->can_persist;
 	copy_strategy = object->copy_strategy;
-	silent_overwrite = object->silent_overwrite;
+//	silent_overwrite = object->silent_overwrite;
+	silent_overwrite = FALSE;
 	advisory_pageout = object->advisory_pageout;
 #if notyet
 	invalidate = object->invalidate;
@@ -1423,7 +1381,8 @@ memory_object_get_attributes(
 		behave->invalidate = FALSE;
 #endif
 		behave->advisory_pageout = object->advisory_pageout;
-		behave->silent_overwrite = object->silent_overwrite;
+//		behave->silent_overwrite = object->silent_overwrite;
+		behave->silent_overwrite = FALSE;
 		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
 		break;
 	}
@@ -1500,11 +1459,11 @@ memory_object_iopl_request(
 	upl_t			*upl_ptr,
 	upl_page_info_array_t	user_page_list,
 	unsigned int		*page_list_count,
-	int			*flags)
+	upl_control_flags_t	*flags)
 {
 	vm_object_t		object;
 	kern_return_t		ret;
-	int			caller_flags;
+	upl_control_flags_t	caller_flags;
 
 	caller_flags = *flags;
 
@@ -1547,8 +1506,9 @@ memory_object_iopl_request(
 		/* offset from beginning of named entry offset in object */
 		offset = offset + named_entry->offset;
 
-		if(named_entry->is_sub_map)
-			return (KERN_INVALID_ARGUMENT);
+		if (named_entry->is_sub_map ||
+		    named_entry->is_copy)
+			return KERN_INVALID_ARGUMENT;
 
 		named_entry_lock(named_entry);
 
@@ -1607,8 +1567,6 @@ memory_object_iopl_request(
 		return (KERN_INVALID_ARGUMENT);
 
 	if (!object->private) {
-		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
-			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
 		if (object->phys_contiguous) {
 			*flags = UPL_PHYS_CONTIG;
 		} else {
@@ -1660,7 +1618,7 @@ memory_object_upl_request(
 			       upl_ptr,
 			       user_page_list,
 			       page_list_count,
-			       cntrl_flags);
+			       (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
 
 /*
@@ -1698,7 +1656,7 @@ memory_object_super_upl_request(
 				     upl,
 				     user_page_list,
 				     page_list_count,
-				     cntrl_flags);
+				     (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
 
 kern_return_t
@@ -1764,6 +1722,14 @@ host_default_memory_manager(
 		returned_manager = current_manager;
 		memory_object_default_reference(returned_manager);
 	} else {
+		/*
+		 * Only allow the kernel to change the value.
+		 */
+		extern task_t kernel_task;
+		if (current_task() != kernel_task) {
+			result = KERN_NO_ACCESS;
+			goto out;
+		}
 
 		/*
 		 *	If this is the first non-null manager, start
@@ -1793,7 +1759,6 @@ host_default_memory_manager(
 
 		thread_wakeup((event_t) &memory_manager_default);
 
-#ifndef CONFIG_FREEZE
 		/*
 		 * Now that we have a default pager for anonymous memory,
 		 * reactivate all the throttled pages (i.e. dirty pages with
@@ -1803,7 +1768,6 @@ host_default_memory_manager(
 		{
 			vm_page_reactivate_all_throttled();
 		}
-#endif
 	}
  out:
 	lck_mtx_unlock(&memory_manager_default_lock);
@@ -1969,6 +1933,22 @@ memory_object_mark_unused(
 		vm_object_cache_add(object);
 }
 
+void
+memory_object_mark_io_tracking(
+	memory_object_control_t control)
+{
+	vm_object_t object;
+
+	if (control == NULL)
+		return;
+	object = memory_object_control_to_vm_object(control);
+
+	if (object != VM_OBJECT_NULL) {
+		vm_object_lock(object);
+		object->io_tracking = TRUE;
+		vm_object_unlock(object);
+	}
+}
 
 kern_return_t
 memory_object_pages_resident(
@@ -2007,18 +1987,35 @@ memory_object_signed(
 	return KERN_SUCCESS;
 }
 
+boolean_t
+memory_object_is_signed(
+	memory_object_control_t	control)
+{
+	boolean_t	is_signed;
+	vm_object_t	object;
+
+	object = memory_object_control_to_vm_object(control);
+	if (object == VM_OBJECT_NULL)
+		return FALSE;
+
+	vm_object_lock_shared(object);
+	is_signed = object->code_signed;
+	vm_object_unlock(object);
+
+	return is_signed;
+}
+
 boolean_t
 memory_object_is_slid(
 	memory_object_control_t	control)
 {
 	vm_object_t	object = VM_OBJECT_NULL;
-	vm_object_t	slide_object = slide_info.slide_object;
 
 	object = memory_object_control_to_vm_object(control);
 	if (object == VM_OBJECT_NULL)
 		return FALSE;
 
-	return (object == slide_object);
+	return object->object_slid;
 }
 
 static zone_t mem_obj_control_zone;
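
Note on the vm_object_update_extent() hunks above: the new dirty_count / task_update_logical_writes() logic tallies pages that are discarded while still dirty and bills the total, once, to the calling task as "invalidated" logical writes after the scan completes. The standalone C sketch below illustrates only that accounting pattern; the page_t type, PAGE_SIZE value, and account_invalidated_bytes() helper are illustrative stand-ins, not XNU interfaces.

/*
 * Sketch of the batched dirty-page accounting idea (not kernel code).
 * Pages freed by a flush without being cleaned are counted, and the
 * byte total is charged in a single call after the loop, keeping the
 * accounting out of the per-page path.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SIZE 4096u			/* stand-in for the machine page size */

typedef struct {
	bool dirty;
} page_t;

static uint64_t invalidated_bytes;	/* stands in for the per-task counter */

static void
account_invalidated_bytes(uint64_t bytes)
{
	invalidated_bytes += bytes;	/* real code charges the current task */
}

static void
flush_extent(page_t *pages, size_t npages)
{
	unsigned int dirty_count = 0;

	for (size_t i = 0; i < npages; i++) {
		if (pages[i].dirty)
			dirty_count++;	/* page is being tossed, not cleaned */
		pages[i].dirty = false;
	}
	if (dirty_count)
		account_invalidated_bytes((uint64_t)dirty_count * PAGE_SIZE);
}

Accumulating the count during the scan and charging it in one call mirrors how the hunk places the task_update_logical_writes() call after the delayed-work loop rather than inside the per-page switch.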