X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/vm/memory_object.c

diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c
index 67b69df41..848b1eea8 100644
--- a/osfmk/vm/memory_object.c
+++ b/osfmk/vm/memory_object.c
@@ -62,8 +62,6 @@
  *	External memory management interface control functions.
  */
 
-#include <advisory_pageout.h>
-
 /*
  *	Interface dependencies:
  */
@@ -103,13 +101,10 @@
 #include <vm/vm_purgeable_internal.h>	/* Needed by some vm_page.h macros */
 #include <vm/vm_shared_region.h>
 
-#if	MACH_PAGEMAP
 #include <vm/vm_external.h>
-#endif	/* MACH_PAGEMAP */
 
 #include <vm/vm_protos.h>
 
-
 memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
 
 decl_lck_mtx_data(,	memory_manager_default_lock)
@@ -454,9 +449,17 @@ vm_object_sync(
 	vm_object_lock(object);
 	vm_object_paging_begin(object);
 
-	if (should_flush)
+	if (should_flush) {
 		flags = MEMORY_OBJECT_DATA_FLUSH;
-	else
+		/*
+		 * This flush is from an msync(), not a truncate(), so the
+		 * contents of the file are not affected.
+		 * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
+		 * that the data is not changed and that there's no need to
+		 * push the old contents to a copy object.
+		 */
+		flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
+	} else
 		flags = 0;
 
 	if (should_iosync)
@@ -483,7 +486,7 @@ MACRO_BEGIN						\
 	int			upl_flags;		\
 	memory_object_t		pager;			\
 							\
-	if (object == slide_info.slide_object) {	\
+	if (object->object_slid) {			\
 		panic("Objects with slid pages not allowed\n"); \
 	}						\
 							\
@@ -534,10 +537,12 @@ vm_object_update_extent(
 	struct vm_page_delayed_work	*dwp;
 	int			dw_count;
 	int			dw_limit;
+	int			dirty_count;
 
 	dwp = &dw_array[0];
 	dw_count = 0;
 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+	dirty_count = 0;
 
 	for (;
 	     offset < offset_end && object->resident_page_count;
@@ -545,13 +550,13 @@ vm_object_update_extent(
 
 		/*
 		 * Limit the number of pages to be cleaned at once to a contiguous
-		 * run, or at most MAX_UPL_TRANSFER size
+		 * run, or at most MAX_UPL_TRANSFER_BYTES
 		 */
 		if (data_cnt) {
-			if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) {
+			if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
 
 				if (dw_count) {
-					vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 					dwp = &dw_array[0];
 					dw_count = 0;
 				}
@@ -571,7 +576,7 @@ vm_object_update_extent(
 			 *	End of a run of dirty/precious pages.
 			 */
 			if (dw_count) {
-				vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+				vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 				dwp = &dw_array[0];
 				dw_count = 0;
 			}
@@ -592,6 +597,8 @@ vm_object_update_extent(
 			break;
 
 		case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+			if (m->dirty == TRUE)
+				dirty_count++;
 			dwp->dw_mask |= DW_vm_page_free;
 			break;
 
@@ -635,7 +642,7 @@ vm_object_update_extent(
 			VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 
 			if (dw_count >= dw_limit) {
-				vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+				vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 				dwp = &dw_array[0];
 				dw_count = 0;
 			}
@@ -643,12 +650,16 @@ vm_object_update_extent(
 			break;
 		}
 	}
+
+	if (dirty_count) {
+		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED);
+	}
 	/*
 	 *	We have completed the scan for applicable pages.
 	 *	Clean any pages that have been saved.
 	 */
 	if (dw_count)
-		vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 
 	if (data_cnt) {
 		LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
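Two behavioral changes land in the vm_object_sync() and vm_object_update_extent() hunks above: an msync()-driven flush is now tagged MEMORY_OBJECT_DATA_NO_CHANGE so vm_object_update() can skip pushing the unchanged contents to a copy object, and dirty pages freed under MEMORY_OBJECT_LOCK_RESULT_MUST_FREE are now charged to the calling task as invalidated logical writes (dirty_count * PAGE_SIZE bytes). For reference, a minimal userspace sketch of the msync(MS_SYNC) call that drives this flush path; the file name and one-page size here are arbitrary:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        /* Create a one-page scratch file and map it shared. */
        int fd = open("scratch.dat", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0 || ftruncate(fd, 4096) != 0) {
            perror("setup");
            return 1;
        }
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(p, 'x', 4096);  /* dirty the page */

        /*
         * MS_SYNC asks the kernel to flush the dirty page back to the
         * file. The page is written out as-is, which is why the flush
         * can be tagged MEMORY_OBJECT_DATA_NO_CHANGE: unlike a truncate,
         * it does not alter the data being flushed.
         */
        if (msync(p, 4096, MS_SYNC) != 0)
            perror("msync");

        munmap(p, 4096);
        close(fd);
        return 0;
    }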
@@ -798,6 +809,7 @@ vm_object_update(
 		fault_info.interruptible = THREAD_UNINT;
 		fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 		fault_info.user_tag = 0;
+		fault_info.pmap_options = 0;
 		fault_info.lo_offset = copy_offset;
 		fault_info.hi_offset = copy_size;
 		fault_info.no_cache = FALSE;
@@ -815,9 +827,11 @@ vm_object_update(
 			assert(fault_info.cluster_size == copy_size - i);
 
 			prot = 	VM_PROT_WRITE|VM_PROT_READ;
+			page = VM_PAGE_NULL;
 			result = vm_fault_page(copy_object, i,
 					       VM_PROT_WRITE|VM_PROT_READ,
 					       FALSE,
+					       FALSE, /* page not looked up */
 					       &prot,
 					       &page,
 					       &top_page,
@@ -1096,7 +1110,7 @@ vm_object_set_attributes_common(
 	boolean_t	may_cache,
 	memory_object_copy_strategy_t copy_strategy,
 	boolean_t	temporary,
-	boolean_t	silent_overwrite,
+	__unused boolean_t	silent_overwrite,
 	boolean_t	advisory_pageout)
 {
 	boolean_t	object_became_ready;
@@ -1120,11 +1134,6 @@ vm_object_set_attributes_common(
 		return(KERN_INVALID_ARGUMENT);
 	}
 
-#if	!ADVISORY_PAGEOUT
-	if (silent_overwrite || advisory_pageout)
-		return(KERN_INVALID_ARGUMENT);
-
-#endif	/* !ADVISORY_PAGEOUT */
 	if (may_cache)
 		may_cache = TRUE;
 	if (temporary)
@@ -1140,7 +1149,7 @@ vm_object_set_attributes_common(
 	object->copy_strategy = copy_strategy;
 	object->can_persist = may_cache;
 	object->temporary = temporary;
-	object->silent_overwrite = silent_overwrite;
+//	object->silent_overwrite = silent_overwrite;
 	object->advisory_pageout = advisory_pageout;
 
 	/*
@@ -1191,7 +1200,8 @@ memory_object_change_attributes(
 	temporary = object->temporary;
 	may_cache = object->can_persist;
 	copy_strategy = object->copy_strategy;
-	silent_overwrite = object->silent_overwrite;
+//	silent_overwrite = object->silent_overwrite;
+	silent_overwrite = FALSE;
 	advisory_pageout = object->advisory_pageout;
 #if notyet
 	invalidate = object->invalidate;
@@ -1371,7 +1381,8 @@ memory_object_get_attributes(
 		behave->invalidate = FALSE;
 #endif
 		behave->advisory_pageout = object->advisory_pageout;
-		behave->silent_overwrite = object->silent_overwrite;
+//		behave->silent_overwrite = object->silent_overwrite;
+		behave->silent_overwrite = FALSE;
 		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
 		break;
 	}
@@ -1448,11 +1459,11 @@ memory_object_iopl_request(
 	upl_t			*upl_ptr,
 	upl_page_info_array_t	user_page_list,
 	unsigned int		*page_list_count,
-	int			*flags)
+	upl_control_flags_t	*flags)
 {
 	vm_object_t		object;
 	kern_return_t		ret;
-	int			caller_flags;
+	upl_control_flags_t	caller_flags;
 
 	caller_flags = *flags;
 
@@ -1495,8 +1506,9 @@ memory_object_iopl_request(
 		/* offset from beginning of named entry offset in object */
 		offset = offset + named_entry->offset;
 
-		if(named_entry->is_sub_map)
-			return (KERN_INVALID_ARGUMENT);
+		if (named_entry->is_sub_map ||
+		    named_entry->is_copy)
+			return KERN_INVALID_ARGUMENT;
 
 		named_entry_lock(named_entry);
 
@@ -1555,8 +1567,6 @@ memory_object_iopl_request(
 		return (KERN_INVALID_ARGUMENT);
 
 	if (!object->private) {
-		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
-			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
 		if (object->phys_contiguous) {
 			*flags = UPL_PHYS_CONTIG;
 		} else {
@@ -1608,7 +1618,7 @@ memory_object_upl_request(
 				   upl_ptr,
 				   user_page_list,
 				   page_list_count,
-				   cntrl_flags);
+				   (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
 
 /*
@@ -1646,7 +1656,7 @@ memory_object_super_upl_request(
 					 upl,
 					 user_page_list,
 					 page_list_count,
-					 cntrl_flags);
+					 (upl_control_flags_t)(unsigned int) cntrl_flags);
 }
 
 kern_return_t
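In the two UPL request hunks above, the caller's 32-bit cntrl_flags is widened through an intermediate (unsigned int) cast before it becomes an upl_control_flags_t; converting the int directly would sign-extend whenever a flag occupies the sign bit. A standalone sketch of the difference, assuming upl_control_flags_t is a 64-bit unsigned type as in this revision:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t upl_control_flags_t;  /* stand-in for the kernel typedef */

    int main(void)
    {
        int cntrl_flags = (int)0x80000000;  /* a flag bit sitting in the int's sign bit */

        /* Direct conversion sign-extends: the upper 32 bits become all ones. */
        upl_control_flags_t direct = (upl_control_flags_t)cntrl_flags;

        /* Going through unsigned int first zero-extends, as the diff does. */
        upl_control_flags_t widened = (upl_control_flags_t)(unsigned int)cntrl_flags;

        printf("direct:  0x%016llx\n", (unsigned long long)direct);   /* 0xffffffff80000000 */
        printf("widened: 0x%016llx\n", (unsigned long long)widened);  /* 0x0000000080000000 */
        return 0;
    }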
@@ -1712,6 +1722,14 @@ host_default_memory_manager(
 		returned_manager = current_manager;
 		memory_object_default_reference(returned_manager);
 	} else {
+		/*
+		 *	Only allow the kernel to change the value.
+		 */
+		extern task_t kernel_task;
+		if (current_task() != kernel_task) {
+			result = KERN_NO_ACCESS;
+			goto out;
+		}
 
 		/*
 		 *	If this is the first non-null manager, start
@@ -1915,6 +1933,22 @@ memory_object_mark_unused(
 		vm_object_cache_add(object);
 }
 
+void
+memory_object_mark_io_tracking(
+	memory_object_control_t	control)
+{
+	vm_object_t		object;
+
+	if (control == NULL)
+		return;
+	object = memory_object_control_to_vm_object(control);
+
+	if (object != VM_OBJECT_NULL) {
+		vm_object_lock(object);
+		object->io_tracking = TRUE;
+		vm_object_unlock(object);
+	}
+}
 
 kern_return_t
 memory_object_pages_resident(
@@ -1953,18 +1987,35 @@ memory_object_signed(
 	return KERN_SUCCESS;
 }
 
+boolean_t
+memory_object_is_signed(
+	memory_object_control_t	control)
+{
+	boolean_t	is_signed;
+	vm_object_t	object;
+
+	object = memory_object_control_to_vm_object(control);
+	if (object == VM_OBJECT_NULL)
+		return FALSE;
+
+	vm_object_lock_shared(object);
+	is_signed = object->code_signed;
+	vm_object_unlock(object);
+
+	return is_signed;
+}
+
 boolean_t
 memory_object_is_slid(
 	memory_object_control_t	control)
 {
 	vm_object_t	object = VM_OBJECT_NULL;
-	vm_object_t	slide_object = slide_info.slide_object;
 
 	object = memory_object_control_to_vm_object(control);
 	if (object == VM_OBJECT_NULL)
 		return FALSE;
 
-	return (object == slide_object);
+	return object->object_slid;
 }
 
 static zone_t mem_obj_control_zone;
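The new memory_object_is_signed() reads object->code_signed under vm_object_lock_shared(), which suffices because writers such as memory_object_signed() only flip the bit while holding the object lock exclusively. A small pthread sketch of the same reader/writer pattern; the struct and function names here are hypothetical stand-ins, not kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a vm_object guarded by a rw lock. */
    struct object {
        pthread_rwlock_t lock;
        bool             code_signed;
    };

    /* Writer takes the lock exclusively, like memory_object_signed(). */
    static void object_set_signed(struct object *o, bool v)
    {
        pthread_rwlock_wrlock(&o->lock);
        o->code_signed = v;
        pthread_rwlock_unlock(&o->lock);
    }

    /*
     * Reader only needs a shared hold, matching the diff's
     * vm_object_lock_shared()/vm_object_unlock() pairing.
     */
    static bool object_is_signed(struct object *o)
    {
        pthread_rwlock_rdlock(&o->lock);
        bool v = o->code_signed;
        pthread_rwlock_unlock(&o->lock);
        return v;
    }

    int main(void)
    {
        struct object o;
        pthread_rwlock_init(&o.lock, NULL);
        object_set_signed(&o, true);
        printf("signed: %d\n", object_is_signed(&o));
        pthread_rwlock_destroy(&o.lock);
        return 0;
    }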