* External memory management interface control functions.
*/
-#include <advisory_pageout.h>
-
/*
* Interface dependencies:
*/
#include <vm/vm_protos.h>
-
memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(, memory_manager_default_lock)
struct vm_page_delayed_work *dwp;
int dw_count;
int dw_limit;
+ int dirty_count;
dwp = &dw_array[0];
dw_count = 0;
dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+ dirty_count = 0;
for (;
offset < offset_end && object->resident_page_count;
/*
* Limit the number of pages to be cleaned at once to a contiguous
- * run, or at most MAX_UPL_TRANSFER size
+ * run, or at most MAX_UPL_TRANSFER_BYTES
*/
if (data_cnt) {
- if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) {
+ if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
if (dw_count) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
* End of a run of dirty/precious pages.
*/
if (dw_count) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
break;
case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+ if (m->dirty == TRUE)
+ dirty_count++;
dwp->dw_mask |= DW_vm_page_free;
break;
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
break;
}
}
+
+ if (dirty_count) {
+ task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED);
+ }
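The added dirty_count bookkeeping batches the accounting: dirty pages freed via MEMORY_OBJECT_LOCK_RESULT_MUST_FREE are only counted during the scan, and the byte total is charged to the calling task once at the end. A minimal sketch of that pattern (not part of the patch; task_update_logical_writes() and TASK_WRITE_INVALIDATED are taken from the diff above, charge_invalidated_writes() is a made-up helper name):

/*
 * Sketch only: count invalidated dirty pages during a scan, then
 * report the whole byte total once instead of updating task
 * accounting for every freed page.
 */
static void
charge_invalidated_writes(task_t task, unsigned int dirty_pages)
{
	if (dirty_pages != 0) {
		task_update_logical_writes(task,
		    (dirty_pages * PAGE_SIZE), TASK_WRITE_INVALIDATED);
	}
}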
/*
* We have completed the scan for applicable pages.
* Clean any pages that have been saved.
*/
if (dw_count)
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
if (data_cnt) {
LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
fault_info.interruptible = THREAD_UNINT;
fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
fault_info.user_tag = 0;
+ fault_info.pmap_options = 0;
fault_info.lo_offset = copy_offset;
fault_info.hi_offset = copy_size;
fault_info.no_cache = FALSE;
return(KERN_INVALID_ARGUMENT);
}
-#if !ADVISORY_PAGEOUT
- if (silent_overwrite || advisory_pageout)
- return(KERN_INVALID_ARGUMENT);
-
-#endif /* !ADVISORY_PAGEOUT */
if (may_cache)
may_cache = TRUE;
if (temporary)
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
- int *flags)
+ upl_control_flags_t *flags)
{
vm_object_t object;
kern_return_t ret;
- int caller_flags;
+ upl_control_flags_t caller_flags;
caller_flags = *flags;
return (KERN_INVALID_ARGUMENT);
if (!object->private) {
- if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
- *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
if (object->phys_contiguous) {
*flags = UPL_PHYS_CONTIG;
} else {
upl_ptr,
user_page_list,
page_list_count,
- cntrl_flags);
+ (upl_control_flags_t)(unsigned int) cntrl_flags);
}
/*
upl,
user_page_list,
page_list_count,
- cntrl_flags);
+ (upl_control_flags_t)(unsigned int) cntrl_flags);
}
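Both call sites above convert cntrl_flags through (unsigned int) before widening it to upl_control_flags_t. A standalone sketch of why, assuming upl_control_flags_t is a 64-bit unsigned type in this tree: the intermediate cast keeps a 32-bit int flag word with the high bit set from being sign-extended into the upper 32 bits.

#include <stdint.h>

/* Stand-in for the real typedef; assumed 64-bit unsigned here. */
typedef uint64_t upl_flags_sketch_t;

static upl_flags_sketch_t
widen_upl_flags(int cntrl_flags)
{
	/* Without the (unsigned int) step, a negative int value would
	 * sign-extend and pollute the upper half of the wider type. */
	return (upl_flags_sketch_t)(unsigned int)cntrl_flags;
}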
kern_return_t
returned_manager = current_manager;
memory_object_default_reference(returned_manager);
} else {
+ /*
+ * Only allow the kernel to change the value.
+ */
+ extern task_t kernel_task;
+ if (current_task() != kernel_task) {
+ result = KERN_NO_ACCESS;
+ goto out;
+ }
/*
* If this is the first non-null manager, start
vm_object_cache_add(object);
}
+void
+memory_object_mark_io_tracking(
+ memory_object_control_t control)
+{
+ vm_object_t object;
+
+ if (control == NULL)
+ return;
+ object = memory_object_control_to_vm_object(control);
+
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ object->io_tracking = TRUE;
+ vm_object_unlock(object);
+ }
+}
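A hedged usage sketch for the new routine above; the caller name and call site are assumptions, only memory_object_mark_io_tracking() itself comes from this diff:

/* Hypothetical call site: turn on per-object I/O tracking once the
 * control port for a file-backed object exists. The routine already
 * tolerates a NULL control or a control with no backing vm_object. */
static void
example_enable_io_tracking(memory_object_control_t control)
{
	memory_object_mark_io_tracking(control);
}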
kern_return_t
memory_object_pages_resident(