#include <vm/vm_protos.h>
-
memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(, memory_manager_default_lock)
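For context, memory_manager_default holds the send right to the default (anonymous-memory) pager, and memory_manager_default_lock serializes all access to it. A minimal sketch of the read-side pattern, assuming the standard lck_mtx primitives; the helper name below is hypothetical, while memory_object_default_reference() is the real reference-taking routine:

/* Hypothetical helper: snapshot the default pager under its lock,
 * taking a reference before the lock drops so the manager cannot be
 * replaced out from under the caller. */
static memory_object_default_t
default_manager_snapshot(void)
{
	memory_object_default_t manager;

	lck_mtx_lock(&memory_manager_default_lock);
	manager = memory_manager_default;
	if (manager != MEMORY_OBJECT_DEFAULT_NULL) {
		memory_object_default_reference(manager);
	}
	lck_mtx_unlock(&memory_manager_default_lock);
	return manager;
}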
if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
if (dw_count) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
/*
 * End of a run of dirty/precious pages.
 */
if (dw_count) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
if (dw_count >= dw_limit) {
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
dwp = &dw_array[0];
dw_count = 0;
}
/*
 * Clean any pages that have been saved.
 */
if (dw_count)
- vm_page_do_delayed_work(object, &dw_array[0], dw_count);
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
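All four hunks above make the same mechanical change: vm_page_do_delayed_work() grew a kernel-memory tag parameter, and every call site on this path passes VM_KERN_MEMORY_NONE, since pageout work is not attributed to a specific allocation site. A minimal sketch of the batching pattern these hunks live in, assuming the real xnu delayed-work types and macros; the function name and the DW_vm_page_wire action bit are illustrative:

/* Sketch of the delayed-work batching these hunks modify: each page's
 * deferred actions are queued into dw_array, and the batch is flushed
 * through vm_page_do_delayed_work() when it fills or the run ends.
 * VM_KERN_MEMORY_NONE is the new tag argument added by this change. */
static void
queue_page_work(vm_object_t object, vm_page_t m)
{
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp = &dw_array[0];
	int				dw_count = 0;
	int				dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	dwp->dw_mask = DW_vm_page_wire;			/* illustrative action bit */
	VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);	/* advances dwp, bumps dw_count */

	if (dw_count >= dw_limit) {
		/* Batch full: flush and reset, exactly as the hunks above do. */
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
		dwp = &dw_array[0];
		dw_count = 0;
	}
	if (dw_count) {
		/* End of the run: flush whatever is still queued. */
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
	}
}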
if (data_cnt) {
LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
- int *flags)
+ upl_control_flags_t *flags)
{
vm_object_t object;
kern_return_t ret;
- int caller_flags;
+ upl_control_flags_t caller_flags;
caller_flags = *flags;
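The signature change above widens the flags from int to upl_control_flags_t, which, if memory serves, is a 64-bit typedef (uint64_t) in mach/memory_object_types.h; with the old int local, any UPL flag bit above bit 31 would be silently truncated. A standalone userland sketch of that hazard, with a hypothetical high flag:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t upl_control_flags_t;	/* the widened flags type */

/* Hypothetical flag above bit 31, the kind an `int` local would drop. */
#define UPL_HYPOTHETICAL_HIGH_FLAG	((upl_control_flags_t)1 << 35)

int
main(void)
{
	upl_control_flags_t	flags  = UPL_HYPOTHETICAL_HIGH_FLAG;
	int			narrow = (int)flags;	/* old-style local: truncates */
	upl_control_flags_t	wide   = flags;		/* widened local: intact */

	printf("narrow = 0x%x\n", narrow);			/* 0x0: flag lost */
	printf("wide   = 0x%llx\n", (unsigned long long)wide);	/* 0x800000000 */
	return 0;
}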
upl_ptr,
user_page_list,
page_list_count,
- cntrl_flags);
+ (upl_control_flags_t)(unsigned int) cntrl_flags);
}
upl,
user_page_list,
page_list_count,
- cntrl_flags);
+ (upl_control_flags_t)(unsigned int) cntrl_flags);
}
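Both call sites cast through unsigned int before widening because cntrl_flags arrives from the MIG-generated interface as a signed 32-bit integer: a direct cast to the 64-bit upl_control_flags_t would sign-extend a set top bit into the upper 32 flag bits. A standalone sketch demonstrating the difference:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t upl_control_flags_t;	/* matches the widened xnu typedef */

int
main(void)
{
	int cntrl_flags = (int)0x80000000;	/* 32-bit flag word, top bit set */

	/* Direct widening sign-extends: the upper 32 bits become all ones. */
	upl_control_flags_t bad  = (upl_control_flags_t)cntrl_flags;
	/* Going through unsigned int first zero-extends, as the diff does. */
	upl_control_flags_t good = (upl_control_flags_t)(unsigned int)cntrl_flags;

	printf("bad  = 0x%016llx\n", (unsigned long long)bad);	/* 0xffffffff80000000 */
	printf("good = 0x%016llx\n", (unsigned long long)good);	/* 0x0000000080000000 */
	return 0;
}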
kern_return_t
returned_manager = current_manager;
memory_object_default_reference(returned_manager);
} else {
+ /*
+ * Only allow the kernel to change the value.
+ */
+ extern task_t kernel_task;
+ if (current_task() != kernel_task) {
+ result = KERN_NO_ACCESS;
+ goto out;
+ }
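The added block gates only the setter path: querying the current manager (the branch above that returns returned_manager) is untouched, but installing a new default pager now requires the caller to be kernel_task; everyone else gets KERN_NO_ACCESS. A hedged kernel-side sketch of what a would-be caller now sees, assuming the usual host_default_memory_manager() signature and host_priv_self(); the helper is hypothetical and cluster_size is left at 0 as a don't-care:

/* Illustrative caller: with this change, a non-kernel task cannot
 * install a new default pager even if it holds the host-priv port. */
static kern_return_t
try_install_default_pager(memory_object_default_t pager)
{
	memory_object_default_t manager = pager;	/* non-null: a set, not a query */
	kern_return_t kr;

	kr = host_default_memory_manager(host_priv_self(), &manager, 0);
	if (kr == KERN_NO_ACCESS) {
		/* current_task() != kernel_task: the set was refused. */
	}
	return kr;
}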
/*
* If this is the first non-null manager, start