+
+
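+/*
+ * LIST_REQ_PAGEOUT_PAGES: if the object has a pager, hand a run of
+ * "data_cnt" bytes starting at offset "po" back to it via
+ * memory_object_data_return().  Called with the object lock held; the
+ * lock is dropped around the pager call (with paging-in-progress held)
+ * and re-taken before the macro completes.  "ro"/"ioerr" receive the
+ * residual offset and I/O error, and "iosync" selects synchronous I/O
+ * (UPL_IOSYNC).
+ */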
+#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync) \
+MACRO_BEGIN \
+ \
+    int upl_flags; \
+    memory_object_t pager; \
+ \
+    if (object->object_slid) { \
+        panic("Objects with slid pages not allowed\n"); \
+    } \
+ \
+    if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \
+        vm_object_paging_begin(object); \
+        vm_object_unlock(object); \
+ \
+        if (iosync) \
+            upl_flags = UPL_MSYNC | UPL_IOSYNC; \
+        else \
+            upl_flags = UPL_MSYNC; \
+ \
+        (void) memory_object_data_return(pager, \
+            po, \
+            (memory_object_cluster_size_t)data_cnt, \
+            ro, \
+            ioerr, \
+            FALSE, \
+            FALSE, \
+            upl_flags); \
+ \
+        vm_object_lock(object); \
+        vm_object_paging_end(object); \
+    } \
+MACRO_END
+
+extern struct vnode *
+vnode_pager_lookup_vnode(memory_object_t);
+
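+/*
+ * vm_object_update_extent: walk the extent [offset, offset_end) of "object"
+ * one page at a time, applying memory_object_lock_page() to each resident
+ * page.  Pages that must be returned to the pager are batched into
+ * contiguous runs and pushed out with LIST_REQ_PAGEOUT_PAGES; pages that
+ * must be freed are queued through the delayed-work mechanism.  Returns 1
+ * if any page had to be returned to the pager, 0 otherwise.
+ */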
+static int
+vm_object_update_extent(
+    vm_object_t             object,
+    vm_object_offset_t      offset,
+    vm_object_offset_t      offset_end,
+    vm_object_offset_t      *offset_resid,
+    int                     *io_errno,
+    boolean_t               should_flush,
+    memory_object_return_t  should_return,
+    boolean_t               should_iosync,
+    vm_prot_t               prot)
+{
+    vm_page_t                       m;
+    int                             retval = 0;
+    vm_object_offset_t              paging_offset = 0;
+    vm_object_offset_t              next_offset = offset;
+    memory_object_lock_result_t     page_lock_result;
+    memory_object_cluster_size_t    data_cnt = 0;
+    struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
+    struct vm_page_delayed_work     *dwp;
+    int                             dw_count;
+    int                             dw_limit;
+    int                             dirty_count;
+
+    dwp = &dw_array[0];
+    dw_count = 0;
+    dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
+    dirty_count = 0;
+
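+    /*
+     * Scan the extent one page at a time; stop early if the object has
+     * no resident pages left.
+     */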
+    for (;
+         offset < offset_end && object->resident_page_count;
+         offset += PAGE_SIZE_64) {
+
+        /*
+         * Limit the number of pages to be cleaned at once to a contiguous
+         * run, or at most MAX_UPL_TRANSFER_BYTES
+         */
+        if (data_cnt) {
+            if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
+
+                if (dw_count) {
+                    vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+                    dwp = &dw_array[0];
+                    dw_count = 0;
+                }
+                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+                    paging_offset, offset_resid, io_errno, should_iosync);
+                data_cnt = 0;
+            }
+        }
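+        /*
+         * Look up the page at this offset.  This is written as a loop
+         * because pushing a run to the pager drops the object lock; after
+         * a "continue" the page must be looked up again before it can be
+         * examined.
+         */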
+        while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+
+            dwp->dw_mask = 0;
+
+            page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
+
+            if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
+                /*
+                 * End of a run of dirty/precious pages.
+                 */
+                if (dw_count) {
+                    vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+                    dwp = &dw_array[0];
+                    dw_count = 0;
+                }
+                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+                    paging_offset, offset_resid, io_errno, should_iosync);
+                /*
+                 * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
+                 * allow the state of page 'm' to change... we need to re-lookup
+                 * the current offset
+                 */
+                data_cnt = 0;
+                continue;
+            }
+
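+            /*
+             * Act on the disposition memory_object_lock_page() chose for
+             * this page.
+             */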
+            switch (page_lock_result) {
+
+            case MEMORY_OBJECT_LOCK_RESULT_DONE:
+                break;
+
+            case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
+                if (m->dirty == TRUE)
+                    dirty_count++;
+                dwp->dw_mask |= DW_vm_page_free;
+                break;
+
+            case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
+                PAGE_SLEEP(object, m, THREAD_UNINT);
+                continue;
+
+            case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+                if (data_cnt == 0)
+                    paging_offset = offset;
+
+                data_cnt += PAGE_SIZE;
+                next_offset = offset + PAGE_SIZE_64;
+
+                /*
+                 * wired pages shouldn't be flushed and
+                 * since they aren't on any queue,
+                 * no need to remove them
+                 */
+                if (!VM_PAGE_WIRED(m)) {
+
+                    if (should_flush) {
+                        /*
+                         * add additional state for the flush
+                         */
+                        m->free_when_done = TRUE;
+                    }
+                    /*
+                     * we used to remove the page from the queues at this
+                     * point, but we do not believe that an msync
+                     * should cause the 'age' of a page to be changed
+                     *
+                     *    else
+                     *        dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
+                     */
+                }
+                retval = 1;
+                break;
+            }
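+            /*
+             * Queue any work (e.g. freeing the page) decided above and
+             * flush the delayed-work batch once it fills up.
+             */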
+            if (dwp->dw_mask) {
+                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
+
+                if (dw_count >= dw_limit) {
+                    vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+                    dwp = &dw_array[0];
+                    dw_count = 0;
+                }
+            }
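+            /*
+             * Done with this offset; only the "continue" cases above
+             * re-run the page lookup.
+             */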
+            break;
+        }
+    }
+
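+    /*
+     * Charge the dirty pages invalidated above against the current task's
+     * logical-write accounting for the backing vnode.
+     */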
+    if (object->pager)
+        task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
+    /*
+     * We have completed the scan for applicable pages.
+     * Clean any pages that have been saved.
+     */
+    if (dw_count)
+        vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+
+    if (data_cnt) {
+        LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+            paging_offset, offset_resid, io_errno, should_iosync);
+    }
+    return (retval);
+}
+
+
+