+
+
+
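+/*
+ * Routine:	vm_object_update_extent
+ * Description:
+ *	Scan one extent of a memory object, applying should_return /
+ *	should_flush / prot to each resident page, and batch any pages
+ *	that must be cleaned or returned to the pager.
+ *	Returns non-zero if any pages were cleaned or returned.
+ */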
+static int
+vm_object_update_extent(
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	vm_object_offset_t	offset_end,
+	vm_object_offset_t	*offset_resid,
+	int			*io_errno,
+	boolean_t		should_flush,
+	memory_object_return_t	should_return,
+	boolean_t		should_iosync,
+	vm_prot_t		prot)
+{
+	vm_page_t		m;
+	int			retval = 0;
+	vm_size_t		data_cnt = 0;
+	vm_object_offset_t	paging_offset = 0;
+	vm_object_offset_t	last_offset = offset;
+	memory_object_lock_result_t	page_lock_result;
+	memory_object_lock_result_t	pageout_action;
+
+	pageout_action = MEMORY_OBJECT_LOCK_RESULT_DONE;
+
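+	/*
+	 * Walk the extent a page at a time, batching contiguous pages
+	 * that need the same treatment into a single
+	 * LIST_REQ_PAGEOUT_PAGES request.
+	 */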
+	for (;
+	     offset < offset_end && object->resident_page_count;
+	     offset += PAGE_SIZE_64) {
+
+		/*
+		 * Limit the number of pages to be cleaned at once.
+		 */
+		if (data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) {
+			LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+					       pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
+			data_cnt = 0;
+		}
+
+		while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+			page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
+
+			XPR(XPR_MEMORY_OBJECT,
+			    "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
+			    (integer_t)object, offset, page_lock_result, 0, 0);
+
+			switch (page_lock_result)
+			{
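+			/*
+			 * memory_object_lock_page() tells us, per page, whether
+			 * we are done with it, must block on it, or must
+			 * clean / return it.
+			 */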
+			case MEMORY_OBJECT_LOCK_RESULT_DONE:
+				/*
+				 * End of a cluster of dirty pages.
+				 */
+				if (data_cnt) {
+					LIST_REQ_PAGEOUT_PAGES(object,
+							       data_cnt, pageout_action,
+							       paging_offset, offset_resid, io_errno, should_iosync);
+					data_cnt = 0;
+					continue;
+				}
+				break;
+
+			case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
+				/*
+				 * Since it is necessary to block,
+				 * clean any dirty pages now.
+				 */
+				if (data_cnt) {
+					LIST_REQ_PAGEOUT_PAGES(object,
+							       data_cnt, pageout_action,
+							       paging_offset, offset_resid, io_errno, should_iosync);
+					data_cnt = 0;
+					continue;
+				}
+				PAGE_SLEEP(object, m, THREAD_UNINT);
+				continue;
+
+			case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
+			case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+				/*
+				 * The clean and return cases are similar.
+				 *
+				 * if this would form a discontiguous block,
+				 * clean the old pages and start anew.
+				 *
+				 * Mark the page busy since we will unlock the
+				 * object if we issue the LIST_REQ_PAGEOUT
+				 */
+				m->busy = TRUE;
+				if (data_cnt &&
+				    ((last_offset != offset) || (pageout_action != page_lock_result))) {
+					LIST_REQ_PAGEOUT_PAGES(object,
+							       data_cnt, pageout_action,
+							       paging_offset, offset_resid, io_errno, should_iosync);
+					data_cnt = 0;
+				}
+				m->busy = FALSE;
+
+				if (m->cleaning) {
+					PAGE_SLEEP(object, m, THREAD_UNINT);
+					continue;
+				}
+				if (data_cnt == 0) {
+					pageout_action = page_lock_result;
+					paging_offset = offset;
+				}
+				data_cnt += PAGE_SIZE;
+				last_offset = offset + PAGE_SIZE_64;
+
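+				/*
+				 * Mark the page as being cleaned while holding
+				 * the page queues lock; a flush additionally
+				 * marks the page busy, flags it for pageout and
+				 * wires it so it stays resident until the
+				 * pageout completes.
+				 */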
+				vm_page_lockspin_queues();
+				/*
+				 * Clean
+				 */
+				m->list_req_pending = TRUE;
+				m->cleaning = TRUE;
+
+				if (should_flush) {
+					/*
+					 * and add additional state
+					 * for the flush
+					 */
+					m->busy = TRUE;
+					m->pageout = TRUE;
+					vm_page_wire(m);
+				}
+				vm_page_unlock_queues();
+
+				retval = 1;
+				break;
+			}
+			break;
+		}
+	}
+	/*
+	 * We have completed the scan for applicable pages.
+	 * Clean any pages that have been saved.
+	 */
+	if (data_cnt) {
+		LIST_REQ_PAGEOUT_PAGES(object,
+				       data_cnt, pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
+	}
+	return (retval);
+}
+
+
+