- page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
-
- XPR(XPR_MEMORY_OBJECT,
- "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
- (integer_t)object, offset, page_lock_result, 0, 0);
-
- switch (page_lock_result)
- {
- case MEMORY_OBJECT_LOCK_RESULT_DONE:
- /*
- * End of a cluster of dirty pages.
- */
- if (data_cnt) {
- LIST_REQ_PAGEOUT_PAGES(object,
- data_cnt, pageout_action,
- paging_offset, offset_resid, io_errno, should_iosync);
- data_cnt = 0;
- continue;
- }
- break;
-
- case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
- /*
- * Since it is necessary to block,
- * clean any dirty pages now.
- */
- if (data_cnt) {
- LIST_REQ_PAGEOUT_PAGES(object,
- data_cnt, pageout_action,
- paging_offset, offset_resid, io_errno, should_iosync);
- data_cnt = 0;
- continue;
- }
- PAGE_SLEEP(object, m, THREAD_UNINT);
- continue;
-
- case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
- case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
- /*
- * The clean and return cases are similar.
- *
- * if this would form a discontiguous block,
- * clean the old pages and start anew.
- *
- * Mark the page busy since we will unlock the
- * object if we issue the LIST_REQ_PAGEOUT
- */
- m->busy = TRUE;
- if (data_cnt &&
- ((last_offset != offset) || (pageout_action != page_lock_result))) {
- LIST_REQ_PAGEOUT_PAGES(object,
- data_cnt, pageout_action,
- paging_offset, offset_resid, io_errno, should_iosync);
- data_cnt = 0;
- }
- m->busy = FALSE;
-
- if (m->cleaning) {
- PAGE_SLEEP(object, m, THREAD_UNINT);
- continue;
- }
- if (data_cnt == 0) {
- pageout_action = page_lock_result;
- paging_offset = offset;
- }
- data_cnt += PAGE_SIZE;
- last_offset = offset + PAGE_SIZE_64;
-
- vm_page_lockspin_queues();
- /*
- * Clean
- */
- m->list_req_pending = TRUE;
- m->cleaning = TRUE;
-
- if (should_flush &&
-			    /* let's not flush a wired page... */
- !m->wire_count) {
- /*
- * and add additional state
- * for the flush
- */
- m->busy = TRUE;
- m->pageout = TRUE;
- vm_page_wire(m);
- }
- vm_page_unlock_queues();
-
- retval = 1;
- break;
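+		/*
+		 * no delayed work queued for this page yet; dw_mask
+		 * collects the queue operations to be applied in a batch
+		 */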
+ dwp->dw_mask = 0;
+
+ page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
+
+ if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
+ /*
+ * End of a run of dirty/precious pages.
+ */
+ if (dw_count) {
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+ dwp = &dw_array[0];
+ dw_count = 0;
+ }
+ LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
+ paging_offset, offset_resid, io_errno, should_iosync);
+ /*
+ * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
+ * allow the state of page 'm' to change... we need to re-lookup
+ * the current offset
+ */
+ data_cnt = 0;
+ continue;
+ }
+
+ switch (page_lock_result) {
+ case MEMORY_OBJECT_LOCK_RESULT_DONE:
+ break;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
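+			/*
+			 * page is no longer needed: count it if it was dirty,
+			 * then queue a deferred free on the delayed-work list
+			 */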
+ if (m->vmp_dirty == TRUE) {
+ dirty_count++;
+ }
+ dwp->dw_mask |= DW_vm_page_free;
+ break;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
+ PAGE_SLEEP(object, m, THREAD_UNINT);
+ continue;
+
+ case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
+ if (data_cnt == 0) {
+ paging_offset = offset;
+ }
+
+ data_cnt += PAGE_SIZE;
+ next_offset = offset + PAGE_SIZE_64;
+
+ /*
+ * wired pages shouldn't be flushed and
+ * since they aren't on any queue,
+ * no need to remove them
+ */
+ if (!VM_PAGE_WIRED(m)) {
+ if (should_flush) {
+ /*
+ * add additional state for the flush
+ */
+ m->vmp_free_when_done = TRUE;
+ }
+ /*
+				 * we used to remove the page from the queues at this
+ * point, but we do not believe that an msync
+ * should cause the 'age' of a page to be changed
+ *
+ * else
+ * dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
+ */
+ }
+ retval = 1;
+ break;
+ }
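+		/*
+		 * accumulate the per-page work; vm_page_do_delayed_work
+		 * applies the whole batch under a single page-queues lock
+		 * rather than taking the lock for each page
+		 */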
+ if (dwp->dw_mask) {
+ VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
+
+ if (dw_count >= dw_limit) {
+ vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
+ dwp = &dw_array[0];
+ dw_count = 0;
+ }