*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
unsigned int vm_pageout_pause_max = 0;
unsigned int vm_free_page_pause = 100; /* milliseconds */
+/*
+ * Protection against zero-fill pages flushing live working sets
+ * derived from existing backing store and files.
+ */
+unsigned int vm_accellerate_zf_pageout_trigger = 400;	/* favor the zf queue once this many zf pages exist */
+unsigned int vm_zf_iterator;		/* position in the zf/inactive rotation */
+unsigned int vm_zf_iterator_count = 40;	/* length of that rotation */
+unsigned int last_page_zf;		/* nonzero if the last page examined came from the zf queue */
+unsigned int vm_zf_count = 0;		/* running count of zero-fill pages */
+
/*
* These variables record the pageout daemon's actions:
* how many pages it looks at and what happens to those pages.
* 2) Flow control - wait for untrusted pagers to catch up.
*/
- if (queue_empty(&vm_page_queue_inactive) ||
+ if ((queue_empty(&vm_page_queue_inactive) &&
+ (queue_empty(&vm_page_queue_zf))) ||
((--loop_detect) == 0) ||
(burst_count >= vm_pageout_burst_max)) {
unsigned int pages, msecs;
}
if (queue_empty(&vm_page_queue_inactive) &&
+ queue_empty(&vm_page_queue_zf) &&
(msecs < vm_pageout_empty_wait))
msecs = vm_pageout_empty_wait;
vm_page_unlock_queues();
}
vm_pageout_inactive++;
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+
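+ /*
+  * Decide which queue to reclaim from.  While vm_zf_count is below
+  * the trigger, the regular inactive queue is preferred and zero-fill
+  * pages are taken only when it is empty.  Once the trigger is
+  * exceeded, most passes take a page from the zero-fill queue
+  * instead, so that a flood of zero-fill pages does not push out
+  * file-backed working sets.
+  */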
+ if (vm_zf_count < vm_accellerate_zf_pageout_trigger) {
+ vm_zf_iterator = 0;
+ } else {
+ last_page_zf = 0;
+ if((vm_zf_iterator+=1) >= vm_zf_iterator_count) {
+ vm_zf_iterator = 0;
+ }
+ }
+ if(queue_empty(&vm_page_queue_zf) ||
+ (((last_page_zf) || (vm_zf_iterator == 0)) &&
+ !queue_empty(&vm_page_queue_inactive))) {
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ last_page_zf = 0;
+ } else {
+ m = (vm_page_t) queue_first(&vm_page_queue_zf);
+ last_page_zf = 1;
+ }
if ((vm_page_free_count <= vm_page_free_reserved) &&
(IP_VALID(memory_manager_default))) {
vm_object_unlock(object);
}
m = (vm_page_t) queue_next(&m->pageq);
- } while (!queue_end(&vm_page_queue_inactive,
- (queue_entry_t) m));
- if (queue_end(&vm_page_queue_inactive,
- (queue_entry_t) m)) {
+ } while ((!queue_end(&vm_page_queue_zf,
+ (queue_entry_t) m))
+ && (!queue_end(&vm_page_queue_inactive,
+ (queue_entry_t) m)));
+
+ if ((queue_end(&vm_page_queue_zf,
+ (queue_entry_t) m))
+ || (queue_end(&vm_page_queue_inactive,
+ (queue_entry_t) m))) {
vm_pageout_scan_inactive_emm_throttle_failure++;
/*
* We should check the "active" queue
*/
need_more_inactive_pages = TRUE;
- m = (vm_page_t)
- queue_first(&vm_page_queue_inactive);
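+ /* adjust the zf/inactive selection state, then restart the scan */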
+ if(last_page_zf == 0) {
+ last_page_zf = 1;
+ vm_zf_iterator = vm_zf_iterator_count - 1;
+ } else {
+ last_page_zf = 0;
+ vm_zf_iterator = vm_zf_iterator_count - 2;
+ }
+ vm_page_unlock_queues();
+ goto Restart;
}
}
* Move page to end and continue.
* Don't re-issue ticket
*/
- queue_remove(&vm_page_queue_inactive, m,
+ if(m->zero_fill) {
+ queue_remove(&vm_page_queue_zf, m,
vm_page_t, pageq);
- queue_enter(&vm_page_queue_inactive, m,
+ queue_enter(&vm_page_queue_zf, m,
vm_page_t, pageq);
+ } else {
+ queue_remove(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ queue_enter(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ }
vm_page_unlock_queues();
mutex_pause();
* one of its logically adjacent fellows is
* targeted.
*/
- queue_remove(&vm_page_queue_inactive, m,
- vm_page_t, pageq);
- queue_enter(&vm_page_queue_inactive, m,
- vm_page_t, pageq);
+ if(m->zero_fill) {
+ queue_remove(&vm_page_queue_zf, m,
+ vm_page_t, pageq);
+ queue_enter(&vm_page_queue_zf, m,
+ vm_page_t, pageq);
+ last_page_zf = 1;
+ vm_zf_iterator = vm_zf_iterator_count - 1;
+ } else {
+ queue_remove(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ queue_enter(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ last_page_zf = 0;
+ vm_zf_iterator = 1;
+ }
vm_page_unlock_queues();
vm_object_unlock(object);
vm_pageout_inactive_avoid++;
* Remove the page from the inactive list.
*/
- queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+ if(m->zero_fill) {
+ queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
+ } else {
+ queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+ }
m->inactive = FALSE;
if (!m->fictitious)
vm_page_inactive_count--;
m->discard_request = FALSE;
}
#endif /* ADVISORY_PAGEOUT */
+ last_page_zf = 0;
vm_object_unlock(object);
vm_page_activate(m);
VM_STAT(reactivations++);
s = splsched();
thread_lock(self);
-
self->priority = BASEPRI_PREEMPT - 1;
- self->sched_pri = self->priority;
-
+ set_sched_pri(self, self->priority);
thread_unlock(self);
splx(s);
vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
vm_page_free_count_init = vm_page_free_count;
+ vm_zf_iterator = 0;
/*
* even if we've already called vm_page_free_reserve
 * call it again here to ensure that the targets are
/*NOTREACHED*/
}
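+
+/*
+ * vm_pageout_emergency_availability_request:
+ *
+ * Best-effort attempt to free one page immediately, without blocking
+ * and without involving any pager.  The inactive queue is scanned
+ * first, then the active queue, for a clean page whose object can be
+ * locked without waiting; the first such page is unmapped and freed.
+ *
+ * Returns KERN_SUCCESS if a page was freed, KERN_FAILURE otherwise.
+ */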
+kern_return_t
+vm_pageout_emergency_availability_request()
+{
+ vm_page_t m;
+ vm_object_t object;
+
+ vm_page_lock_queues();
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+
+ while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
+ if(m->fictitious) {
+ m = (vm_page_t) queue_next(&m->pageq);
+ continue;
+ }
+ if (!m->dirty)
+ m->dirty = pmap_is_modified(m->phys_addr);
+ if(m->dirty || m->busy || m->wire_count || m->absent
+ || m->precious || m->cleaning
+ || m->dump_cleaning || m->error
+ || m->pageout || m->laundry
+ || m->list_req_pending
+ || m->overwriting) {
+ m = (vm_page_t) queue_next(&m->pageq);
+ continue;
+ }
+ object = m->object;
+
+ if (vm_object_lock_try(object)) {
+ if((!object->alive) ||
+ (object->pageout)) {
+ vm_object_unlock(object);
+ m = (vm_page_t) queue_next(&m->pageq);
+ continue;
+ }
+ m->busy = TRUE;
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ vm_page_free(m);
+ vm_object_unlock(object);
+ vm_page_unlock_queues();
+ return KERN_SUCCESS;
+ }
+ m = (vm_page_t) queue_next(&m->pageq);
+ }
+
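+ /*
+  * Nothing could be reclaimed from the inactive queue;
+  * repeat the same scan over the active queue.
+  */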
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+
+ while (!queue_end(&vm_page_queue_active, (queue_entry_t) m)) {
+ if(m->fictitious) {
+ m = (vm_page_t) queue_next(&m->pageq);
+ continue;
+ }
+ if (!m->dirty)
+ m->dirty = pmap_is_modified(m->phys_addr);
+ if(m->dirty || m->busy || m->wire_count || m->absent
+ || m->precious || m->cleaning
+ || m->dump_cleaning || m->error
+ || m->pageout || m->laundry
+ || m->list_req_pending
+ || m->overwriting) {
+ m = (vm_page_t) queue_next(&m->pageq);
+ continue;
+ }
+ object = m->object;
+
+ if (vm_object_lock_try(object)) {
+ if((!object->alive) ||
+ (object->pageout)) {
+ vm_object_unlock(object);
+ m = (vm_page_t) queue_next(&m->pageq);
+ continue;
+ }
+ m->busy = TRUE;
+ pmap_page_protect(m->phys_addr, VM_PROT_NONE);
+ vm_page_free(m);
+ vm_object_unlock(object);
+ vm_page_unlock_queues();
+ return KERN_SUCCESS;
+ }
+ m = (vm_page_t) queue_next(&m->pageq);
+ }
+ vm_page_unlock_queues();
+ return KERN_FAILURE;
+}
+
static upl_t
upl_create(
boolean_t internal,
- vm_size_t size)
+ vm_size_t size)
{
upl_t upl;
if(upl->flags & UPL_INTERNAL) {
kfree((vm_offset_t)upl,
sizeof(struct upl) +
- (sizeof(struct upl_page_info) * (upl->size/page_size)));
+ (sizeof(struct upl_page_info) * (upl->size/page_size)));
} else {
kfree((vm_offset_t)upl, sizeof(struct upl));
}
upl->map_object->can_persist = FALSE;
upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
upl->map_object->shadow_offset = offset;
+ upl->map_object->wimg_bits = object->wimg_bits;	/* inherit the backing object's cache attributes */
vm_object_unlock(upl->map_object);
*upl_ptr = upl;
}
}
/*someone else is playing with the */
/* page. We will have to wait. */
- PAGE_ASSERT_WAIT(
- dst_page, THREAD_UNINT);
- vm_object_unlock(object);
- thread_block((void(*)(void))0);
- vm_object_lock(object);
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
/* Someone else already cleaning the page? */
/* original object and its progeny */
vm_page_lock_queues();
- pmap_page_protect(dst_page->phys_addr,
- VM_PROT_NONE);
-
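+ /* for file I/O requests, leave the page's */
+ /* existing mappings in place */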
+ if( !(cntrl_flags & UPL_FILE_IO)) {
+ pmap_page_protect(dst_page->phys_addr, VM_PROT_NONE);
+ }
/* pageout statistics gathering. count */
/* all the pages we will page out that */
/* were not counted in the initial */
}
dst_page = vm_page_lookup(object, dst_offset);
if(dst_page != VM_PAGE_NULL) {
+ if((cntrl_flags & UPL_RET_ONLY_ABSENT) &&
+ !((dst_page->list_req_pending)
+ && (dst_page->absent))) {
+ /* we are doing extended range */
+ /* requests. we want to grab */
+ /* pages around some which are */
+ /* already present. */
+ if(user_page_list)
+ user_page_list[entry].phys_addr = 0;
+ entry++;
+ dst_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
+ continue;
+ }
if((dst_page->cleaning) &&
!(dst_page->list_req_pending)) {
/*someone else is writing to the */
/* page. We will have to wait. */
- PAGE_ASSERT_WAIT(dst_page,THREAD_UNINT);
- vm_object_unlock(object);
- thread_block((void(*)(void))0);
- vm_object_lock(object);
+ PAGE_SLEEP(object,dst_page,THREAD_UNINT);
continue;
}
if ((dst_page->fictitious &&
if(dst_page->busy) {
/*someone else is playing with the */
/* page. We will have to wait. */
- PAGE_ASSERT_WAIT(
- dst_page, THREAD_UNINT);
- vm_object_unlock(object);
- thread_block((void(*)(void))0);
- vm_object_lock(object);
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
continue;
}
vm_page_lock_queues();
- pmap_page_protect(dst_page->phys_addr,
- VM_PROT_NONE);
+ if( !(cntrl_flags & UPL_FILE_IO)) {
+ pmap_page_protect(dst_page->phys_addr, VM_PROT_NONE);
+ }
dirty = pmap_is_modified(dst_page->phys_addr);
dirty = dirty ? TRUE : dst_page->dirty;
? VM_PROT_READ : VM_PROT_WRITE;
while (TRUE) {
kern_return_t rc;
- thread_t thread;
if(!object->pager_ready) {
- thread = current_thread();
- vm_object_assert_wait(object,
- VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
- vm_object_unlock(object);
- thread_block((void (*)(void))0);
- if (thread->wait_result != THREAD_AWAKENED) {
- return(KERN_FAILURE);
+ wait_result_t wait_result;
+
+ wait_result = vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ if (wait_result != THREAD_AWAKENED) {
+ vm_object_unlock(object);
+ return(KERN_FAILURE);
}
- vm_object_lock(object);
continue;
}
for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) {
m = vm_page_lookup(upl->map_object, offset);
if(m) {
- PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, TRUE);
+ unsigned int cache_attr;
+ cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+
+ PMAP_ENTER(map->pmap, addr,
+ m, VM_PROT_ALL,
+ cache_attr, TRUE);
}
offset+=PAGE_SIZE_64;
}
} while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
vm_page_unlock_queues();
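+
+ /*
+  * Repeat the same accounting pass over the zero-fill queue.
+  */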
+ vm_page_lock_queues();
+ m = (vm_page_t) queue_first(&vm_page_queue_zf);
+ do {
+ if (m == (vm_page_t) 0) break;
+
+ if(m->dirty) dpages++;
+ if(m->pageout) pgopages++;
+ if(m->precious) precpages++;
+
+ m = (vm_page_t) queue_next(&m->pageq);
+ if (m == (vm_page_t) 0) break;
+
+ } while (!queue_end(&vm_page_queue_zf,(queue_entry_t) m));
+ vm_page_unlock_queues();
+
printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);
dpages=0;